source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
my_sgemm.c | /*
* --------------------------------------------------------------------------
* BLISLAB
* --------------------------------------------------------------------------
* Copyright (C) 2016, The University of Texas at Austin
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name of The University of Texas nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* bl_sgemm.c
*
*
* Purpose:
* this is the main file of blislab sgemm.
*
* Todo:
*
*
* Modification:
*
*
* */
#include <stdio.h>
#include <omp.h>
#include "bl_config.h"
#include "bl_sgemm_kernel.h"
#include "bl_sgemm.h"
#define min( i, j ) ( (i)<(j) ? (i): (j) )
/*
 * Pack an m x k panel of A (m <= SGEMM_MR rows starting at row `offseta`)
 * into `packA`, interleaved so that each k-step stores SGEMM_MR contiguous
 * values. Rows beyond m are padded by replicating row `offseta`.
 */
inline void packA_mcxkc_d(
        int    m,
        int    k,
        float  *XA,
        int    ldXA,
        int    offseta,
        float  *packA
        )
{
    float *src[ SGEMM_MR ];
    int row, col;

    /* Point each slot at its source row; slots past m reuse row 0 as pad. */
    for ( row = 0; row < SGEMM_MR; row ++ ) {
        src[ row ] = XA + offseta + ( row < m ? row : 0 );
    }

    /* Emit one SGEMM_MR-wide column slice per k-step. */
    for ( col = 0; col < k; col ++ ) {
        for ( row = 0; row < SGEMM_MR; row ++ ) {
            *packA ++ = *src[ row ];
            src[ row ] += ldXA;
        }
    }
}
/*
* --------------------------------------------------------------------------
*/
/*
 * Pack a k x n panel of B (n <= SGEMM_NR columns starting at column
 * `offsetb`) into `packB`, interleaved so that each k-step stores SGEMM_NR
 * contiguous values. Columns beyond n are padded with column `offsetb`.
 */
inline void packB_kcxnc_d(
        int    n,
        int    k,
        float  *XB,
        int    ldXB, // ldXB is the original k
        int    offsetb,
        float  *packB
        )
{
    float *src[ SGEMM_NR ];
    int col, row;

    /* Point each slot at its source column; slots past n reuse column 0. */
    for ( col = 0; col < SGEMM_NR; col ++ ) {
        src[ col ] = XB + ldXB * ( offsetb + ( col < n ? col : 0 ) );
    }

    /* Emit one SGEMM_NR-wide row slice per k-step. */
    for ( row = 0; row < k; row ++ ) {
        for ( col = 0; col < SGEMM_NR; col ++ ) {
            *packB ++ = *src[ col ] ++;
        }
    }
}
/*
* --------------------------------------------------------------------------
*/
/*
 * Macro-kernel: C(m x n) += packA(m x k) * packB(k x n) for one cache block,
 * sweeping SGEMM_MR x SGEMM_NR tiles of C with the micro-kernel.
 *
 * packA/packB hold panels already packed by packA_mcxkc_d/packB_kcxnc_d;
 * C is column-major with leading dimension ldc.
 *
 * Fix: removed unused locals `bl_ic_nt`, `ii` and `str` (they were only
 * referenced from dead commented-out code). This loop nest could also be
 * parallelized with OpenMP; see the BLISLAB tutorial.
 */
void bl_macro_kernel(
        int    m,
        int    n,
        int    k,
        float  *packA,
        float  *packB,
        float  *C,
        int    ldc
        )
{
    int    i, j;
    aux_t  aux;

    aux.b_next = packB;

    for ( j = 0; j < n; j += SGEMM_NR ) {          // 2-th loop around micro-kernel
        aux.n = min( n - j, SGEMM_NR );
        for ( i = 0; i < m; i += SGEMM_MR ) {      // 1-th loop around micro-kernel
            aux.m = min( m - i, SGEMM_MR );
            // On the last tile row, advance the prefetch pointer to the
            // next B micro-panel.
            if ( i + SGEMM_MR >= m ) {
                aux.b_next += SGEMM_NR * k;
            }
            ( *bl_micro_kernel ) (
                    k,
                    &packA[ i * k ],
                    &packB[ j * k ],
                    &C[ j * ldc + i ],
                    (unsigned long long) ldc,
                    &aux
                    );
        }                                          // 1-th loop around micro-kernel
    }                                              // 2-th loop around micro-kernel
}
// C must be aligned
/*
 * bl_sgemm: single-precision GEMM, C += XA * XB, column-major storage.
 *
 * m, n, k     problem sizes
 * XA, lda     m x k input (NOTE(review): packing below passes `m` as the
 *             leading dimension, i.e. it assumes lda == m — confirm)
 * XB, ldb     k x n input (NOTE(review): packing below passes `k`, i.e. it
 *             assumes ldb == k — the original carried the same caveat)
 * C, ldc      m x n output; C and ldc must be aligned
 *
 * Blocks the computation with the usual five-loop GotoBLAS/BLIS structure
 * (NC / KC / MC cache blocking, then the macro-kernel over MR x NR tiles).
 *
 * Fix: removed unused locals `p`, `ir`, `jr` and `str` (`ir`/`jr` appeared
 * only in OpenMP private clauses and were never used; parallel-for loop
 * indices are predetermined private anyway).
 */
void bl_sgemm(
        int    m,
        int    n,
        int    k,
        float  *XA,
        int    lda,
        float  *XB,
        int    ldb,
        float  *C,        // must be aligned
        int    ldc        // ldc must also be aligned
        )
{
    int    i, j, bl_ic_nt;
    int    ic, ib, jc, jb, pc, pb;
    float  *packA, *packB;

    // Early return if possible
    if ( m == 0 || n == 0 || k == 0 ) {
        printf( "bl_sgemm(): early return\n" );
        return;
    }

    // Sequential is the default situation; to parallelize the 3-rd loop,
    // read BLISLAB_IC_NT from the environment here (see BLISLAB tutorial).
    bl_ic_nt = 1;

    // Allocate packing buffers: one MC x KC A-panel per thread, one shared
    // KC x NC B-panel. The +1 pads the edge tiles written by the packers.
    packA  = bl_malloc_aligned( SGEMM_KC, ( SGEMM_MC + 1 ) * bl_ic_nt, sizeof(float) );
    packB  = bl_malloc_aligned( SGEMM_KC, ( SGEMM_NC + 1 )           , sizeof(float) );

    for ( jc = 0; jc < n; jc += SGEMM_NC ) {           // 5-th loop around micro-kernel
        jb = min( n - jc, SGEMM_NC );
        for ( pc = 0; pc < k; pc += SGEMM_KC ) {       // 4-th loop around micro-kernel
            pb = min( k - pc, SGEMM_KC );

            #pragma omp parallel for num_threads( bl_ic_nt )
            for ( j = 0; j < jb; j += SGEMM_NR ) {
                packB_kcxnc_d(
                        min( jb - j, SGEMM_NR ),
                        pb,
                        &XB[ pc ],
                        k, // NOTE(review): should be ldXB (ldb) instead
                        jc + j,
                        &packB[ j * pb ]
                        );
            }

            #pragma omp parallel for num_threads( bl_ic_nt ) private( ib, i )
            for ( ic = 0; ic < m; ic += SGEMM_MC ) {   // 3-rd loop around micro-kernel
                int tid = omp_get_thread_num();
                ib = min( m - ic, SGEMM_MC );
                for ( i = 0; i < ib; i += SGEMM_MR ) {
                    packA_mcxkc_d(
                            min( ib - i, SGEMM_MR ),
                            pb,
                            &XA[ pc * lda ],
                            m, // NOTE(review): assumes lda == m — confirm
                            ic + i,
                            &packA[ tid * SGEMM_MC * pb + i * pb ]
                            );
                }
                bl_macro_kernel(
                        ib,
                        jb,
                        pb,
                        packA + tid * SGEMM_MC * pb,
                        packB,
                        &C[ jc * ldc + ic ],
                        ldc
                        );
            }                                          // End 3.rd loop around micro-kernel
        }                                              // End 4.th loop around micro-kernel
    }                                                  // End 5.th loop around micro-kernel

    free( packA );
    free( packB );
}
|
broadcast_reduce-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015-2017 by Contributors
* \file broadcast_reduce-inl.h
* \brief CPU-specific Function definition of broadcast and reduce operators
*/
#ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#include <mxnet/operator_util.h>
#include <algorithm>
#include <vector>
#include <string>
#include <utility>
#include "../mshadow_op.h"
#include "../operator_common.h"
namespace mxnet {
namespace op {
namespace broadcast {
using namespace mshadow;
const int MAX_DIM = 5;
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  // Row-major strides for `shape`; axes of extent 1 get stride 0 so that
  // indexing along them never moves (this is what implements broadcasting).
  Shape<ndim> stride;
  index_t prod = 1;
#pragma unroll
  for (int axis = ndim - 1; axis >= 0; --axis) {
    stride[axis] = (shape[axis] > 1) ? prod : 0;
    prod *= shape[axis];
  }
  return stride;
}
template<int ndim>
MSHADOW_XINLINE void unravel_dot(const index_t idx, const Shape<ndim>& shape,
    const Shape<ndim>& stridej, const Shape<ndim>& stridek, index_t* j, index_t* k) {
  // Decompose flat index `idx` over `shape` and, in the same pass, form the
  // two offsets dot(coord, stridej) -> *j and dot(coord, stridek) -> *k.
  index_t rem = idx;
  *j = 0;
  *k = 0;
#pragma unroll
  for (index_t axis = ndim - 1; axis >= 0; --axis) {
    const auto quot  = rem / shape[axis];
    const auto coord = rem - quot * shape[axis];
    *j += coord * stridej[axis];
    *k += coord * stridek[axis];
    rem = quot;
  }
}
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  // Flat index -> multi-dimensional coordinate (row-major order).
  Shape<ndim> coord;
  index_t rem = idx;
#pragma unroll
  for (index_t axis = ndim - 1; axis >= 0; --axis) {
    const auto quot = rem / shape[axis];
    coord[axis] = rem - quot * shape[axis];
    rem = quot;
  }
  return coord;
}
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  // Coordinate -> flat index. Axes of extent 1 contribute nothing, so a
  // coordinate taken from a larger (broadcast) shape maps onto this one.
  index_t flat = 0;
#pragma unroll
  for (index_t axis = 0; axis < ndim; ++axis) {
    flat = flat * shape[axis] + (shape[axis] > 1) * coord[axis];
  }
  return flat;
}
template<int ndim>
MSHADOW_XINLINE int diff(const Shape<ndim>& small,
                         const Shape<ndim>& big,
                         Shape<ndim>* dims,
                         Shape<ndim>* stride) {
  // Computes the reduction geometry between a reduced shape `small` and the
  // full shape `big`: for each axis where they differ, writes that axis's
  // extent in `big` into *dims and its row-major stride within `big` into
  // *stride, compacted into the first `mdim` slots (original axis order
  // preserved). All other slots are set to 1. Returns mdim, the number of
  // differing (i.e. reduced) axes.
  int mdim = 0;
#pragma unroll
  for (int i = 0; i < ndim; ++i) {
    mdim += small[i] != big[i];      // count mismatched axes
    (*dims)[i] = (*stride)[i] = 1;   // defaults for unused slots
  }
  index_t s = 1;                     // running row-major stride in `big`
#pragma unroll
  for (int i = ndim - 1, j = mdim; i >= 0; --i) {
    if (small[i] != big[i]) {
      --j;                           // fill slots back-to-front: slot j-1..0
      (*stride)[j] = s;
      (*dims)[j] = big[i];
    }
    s *= big[i];
  }
  return mdim;
}
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
                                    const Shape<ndim>& stride) {
  // Equivalent to dot(unravel(idx, shape), stride) without materializing
  // the intermediate coordinate vector.
  index_t acc = 0;
  index_t rem = idx;
#pragma unroll
  for (index_t axis = ndim - 1; axis >= 0; --axis) {
    const auto quot = rem / shape[axis];
    acc += (rem - quot * shape[axis]) * stride[axis];
    rem = quot;
  }
  return acc;
}
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  // Inner product of a coordinate with a stride vector -> linear offset.
  index_t offset = 0;
#pragma unroll
  for (int axis = 0; axis < ndim; ++axis)
    offset += coord[axis] * stride[axis];
  return offset;
}
// Stores `src` into *dst, either overwriting or accumulating, depending on
// whether the caller's request type is write (kWriteTo) or add (kAddTo).
template<typename DType>
MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) {
  if (addto) {
    *dst += src;
  } else {
    *dst = src;
  }
}
// Computes one output element of a broadcast binary operation:
// out[idx] (=|+=) OP(lhs[j], rhs[k]), where j and k are `idx` re-mapped onto
// the (possibly broadcast) input shapes; ravel() drops extent-1 axes, so
// broadcast axes always read index 0 of the corresponding input.
template<int ndim, typename DType, typename OP>
MSHADOW_XINLINE void binary_broadcast_assign(const index_t idx, const bool addto,
                                             const DType* __restrict lhs,
                                             const DType* __restrict rhs, DType* out,
                                             const Shape<ndim>& lshape, const Shape<ndim>& rshape,
                                             const Shape<ndim>& oshape) {
  const Shape<ndim> coord = unravel(idx, oshape);
  const index_t j = ravel(coord, lshape);
  const index_t k = ravel(coord, rshape);
  assign(&out[idx], addto, OP::Map(lhs[j], rhs[k]));
}
// Sequentially reduces, into small[idx], the M elements of `big` that map
// onto that output position. rshape/rstride (from diff()) describe the
// reduced axes; AType is the accumulation type, which may be wider than
// DType when safe accumulation is requested.
template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP>
MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto,
                                       const DType* __restrict big, OType *small,
                                       const Shape<ndim>& bshape, const Shape<ndim>& sshape,
                                       const Shape<ndim>& rshape, const Shape<ndim>& rstride) {
  Shape<ndim> coord = unravel(idx, sshape);
  index_t j = ravel(coord, bshape);   // base offset of this slice within big
  AType val, residual;                // residual: reducer-specific compensation term
  Reducer::SetInitValue(val, residual);
  for (size_t k = 0; k < M; ++k) {
    coord = unravel(k, rshape);       // k-th position along the reduced axes
    Reducer::Reduce(val, AType(OP::Map(big[j + dot(coord, rstride)])), residual);
  }
  Reducer::Finalize(val, residual);
  assign(&small[idx], addto, OType(val));
}
#ifdef __CUDACC__
#include "broadcast_reduce-inl.cuh"
#else
// CPU implementation of a broadcast binary op over all N output elements.
// NOTE(review): this loop is serial, unlike the OpenMP-parallel reduce loops
// below — confirm whether that is intentional.
template<int ndim, typename DType, typename OP>
void binary_broadcast_compute(const size_t N, const bool addto, const DType *lhs,
                              const DType *rhs, DType *out, const Shape<ndim> lshape,
                              const Shape<ndim> rshape, const Shape<ndim> oshape) {
  for (size_t idx = 0; idx < N; ++idx) {
    binary_broadcast_assign<ndim, DType, OP>(idx, addto, lhs, rhs, out, lshape, rshape, oshape);
  }
}
// CPU entry point for broadcast binary ops on TBlobs: unpacks shapes and
// data pointers and dispatches to binary_broadcast_compute. kNullOp is a
// no-op; kAddTo accumulates into `out` instead of overwriting.
template<int ndim, typename DType, typename OP>
void BinaryBroadcastComputeImpl(Stream<cpu> *s, const OpReqType req,
                                const TBlob& lhs, const TBlob& rhs, const TBlob& out) {
  if (req == kNullOp) return;
  size_t N = out.shape_.Size();
  binary_broadcast_compute<ndim, DType, OP>(N, req == kAddTo, lhs.dptr<DType>(), rhs.dptr<DType>(),
      out.dptr<DType>(), lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>(),
      out.shape_.get<ndim>());
}
// Reduces `big` into the N elements of `small`. Each output element is an
// independent reduction over M inputs, so the outer loop is parallelized
// across outputs with OpenMP.
template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP>
void seq_reduce_compute(const size_t N, const size_t M, const bool addto,
                        const DType *big, OType *small, const Shape<ndim> bshape,
                        const Shape<ndim> sshape, const Shape<ndim> rshape,
                        const Shape<ndim> rstride) {
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
    seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP>(idx, M, addto, big, small,
        bshape, sshape, rshape, rstride);
  }
}
// Same reduction as seq_reduce_compute, but the per-step offsets into `big`
// have been precomputed into ws_dptr (by ReduceWithExtraMem), trading
// workspace memory for the unravel/dot arithmetic on every inner step.
//
// Fix: added the missing Reducer::Finalize(val, residual) call before the
// store, matching seq_reduce_assign above; without it, reducers with a
// non-trivial finalization step would produce unfinalized values here.
template <typename Reducer, int ndim, typename DType, typename OP>
void seq_reduce_compute_extra_mem(const size_t N, const size_t M, const bool addto,
                                  const DType* big, DType* small,
                                  const Shape<ndim> bshape,
                                  const Shape<ndim> sshape,
                                  const Shape<ndim> rshape,
                                  const Shape<ndim> rstride,
                                  const index_t* ws_dptr) {
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
    Shape<ndim> coord = unravel(idx, sshape);
    index_t j = ravel(coord, bshape);   // base offset of this slice within big
    DType val, residual;
    Reducer::SetInitValue(val, residual);
    for (size_t k = 0; k < M; ++k) {
      Reducer::Reduce(val, OP::Map(big[j + ws_dptr[k]]), residual);
    }
    Reducer::Finalize(val, residual);
    assign(&small[idx], addto, val);
  }
}
// Reduces `big` into `small` along every axis where their shapes differ.
// When safe_acc is true, accumulation is performed in a wider type (AType,
// chosen by MXNET_ACC_TYPE_SWITCH, e.g. double for float) and cast to the
// output type; otherwise everything runs in DType. The CPU path needs no
// workspace (see ReduceWorkspaceSize below).
template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false>
void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req,
            const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  // rshape/rstride describe the reduced axes of `big` (see diff()).
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  size_t N = small.shape_.Size(), M = rshape.Size();
  if (!safe_acc) {
    seq_reduce_compute<Reducer, ndim, DType, DType, DType, OP>(
        N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(),
        big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
  } else {
    // Dispatch over the accumulation type and the (possibly different)
    // output dtype of `small`.
    MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
      typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
      MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, {
        typedef typename std::conditional<safe_acc, OType, DataType>::type OutType;
        seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>(
            N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(),
            big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
      });
    });
  }
}
// Variant of Reduce that first precomputes the M offsets into `big` (one per
// position along the reduced axes) into the caller-provided workspace, then
// reduces using those cached offsets. Avoids recomputing unravel/dot in the
// inner loop at the cost of M * sizeof(index_t) workspace bytes.
template <typename Reducer, int ndim, typename DType, typename OP>
void ReduceWithExtraMem(Stream<cpu>* s, const TBlob& small, const OpReqType req,
                        const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
  using namespace mxnet_op;
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  // The workspace is reinterpreted as an array of M offsets.
  index_t* ws_dptr = reinterpret_cast<index_t*>(workspace.dptr_);
  size_t N = small.shape_.Size(), M = rshape.Size();
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t k = 0; k < static_cast<index_t>(M); k++) {
    Shape<ndim> coord = unravel(k, rshape);
    ws_dptr[k] = dot(coord, rstride);
  }
  seq_reduce_compute_extra_mem<Reducer, ndim, DType, OP>(
      N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(),
      small.shape_.get<ndim>(), rshape, rstride, ws_dptr);
}
// Workspace requirement for the plain CPU Reduce path: none (the reduction
// is computed directly from `big`).
template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const mxnet::TShape& small, const OpReqType req,
                           const mxnet::TShape& big) {
  return 0;
}
// Workspace requirement for the three-input CPU Reduce path (big/lhs/rhs):
// also none on CPU.
template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const mxnet::TShape& small, const OpReqType req,
                           const mxnet::TShape& big, const mxnet::TShape& lhs,
                           const mxnet::TShape& rhs) {
  return 0;
}
// Three-input variant: reduces, into small[idx], the M values
// OP1(big[...], OP2(lhs[...], rhs[...])) that map onto that output position.
// Each of big/lhs/rhs has its own base offset (from the *_shape0 shapes) and
// its own per-step geometry (shape/stride pairs from diff()), because the
// three inputs may broadcast differently against `small`.
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto,
                                       const DType* __restrict big, const DType* __restrict lhs,
                                       const DType* __restrict rhs, DType *small,
                                       const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0,
                                       const Shape<ndim>& rhs_shape0,
                                       const Shape<ndim>& small_shape, const Shape<ndim>& rshape,
                                       const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape,
                                       const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride,
                                       const Shape<ndim>& rhs_stride) {
  Shape<ndim> coord = unravel(idx, small_shape);
  // Base offsets of this output position within each input.
  const index_t idx_big0 = ravel(coord, big_shape);
  const index_t idx_lhs0 = ravel(coord, lhs_shape0);
  const index_t idx_rhs0 = ravel(coord, rhs_shape0);
  DType val, residual;
  Reducer::SetInitValue(val, residual);
  for (size_t k = 0; k < M; ++k) {
    // Step k along the reduced axes of each input independently.
    Shape<ndim> coord_big = unravel(k, rshape);
    index_t idx_big = idx_big0 + dot(coord_big, rstride);
    Shape<ndim> coord_lhs = unravel(k, lhs_shape);
    index_t idx_lhs = idx_lhs0 + dot(coord_lhs, lhs_stride);
    Shape<ndim> coord_rhs = unravel(k, rhs_shape);
    index_t idx_rhs = idx_rhs0 + dot(coord_rhs, rhs_stride);
    Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual);
  }
  Reducer::Finalize(val, residual);
  assign(&small[idx], addto, val);
}
// Three-input reduction driver: N independent reductions of M elements each,
// parallelized across outputs with OpenMP (see seq_reduce_assign above for
// the meaning of the shape/stride arguments).
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void seq_reduce_compute(const size_t N, const size_t M, const bool addto,
                        const DType *big, const DType *lhs, const DType *rhs, DType *small,
                        const Shape<ndim> big_shape, const Shape<ndim> small_shape,
                        const Shape<ndim> rshape, const Shape<ndim> rstride,
                        const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride,
                        const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride,
                        const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) {
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
    seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small,
        big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape, rstride,
        lhs_stride, rhs_stride);
  }
}
// Three-input Reduce: small (=|+=) Reducer over OP1(big, OP2(lhs, rhs)),
// reducing every axis where the shapes differ from `small`. Computes the
// reduction geometry of each input separately (they may broadcast
// differently) and dispatches to the three-input seq_reduce_compute.
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req,
            const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs,
            const TBlob& rhs) {
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  size_t N = small.shape_.Size();
  size_t M = rshape.Size();   // number of elements reduced per output
  Shape<ndim> lhs_shape, lhs_stride;
  diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride);
  Shape<ndim> rhs_shape, rhs_stride;
  diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride);
  seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>(
      N, M, req == kAddTo,
      big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(),
      big.shape_.get<ndim>(), small.shape_.get<ndim>(),
      rshape, rstride,
      lhs_shape, lhs_stride,
      rhs_shape, rhs_stride,
      lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>());
}
#endif
} // namespace broadcast
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
|
convdw5x5s2_pack4_neon.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
// Depthwise 5x5 convolution, stride 2, for pack-4 layout (4 channels per
// float32x4 lane). Each channel group g is convolved independently with its
// own 5x5 kernel (25 float32x4 taps = 100 floats, laid out row-major in
// kernel.row(g)) plus an optional per-group bias.
// The body is deliberately hand-unrolled NEON code: one output row is built
// from five input rows, producing 4 / 2 / 1 output pixels per inner
// iteration. Assumes valid padding already applied by the caller — TODO
// confirm against the calling convolution layer.
static void convdw5x5s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // After one output row, 2*outw input columns were consumed; step over
    // the remainder of the row plus one extra input row (vertical stride 2),
    // measured in floats (x4 for the pack-4 layout).
    const int tailstep = (w - 2*outw + w) * 4;

    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g=0; g<group; g++)
    {
        Mat out = top_blob.channel(g);

        // Per-group bias (all 4 packed channels), or zero when bias is absent.
        float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);

        const float* k0 = kernel.row(g);

        float* outptr0 = out;

        const Mat img0 = bottom_blob.channel(g);

        // Five consecutive input rows feed one output row (5-tap vertical).
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        const float* r3 = img0.row(3);
        const float* r4 = img0.row(4);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // Main loop: 4 output pixels per iteration. Horizontal stride 2
            // means 4 outputs span 11 packed input columns (offsets 0..40);
            // _sumN reuses input taps shifted by 2 columns per output.
            for (; j+3 < outw; j+=4)
            {
                float32x4_t _sum0 = _bias0;
                float32x4_t _sum1 = _bias0;
                float32x4_t _sum2 = _bias0;
                float32x4_t _sum3 = _bias0;

                // kernel row 0 x input row 0
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r01 = vld1q_f32(r0+4);
                float32x4_t _r02 = vld1q_f32(r0+8);
                float32x4_t _r03 = vld1q_f32(r0+12);
                float32x4_t _r04 = vld1q_f32(r0+16);
                float32x4_t _r05 = vld1q_f32(r0+20);
                float32x4_t _r06 = vld1q_f32(r0+24);
                float32x4_t _r07 = vld1q_f32(r0+28);
                float32x4_t _r08 = vld1q_f32(r0+32);
                float32x4_t _r09 = vld1q_f32(r0+36);
                float32x4_t _r010 = vld1q_f32(r0+40);

                float32x4_t _k00 = vld1q_f32(k0);
                float32x4_t _k01 = vld1q_f32(k0+4);
                float32x4_t _k02 = vld1q_f32(k0+8);
                float32x4_t _k03 = vld1q_f32(k0+12);
                float32x4_t _k04 = vld1q_f32(k0+16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k00, _r00);
                _sum0 = vmlaq_f32(_sum0, _k01, _r01);
                _sum0 = vmlaq_f32(_sum0, _k02, _r02);
                _sum0 = vmlaq_f32(_sum0, _k03, _r03);
                _sum0 = vmlaq_f32(_sum0, _k04, _r04);
                _sum1 = vmlaq_f32(_sum1, _k00, _r02);
                _sum1 = vmlaq_f32(_sum1, _k01, _r03);
                _sum1 = vmlaq_f32(_sum1, _k02, _r04);
                _sum1 = vmlaq_f32(_sum1, _k03, _r05);
                _sum1 = vmlaq_f32(_sum1, _k04, _r06);
                _sum2 = vmlaq_f32(_sum2, _k00, _r04);
                _sum2 = vmlaq_f32(_sum2, _k01, _r05);
                _sum2 = vmlaq_f32(_sum2, _k02, _r06);
                _sum2 = vmlaq_f32(_sum2, _k03, _r07);
                _sum2 = vmlaq_f32(_sum2, _k04, _r08);
                _sum3 = vmlaq_f32(_sum3, _k00, _r06);
                _sum3 = vmlaq_f32(_sum3, _k01, _r07);
                _sum3 = vmlaq_f32(_sum3, _k02, _r08);
                _sum3 = vmlaq_f32(_sum3, _k03, _r09);
                _sum3 = vmlaq_f32(_sum3, _k04, _r010);

                // kernel row 1 x input row 1
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r11 = vld1q_f32(r1+4);
                float32x4_t _r12 = vld1q_f32(r1+8);
                float32x4_t _r13 = vld1q_f32(r1+12);
                float32x4_t _r14 = vld1q_f32(r1+16);
                float32x4_t _r15 = vld1q_f32(r1+20);
                float32x4_t _r16 = vld1q_f32(r1+24);
                float32x4_t _r17 = vld1q_f32(r1+28);
                float32x4_t _r18 = vld1q_f32(r1+32);
                float32x4_t _r19 = vld1q_f32(r1+36);
                float32x4_t _r110 = vld1q_f32(r1+40);

                float32x4_t _k10 = vld1q_f32(k0);
                float32x4_t _k11 = vld1q_f32(k0+4);
                float32x4_t _k12 = vld1q_f32(k0+8);
                float32x4_t _k13 = vld1q_f32(k0+12);
                float32x4_t _k14 = vld1q_f32(k0+16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k10, _r10);
                _sum0 = vmlaq_f32(_sum0, _k11, _r11);
                _sum0 = vmlaq_f32(_sum0, _k12, _r12);
                _sum0 = vmlaq_f32(_sum0, _k13, _r13);
                _sum0 = vmlaq_f32(_sum0, _k14, _r14);
                _sum1 = vmlaq_f32(_sum1, _k10, _r12);
                _sum1 = vmlaq_f32(_sum1, _k11, _r13);
                _sum1 = vmlaq_f32(_sum1, _k12, _r14);
                _sum1 = vmlaq_f32(_sum1, _k13, _r15);
                _sum1 = vmlaq_f32(_sum1, _k14, _r16);
                _sum2 = vmlaq_f32(_sum2, _k10, _r14);
                _sum2 = vmlaq_f32(_sum2, _k11, _r15);
                _sum2 = vmlaq_f32(_sum2, _k12, _r16);
                _sum2 = vmlaq_f32(_sum2, _k13, _r17);
                _sum2 = vmlaq_f32(_sum2, _k14, _r18);
                _sum3 = vmlaq_f32(_sum3, _k10, _r16);
                _sum3 = vmlaq_f32(_sum3, _k11, _r17);
                _sum3 = vmlaq_f32(_sum3, _k12, _r18);
                _sum3 = vmlaq_f32(_sum3, _k13, _r19);
                _sum3 = vmlaq_f32(_sum3, _k14, _r110);

                // kernel row 2 x input row 2
                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r21 = vld1q_f32(r2+4);
                float32x4_t _r22 = vld1q_f32(r2+8);
                float32x4_t _r23 = vld1q_f32(r2+12);
                float32x4_t _r24 = vld1q_f32(r2+16);
                float32x4_t _r25 = vld1q_f32(r2+20);
                float32x4_t _r26 = vld1q_f32(r2+24);
                float32x4_t _r27 = vld1q_f32(r2+28);
                float32x4_t _r28 = vld1q_f32(r2+32);
                float32x4_t _r29 = vld1q_f32(r2+36);
                float32x4_t _r210 = vld1q_f32(r2+40);

                float32x4_t _k20 = vld1q_f32(k0);
                float32x4_t _k21 = vld1q_f32(k0+4);
                float32x4_t _k22 = vld1q_f32(k0+8);
                float32x4_t _k23 = vld1q_f32(k0+12);
                float32x4_t _k24 = vld1q_f32(k0+16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k20, _r20);
                _sum0 = vmlaq_f32(_sum0, _k21, _r21);
                _sum0 = vmlaq_f32(_sum0, _k22, _r22);
                _sum0 = vmlaq_f32(_sum0, _k23, _r23);
                _sum0 = vmlaq_f32(_sum0, _k24, _r24);
                _sum1 = vmlaq_f32(_sum1, _k20, _r22);
                _sum1 = vmlaq_f32(_sum1, _k21, _r23);
                _sum1 = vmlaq_f32(_sum1, _k22, _r24);
                _sum1 = vmlaq_f32(_sum1, _k23, _r25);
                _sum1 = vmlaq_f32(_sum1, _k24, _r26);
                _sum2 = vmlaq_f32(_sum2, _k20, _r24);
                _sum2 = vmlaq_f32(_sum2, _k21, _r25);
                _sum2 = vmlaq_f32(_sum2, _k22, _r26);
                _sum2 = vmlaq_f32(_sum2, _k23, _r27);
                _sum2 = vmlaq_f32(_sum2, _k24, _r28);
                _sum3 = vmlaq_f32(_sum3, _k20, _r26);
                _sum3 = vmlaq_f32(_sum3, _k21, _r27);
                _sum3 = vmlaq_f32(_sum3, _k22, _r28);
                _sum3 = vmlaq_f32(_sum3, _k23, _r29);
                _sum3 = vmlaq_f32(_sum3, _k24, _r210);

                // kernel row 3 x input row 3
                float32x4_t _r30 = vld1q_f32(r3);
                float32x4_t _r31 = vld1q_f32(r3+4);
                float32x4_t _r32 = vld1q_f32(r3+8);
                float32x4_t _r33 = vld1q_f32(r3+12);
                float32x4_t _r34 = vld1q_f32(r3+16);
                float32x4_t _r35 = vld1q_f32(r3+20);
                float32x4_t _r36 = vld1q_f32(r3+24);
                float32x4_t _r37 = vld1q_f32(r3+28);
                float32x4_t _r38 = vld1q_f32(r3+32);
                float32x4_t _r39 = vld1q_f32(r3+36);
                float32x4_t _r310 = vld1q_f32(r3+40);

                float32x4_t _k30 = vld1q_f32(k0);
                float32x4_t _k31 = vld1q_f32(k0+4);
                float32x4_t _k32 = vld1q_f32(k0+8);
                float32x4_t _k33 = vld1q_f32(k0+12);
                float32x4_t _k34 = vld1q_f32(k0+16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k30, _r30);
                _sum0 = vmlaq_f32(_sum0, _k31, _r31);
                _sum0 = vmlaq_f32(_sum0, _k32, _r32);
                _sum0 = vmlaq_f32(_sum0, _k33, _r33);
                _sum0 = vmlaq_f32(_sum0, _k34, _r34);
                _sum1 = vmlaq_f32(_sum1, _k30, _r32);
                _sum1 = vmlaq_f32(_sum1, _k31, _r33);
                _sum1 = vmlaq_f32(_sum1, _k32, _r34);
                _sum1 = vmlaq_f32(_sum1, _k33, _r35);
                _sum1 = vmlaq_f32(_sum1, _k34, _r36);
                _sum2 = vmlaq_f32(_sum2, _k30, _r34);
                _sum2 = vmlaq_f32(_sum2, _k31, _r35);
                _sum2 = vmlaq_f32(_sum2, _k32, _r36);
                _sum2 = vmlaq_f32(_sum2, _k33, _r37);
                _sum2 = vmlaq_f32(_sum2, _k34, _r38);
                _sum3 = vmlaq_f32(_sum3, _k30, _r36);
                _sum3 = vmlaq_f32(_sum3, _k31, _r37);
                _sum3 = vmlaq_f32(_sum3, _k32, _r38);
                _sum3 = vmlaq_f32(_sum3, _k33, _r39);
                _sum3 = vmlaq_f32(_sum3, _k34, _r310);

                // kernel row 4 x input row 4
                float32x4_t _r40 = vld1q_f32(r4);
                float32x4_t _r41 = vld1q_f32(r4+4);
                float32x4_t _r42 = vld1q_f32(r4+8);
                float32x4_t _r43 = vld1q_f32(r4+12);
                float32x4_t _r44 = vld1q_f32(r4+16);
                float32x4_t _r45 = vld1q_f32(r4+20);
                float32x4_t _r46 = vld1q_f32(r4+24);
                float32x4_t _r47 = vld1q_f32(r4+28);
                float32x4_t _r48 = vld1q_f32(r4+32);
                float32x4_t _r49 = vld1q_f32(r4+36);
                float32x4_t _r410 = vld1q_f32(r4+40);

                float32x4_t _k40 = vld1q_f32(k0);
                float32x4_t _k41 = vld1q_f32(k0+4);
                float32x4_t _k42 = vld1q_f32(k0+8);
                float32x4_t _k43 = vld1q_f32(k0+12);
                float32x4_t _k44 = vld1q_f32(k0+16);
                k0 -= 80; // rewind to the start of this group's kernel

                _sum0 = vmlaq_f32(_sum0, _k40, _r40);
                _sum0 = vmlaq_f32(_sum0, _k41, _r41);
                _sum0 = vmlaq_f32(_sum0, _k42, _r42);
                _sum0 = vmlaq_f32(_sum0, _k43, _r43);
                _sum0 = vmlaq_f32(_sum0, _k44, _r44);
                _sum1 = vmlaq_f32(_sum1, _k40, _r42);
                _sum1 = vmlaq_f32(_sum1, _k41, _r43);
                _sum1 = vmlaq_f32(_sum1, _k42, _r44);
                _sum1 = vmlaq_f32(_sum1, _k43, _r45);
                _sum1 = vmlaq_f32(_sum1, _k44, _r46);
                _sum2 = vmlaq_f32(_sum2, _k40, _r44);
                _sum2 = vmlaq_f32(_sum2, _k41, _r45);
                _sum2 = vmlaq_f32(_sum2, _k42, _r46);
                _sum2 = vmlaq_f32(_sum2, _k43, _r47);
                _sum2 = vmlaq_f32(_sum2, _k44, _r48);
                _sum3 = vmlaq_f32(_sum3, _k40, _r46);
                _sum3 = vmlaq_f32(_sum3, _k41, _r47);
                _sum3 = vmlaq_f32(_sum3, _k42, _r48);
                _sum3 = vmlaq_f32(_sum3, _k43, _r49);
                _sum3 = vmlaq_f32(_sum3, _k44, _r410);

                vst1q_f32(outptr0, _sum0);
                vst1q_f32(outptr0+4, _sum1);
                vst1q_f32(outptr0+8, _sum2);
                vst1q_f32(outptr0+12, _sum3);

                // 4 outputs consumed 8 input columns (stride 2), 4 floats each.
                r0 += 8*4;
                r1 += 8*4;
                r2 += 8*4;
                r3 += 8*4;
                r4 += 8*4;
                outptr0 += 16;
            }
            // Tail: 2 output pixels per iteration (7 input columns).
            for (; j+1 < outw; j+=2)
            {
                float32x4_t _sum0 = _bias0;
                float32x4_t _sum1 = _bias0;

                // kernel row 0 x input row 0
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r01 = vld1q_f32(r0+4);
                float32x4_t _r02 = vld1q_f32(r0+8);
                float32x4_t _r03 = vld1q_f32(r0+12);
                float32x4_t _r04 = vld1q_f32(r0+16);
                float32x4_t _r05 = vld1q_f32(r0+20);
                float32x4_t _r06 = vld1q_f32(r0+24);

                float32x4_t _k00 = vld1q_f32(k0);
                float32x4_t _k01 = vld1q_f32(k0+4);
                float32x4_t _k02 = vld1q_f32(k0+8);
                float32x4_t _k03 = vld1q_f32(k0+12);
                float32x4_t _k04 = vld1q_f32(k0+16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k00, _r00);
                _sum0 = vmlaq_f32(_sum0, _k01, _r01);
                _sum0 = vmlaq_f32(_sum0, _k02, _r02);
                _sum0 = vmlaq_f32(_sum0, _k03, _r03);
                _sum0 = vmlaq_f32(_sum0, _k04, _r04);
                _sum1 = vmlaq_f32(_sum1, _k00, _r02);
                _sum1 = vmlaq_f32(_sum1, _k01, _r03);
                _sum1 = vmlaq_f32(_sum1, _k02, _r04);
                _sum1 = vmlaq_f32(_sum1, _k03, _r05);
                _sum1 = vmlaq_f32(_sum1, _k04, _r06);

                // kernel row 1 x input row 1
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r11 = vld1q_f32(r1+4);
                float32x4_t _r12 = vld1q_f32(r1+8);
                float32x4_t _r13 = vld1q_f32(r1+12);
                float32x4_t _r14 = vld1q_f32(r1+16);
                float32x4_t _r15 = vld1q_f32(r1+20);
                float32x4_t _r16 = vld1q_f32(r1+24);

                float32x4_t _k10 = vld1q_f32(k0);
                float32x4_t _k11 = vld1q_f32(k0+4);
                float32x4_t _k12 = vld1q_f32(k0+8);
                float32x4_t _k13 = vld1q_f32(k0+12);
                float32x4_t _k14 = vld1q_f32(k0+16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k10, _r10);
                _sum0 = vmlaq_f32(_sum0, _k11, _r11);
                _sum0 = vmlaq_f32(_sum0, _k12, _r12);
                _sum0 = vmlaq_f32(_sum0, _k13, _r13);
                _sum0 = vmlaq_f32(_sum0, _k14, _r14);
                _sum1 = vmlaq_f32(_sum1, _k10, _r12);
                _sum1 = vmlaq_f32(_sum1, _k11, _r13);
                _sum1 = vmlaq_f32(_sum1, _k12, _r14);
                _sum1 = vmlaq_f32(_sum1, _k13, _r15);
                _sum1 = vmlaq_f32(_sum1, _k14, _r16);

                // kernel row 2 x input row 2
                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r21 = vld1q_f32(r2+4);
                float32x4_t _r22 = vld1q_f32(r2+8);
                float32x4_t _r23 = vld1q_f32(r2+12);
                float32x4_t _r24 = vld1q_f32(r2+16);
                float32x4_t _r25 = vld1q_f32(r2+20);
                float32x4_t _r26 = vld1q_f32(r2+24);

                float32x4_t _k20 = vld1q_f32(k0);
                float32x4_t _k21 = vld1q_f32(k0+4);
                float32x4_t _k22 = vld1q_f32(k0+8);
                float32x4_t _k23 = vld1q_f32(k0+12);
                float32x4_t _k24 = vld1q_f32(k0+16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k20, _r20);
                _sum0 = vmlaq_f32(_sum0, _k21, _r21);
                _sum0 = vmlaq_f32(_sum0, _k22, _r22);
                _sum0 = vmlaq_f32(_sum0, _k23, _r23);
                _sum0 = vmlaq_f32(_sum0, _k24, _r24);
                _sum1 = vmlaq_f32(_sum1, _k20, _r22);
                _sum1 = vmlaq_f32(_sum1, _k21, _r23);
                _sum1 = vmlaq_f32(_sum1, _k22, _r24);
                _sum1 = vmlaq_f32(_sum1, _k23, _r25);
                _sum1 = vmlaq_f32(_sum1, _k24, _r26);

                // kernel row 3 x input row 3
                float32x4_t _r30 = vld1q_f32(r3);
                float32x4_t _r31 = vld1q_f32(r3+4);
                float32x4_t _r32 = vld1q_f32(r3+8);
                float32x4_t _r33 = vld1q_f32(r3+12);
                float32x4_t _r34 = vld1q_f32(r3+16);
                float32x4_t _r35 = vld1q_f32(r3+20);
                float32x4_t _r36 = vld1q_f32(r3+24);

                float32x4_t _k30 = vld1q_f32(k0);
                float32x4_t _k31 = vld1q_f32(k0+4);
                float32x4_t _k32 = vld1q_f32(k0+8);
                float32x4_t _k33 = vld1q_f32(k0+12);
                float32x4_t _k34 = vld1q_f32(k0+16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k30, _r30);
                _sum0 = vmlaq_f32(_sum0, _k31, _r31);
                _sum0 = vmlaq_f32(_sum0, _k32, _r32);
                _sum0 = vmlaq_f32(_sum0, _k33, _r33);
                _sum0 = vmlaq_f32(_sum0, _k34, _r34);
                _sum1 = vmlaq_f32(_sum1, _k30, _r32);
                _sum1 = vmlaq_f32(_sum1, _k31, _r33);
                _sum1 = vmlaq_f32(_sum1, _k32, _r34);
                _sum1 = vmlaq_f32(_sum1, _k33, _r35);
                _sum1 = vmlaq_f32(_sum1, _k34, _r36);

                // kernel row 4 x input row 4
                float32x4_t _r40 = vld1q_f32(r4);
                float32x4_t _r41 = vld1q_f32(r4+4);
                float32x4_t _r42 = vld1q_f32(r4+8);
                float32x4_t _r43 = vld1q_f32(r4+12);
                float32x4_t _r44 = vld1q_f32(r4+16);
                float32x4_t _r45 = vld1q_f32(r4+20);
                float32x4_t _r46 = vld1q_f32(r4+24);

                float32x4_t _k40 = vld1q_f32(k0);
                float32x4_t _k41 = vld1q_f32(k0+4);
                float32x4_t _k42 = vld1q_f32(k0+8);
                float32x4_t _k43 = vld1q_f32(k0+12);
                float32x4_t _k44 = vld1q_f32(k0+16);
                k0 -= 80; // rewind to the start of this group's kernel

                _sum0 = vmlaq_f32(_sum0, _k40, _r40);
                _sum0 = vmlaq_f32(_sum0, _k41, _r41);
                _sum0 = vmlaq_f32(_sum0, _k42, _r42);
                _sum0 = vmlaq_f32(_sum0, _k43, _r43);
                _sum0 = vmlaq_f32(_sum0, _k44, _r44);
                _sum1 = vmlaq_f32(_sum1, _k40, _r42);
                _sum1 = vmlaq_f32(_sum1, _k41, _r43);
                _sum1 = vmlaq_f32(_sum1, _k42, _r44);
                _sum1 = vmlaq_f32(_sum1, _k43, _r45);
                _sum1 = vmlaq_f32(_sum1, _k44, _r46);

                vst1q_f32(outptr0, _sum0);
                vst1q_f32(outptr0+4, _sum1);

                // 2 outputs consumed 4 input columns (stride 2), 4 floats each.
                r0 += 4*4;
                r1 += 4*4;
                r2 += 4*4;
                r3 += 4*4;
                r4 += 4*4;
                outptr0 += 8;
            }
            // Tail: one output pixel at a time (5 input columns).
            for (; j < outw; j++)
            {
                float32x4_t _sum0 = _bias0;

                // kernel row 0 x input row 0
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r01 = vld1q_f32(r0+4);
                float32x4_t _r02 = vld1q_f32(r0+8);
                float32x4_t _r03 = vld1q_f32(r0+12);
                float32x4_t _r04 = vld1q_f32(r0+16);

                float32x4_t _k00 = vld1q_f32(k0);
                float32x4_t _k01 = vld1q_f32(k0+4);
                float32x4_t _k02 = vld1q_f32(k0+8);
                float32x4_t _k03 = vld1q_f32(k0+12);
                float32x4_t _k04 = vld1q_f32(k0+16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k00, _r00);
                _sum0 = vmlaq_f32(_sum0, _k01, _r01);
                _sum0 = vmlaq_f32(_sum0, _k02, _r02);
                _sum0 = vmlaq_f32(_sum0, _k03, _r03);
                _sum0 = vmlaq_f32(_sum0, _k04, _r04);

                // kernel row 1 x input row 1
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r11 = vld1q_f32(r1+4);
                float32x4_t _r12 = vld1q_f32(r1+8);
                float32x4_t _r13 = vld1q_f32(r1+12);
                float32x4_t _r14 = vld1q_f32(r1+16);

                float32x4_t _k10 = vld1q_f32(k0);
                float32x4_t _k11 = vld1q_f32(k0+4);
                float32x4_t _k12 = vld1q_f32(k0+8);
                float32x4_t _k13 = vld1q_f32(k0+12);
                float32x4_t _k14 = vld1q_f32(k0+16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k10, _r10);
                _sum0 = vmlaq_f32(_sum0, _k11, _r11);
                _sum0 = vmlaq_f32(_sum0, _k12, _r12);
                _sum0 = vmlaq_f32(_sum0, _k13, _r13);
                _sum0 = vmlaq_f32(_sum0, _k14, _r14);

                // kernel row 2 x input row 2
                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r21 = vld1q_f32(r2+4);
                float32x4_t _r22 = vld1q_f32(r2+8);
                float32x4_t _r23 = vld1q_f32(r2+12);
                float32x4_t _r24 = vld1q_f32(r2+16);

                float32x4_t _k20 = vld1q_f32(k0);
                float32x4_t _k21 = vld1q_f32(k0+4);
                float32x4_t _k22 = vld1q_f32(k0+8);
                float32x4_t _k23 = vld1q_f32(k0+12);
                float32x4_t _k24 = vld1q_f32(k0+16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k20, _r20);
                _sum0 = vmlaq_f32(_sum0, _k21, _r21);
                _sum0 = vmlaq_f32(_sum0, _k22, _r22);
                _sum0 = vmlaq_f32(_sum0, _k23, _r23);
                _sum0 = vmlaq_f32(_sum0, _k24, _r24);

                // kernel row 3 x input row 3
                float32x4_t _r30 = vld1q_f32(r3);
                float32x4_t _r31 = vld1q_f32(r3+4);
                float32x4_t _r32 = vld1q_f32(r3+8);
                float32x4_t _r33 = vld1q_f32(r3+12);
                float32x4_t _r34 = vld1q_f32(r3+16);

                float32x4_t _k30 = vld1q_f32(k0);
                float32x4_t _k31 = vld1q_f32(k0+4);
                float32x4_t _k32 = vld1q_f32(k0+8);
                float32x4_t _k33 = vld1q_f32(k0+12);
                float32x4_t _k34 = vld1q_f32(k0+16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k30, _r30);
                _sum0 = vmlaq_f32(_sum0, _k31, _r31);
                _sum0 = vmlaq_f32(_sum0, _k32, _r32);
                _sum0 = vmlaq_f32(_sum0, _k33, _r33);
                _sum0 = vmlaq_f32(_sum0, _k34, _r34);

                // kernel row 4 x input row 4
                float32x4_t _r40 = vld1q_f32(r4);
                float32x4_t _r41 = vld1q_f32(r4+4);
                float32x4_t _r42 = vld1q_f32(r4+8);
                float32x4_t _r43 = vld1q_f32(r4+12);
                float32x4_t _r44 = vld1q_f32(r4+16);

                float32x4_t _k40 = vld1q_f32(k0);
                float32x4_t _k41 = vld1q_f32(k0+4);
                float32x4_t _k42 = vld1q_f32(k0+8);
                float32x4_t _k43 = vld1q_f32(k0+12);
                float32x4_t _k44 = vld1q_f32(k0+16);
                k0 -= 80; // rewind to the start of this group's kernel

                _sum0 = vmlaq_f32(_sum0, _k40, _r40);
                _sum0 = vmlaq_f32(_sum0, _k41, _r41);
                _sum0 = vmlaq_f32(_sum0, _k42, _r42);
                _sum0 = vmlaq_f32(_sum0, _k43, _r43);
                _sum0 = vmlaq_f32(_sum0, _k44, _r44);

                vst1q_f32(outptr0, _sum0);

                // 1 output consumed 2 input columns (stride 2), 4 floats each.
                r0 += 2*4;
                r1 += 2*4;
                r2 += 2*4;
                r3 += 2*4;
                r4 += 2*4;
                outptr0 += 4;
            }

            // Skip to the first input column of the next output row
            // (remainder of this row plus one extra row: vertical stride 2).
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
            r3 += tailstep;
            r4 += tailstep;
        }
    }
}
}
|
DMD5_fmt_plug.c | /*
* DMD5_fmt.c
*
* DIGEST-MD5 authentication module for Solar Designer's John the Ripper
* Uses Solar Designer's MD5 implementation.
*
* This software is Copyright 2006, regenrecht@o2.pl, and
* Copyright 2011, 2013 magnum, and it is hereby released to the general
* public under the following terms: Redistribution and use in source and
* binary forms, with or without modification, are permitted.
*
* Input format:
* $DIGEST-MD5$ username $ realm $ nonce $ digest_uri $ cnonce $ nc $ qop $ response [ $ authzid ]
*
* Just base64-decode the blob you see when sniffing, to get all data needed
* for above.
*
* See https://tools.ietf.org/html/rfc2831 (Using Digest Authentication as a
* SASL Mechanism) for algorithm details.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_DMD5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_DMD5);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "md5.h"
#include "common.h"
#include "formats.h"
#include "memdbg.h"
#define FORMAT_LABEL "dmd5"
#define FORMAT_NAME "DIGEST-MD5 C/R"
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define FORMAT_TAG "$DIGEST-MD5$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MD5_HEX_SIZE (2 * BINARY_SIZE)
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#define DSIZE (128 - sizeof(int))
#define CIPHERTEXT_LENGTH (DSIZE * 4)
#define PLAINTEXT_LENGTH 32
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* High-nibble lookup: itoa16_shr_04[v] is the hex digit of (v >> 4). */
static const char itoa16_shr_04[] =
"0000000000000000"
"1111111111111111"
"2222222222222222"
"3333333333333333"
"4444444444444444"
"5555555555555555"
"6666666666666666"
"7777777777777777"
"8888888888888888"
"9999999999999999"
"aaaaaaaaaaaaaaaa"
"bbbbbbbbbbbbbbbb"
"cccccccccccccccc"
"dddddddddddddddd"
"eeeeeeeeeeeeeeee"
"ffffffffffffffff";
/* Low-nibble lookup: itoa16_and_0f[v] is the hex digit of (v & 0x0f). */
static const char itoa16_and_0f[] =
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef";
/* Per-hash salt: the three password-independent MD5 input fragments that
   get_salt() precomputes so crypt_all() needs only three MD5 passes. */
static struct custom_salt {
unsigned char login_id[DSIZE]; // username:realm
unsigned int login_id_len;
unsigned char nonces[DSIZE]; // :nonce:cnonce[:authzid]
unsigned int nonces_len;
unsigned char prehash_KD[DSIZE]; // :nonce:nc:cnonce:qop:hex_A2_hash
unsigned int prehash_KD_len;
} *cur_salt;
/* Per-candidate result digests and plaintext keys; allocated in init(). */
static uint32_t (*crypt_key)[BINARY_SIZE/4];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Self-test vectors: canonical DIGEST-MD5 blobs with known passwords. */
static struct fmt_tests tests[] = {
{"$DIGEST-MD5$s3443$pjwstk$00$ldap/10.253.34.43$0734d94ad9abd5bd7fc5e7e77bcf49a8$00000001$auth-int$dd98347e6da3efd6c4ff2263a729ef77", "test"},
// Two hashes from https://tools.ietf.org/html/rfc2831#section-8
{"$DIGEST-MD5$chris$elwood.innosoft.com$OA6MG9tEQGm2hh$imap/elwood.innosoft.com$OA6MHXh6VqTrRk$00000001$auth$d388dad90d4bbd760a152321f2143af7", "secret"},
{"$DIGEST-MD5$chris$elwood.innosoft.com$OA9BSXrbuRhWay$acap/elwood.innosoft.com$OA9BSuZWMSpW8m$00000001$auth$6084c6db3fede7352c551284490fd0fc", "secret"},
{NULL}
};
/*
 * One-time format setup: scale the candidate batch size for OpenMP and
 * allocate the zeroed per-candidate key and digest buffers (freed in done()).
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	self->params.max_keys_per_crypt *= threads * OMP_SCALE;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       PLAINTEXT_LENGTH + 1);
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       BINARY_SIZE);
}
/* Release the buffers allocated by init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_key);
}
/*
 * Sanity-check one ciphertext: verify the "$DIGEST-MD5$" tag and walk the
 * '$'-separated fields, enforcing a length limit on each.  Returns 1 if the
 * line is well-formed enough for get_salt()/get_binary() to parse.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *data = ciphertext + FORMAT_TAG_LEN;
int extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
if (strlen(ciphertext) > CIPHERTEXT_LENGTH)
return 0;
if (!(p = strchr(data, '$')) || (int)(p-data) >= 64) // username
return 0;
data = p + 1; // realm
if (!(p = strchr(data, '$')) || (int)(p-data) >= 64)
return 0;
data = p + 1; // nonce
if (!(p = strchr(data, '$')) || (int)(p-data) >= 64)
return 0;
data = p + 1; // digest_uri
if (!(p = strchr(data, '$')) || (int)(p-data) >= DSIZE)
return 0;
data = p + 1; // cnonce
if (!(p = strchr(data, '$')) || (int)(p-data) > MD5_HEX_SIZE)
return 0;
/* if (hexlenl(data, 0) != p-data) // this is not always hex data!
return 0; */
data = p + 1; // nc
if (!(p = strchr(data, '$')) || (int)(p-data) >= 9)
return 0;
data = p + 1; // qop
/* NOTE(review): prefix match only -- e.g. "auth-xyz" passes here; get_salt()
   re-checks qop with exact strcmp. */
if (strncmp(data, "auth", 4) && strncmp(data, "auth-int", 8) &&
strncmp(data, "auth-conf", 9))
return 0;
if (!(p = strchr(data, '$')) || (int)(p-data) >= 9)
return 0;
data = p + 1; // authzid, optional
if ((p = strchr(data, '$'))) {
if ((int)(p-data) > MD5_HEX_SIZE || strlen(&p[1]) >= 8)
return 0;
} else if (strlen(data) > MD5_HEX_SIZE)
return 0;
/* The response field must be exactly 32 hex digits with nothing after. */
if (hexlenl(data, &extra) != MD5_HEX_SIZE || extra)
return 0;
return 1;
}
/*
 * Extract the 16-byte response digest from a canonical ciphertext.  The
 * response is the 8th '$'-separated field after the tag; it is hex-decoded
 * into a static buffer (the framework copies it; not thread-safe by design).
 */
static void *get_binary(char *ciphertext)
{
	static uint32_t out[BINARY_SIZE/4];
	unsigned char *dst = (unsigned char *)out;
	char response[MD5_HEX_SIZE + 1];
	char *field = ciphertext + FORMAT_TAG_LEN;
	char *end;
	unsigned int n;

	/* Skip the seven fields before the response (username .. qop). */
	for (n = 0; n < 7; n++)
		field = strchr(field, '$') + 1;

	/* Copy the response, stopping early at an optional trailing authzid. */
	end = strchr(field, '$');
	if (end && (end - field + 1) < sizeof(response))
		strnzcpy(response, field, end - field + 1);
	else
		strnzcpy(response, field, sizeof(response));

	/* Decode two hex digits per output byte. */
	for (n = 0; n < BINARY_SIZE; n++)
		dst[n] = (atoi16[ARCH_INDEX(response[2*n])] << 4)
		       + atoi16[ARCH_INDEX(response[2*n + 1])];

	return (void*)out;
}
/*
 * Parse a canonical "$DIGEST-MD5$..." ciphertext into the static salt.
 * The three password-independent MD5 input fragments used by crypt_all()
 * are precomputed here:
 *   login_id   = "username:realm:"
 *   nonces     = ":nonce:cnonce[:authzid]"
 *   prehash_KD = ":nonce:nc:cnonce:qop:HEX(MD5(A2))"
 * valid() has already vetted the field layout, so the strchr() calls for
 * the mandatory fields are expected to succeed.  Returns a pointer to a
 * static buffer (the framework copies SALT_SIZE bytes out of it).
 */
static void *get_salt(char *ciphertext)
{
	char username[64];
	char realm[64];
	char nonce[64];
	char digest_uri[DSIZE];
	char cnonce[MD5_HEX_SIZE + 1];
	char nc[9];
	char qop[9];
	char authzid[8];
	unsigned char *ptr_src, *ptr_dst, v, i;
	char *ccopy = strdup(ciphertext);
	char *p, *data = ccopy + FORMAT_TAG_LEN;
	MD5_CTX ctx;
	/* FIX: initialized empty.  valid() only prefix-checks qop against
	   "auth", so a qop like "auth-xyz" matches none of the exact strcmp
	   branches below and A2 would otherwise be hashed uninitialized. */
	char A2[DSIZE] = "";
	unsigned char hash[BINARY_SIZE];
	unsigned char hex_hash[2*MD5_HEX_SIZE];
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));

	/* Split the '$'-separated fields in place on the private copy. */
	if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(username, data, sizeof(username));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(realm, data, sizeof(realm));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(nonce, data, sizeof(nonce));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(digest_uri, data, sizeof(digest_uri));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(cnonce, data, sizeof(cnonce));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(nc, data, sizeof(nc));
	data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
	strnzcpy(qop, data, sizeof(qop));
	data = p + 1;
	/* Optional trailing authzid field. */
	if ((p = strchr(data, '$'))) {
		*p = 0;
		data = p + 1;
		if (*data)
			strnzcpy(authzid, data, sizeof(authzid));
		else
			*authzid = 0;
	} else {
		*authzid = 0;
	}

	/* A2 per RFC 2831; auth-int/auth-conf append the all-zero conf string. */
	if (!strcmp(qop, "auth"))
		snprintf((char*)A2, sizeof(A2),
		         "AUTHENTICATE:%s", digest_uri);
	else if (!strcmp(qop, "auth-int") || !strcmp(qop, "auth-conf"))
		snprintf((char*)A2, sizeof(A2),
		         "AUTHENTICATE:%s:00000000000000000000000000000000",
		         digest_uri);

	MD5_Init(&ctx);
	MD5_Update(&ctx, A2, strlen((char*)A2));
	MD5_Final(hash, &ctx);

	/* Lowercase hex of MD5(A2). */
	ptr_src = hash;
	ptr_dst = hex_hash;
	for (i = 0; i < BINARY_SIZE; ++i) {
		v = *ptr_src++;
		*ptr_dst++ = itoa16_shr_04[ARCH_INDEX(v)];
		*ptr_dst++ = itoa16_and_0f[ARCH_INDEX(v)];
	}
	*ptr_dst = 0;

	snprintf((char*)cs.prehash_KD, sizeof(cs.prehash_KD),
	         ":%s:%s:%s:%s:%s", nonce, nc, cnonce, qop, hex_hash);
	cs.prehash_KD_len = strlen((char*)cs.prehash_KD);

	if (authzid[0])
		snprintf((char*)cs.nonces, sizeof(cs.nonces),
		         ":%s:%s:%s", nonce, cnonce, authzid);
	else
		snprintf((char*)cs.nonces, sizeof(cs.nonces),
		         ":%s:%s", nonce, cnonce);
	cs.nonces_len = strlen((char*)cs.nonces);

	snprintf((char*)cs.login_id, sizeof(cs.login_id),
	         "%s:%s:", username, realm);
	cs.login_id_len = strlen((char*)cs.login_id);

	MEM_FREE(ccopy);
	return (void*)&cs;
}
/* Make the given salt current for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/* Store one candidate plaintext, truncated to PLAINTEXT_LENGTH. */
static void set_key(char *key, int index)
{
strnzcpyn(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
/* Return the stored candidate plaintext for reporting. */
static char *get_key(int index)
{
return saved_key[index];
}
/*
 * Compute the RFC 2831 response digest for every queued candidate:
 *   HEX(MD5(HEX(MD5(MD5(user:realm:pass) . nonces)) . prehash_KD))
 * where nonces and prehash_KD (which already folds in HEX(MD5(A2))) were
 * precomputed by get_salt().  Results land in crypt_key[].
 * NOTE(review): without _OPENMP there is no loop, so only index 0 runs;
 * MAX_KEYS_PER_CRYPT is 1 in that build, so count should always be 1 there.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
unsigned char hash[16];
unsigned char hex_hash[MD5_HEX_SIZE];
unsigned char *ptr_src, *ptr_dst;
MD5_CTX ctx;
int i;
MD5_Init(&ctx);
// "username:realm"
MD5_Update(&ctx, cur_salt->login_id, cur_salt->login_id_len);
// "password"
MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
MD5_Final(hash, &ctx);
MD5_Init(&ctx);
// previous result
MD5_Update(&ctx, hash, BINARY_SIZE);
// ":nonce:cnonce[:authzid]"
MD5_Update(&ctx, cur_salt->nonces, cur_salt->nonces_len);
MD5_Final(hash, &ctx);
// hexify
ptr_src = hash;
ptr_dst = hex_hash;
for (i = 0; i < BINARY_SIZE; ++i) {
unsigned char v = *ptr_src++;
*ptr_dst++ = itoa16_shr_04[ARCH_INDEX(v)];
*ptr_dst++ = itoa16_and_0f[ARCH_INDEX(v)];
}
MD5_Init(&ctx);
// previous result, in hex
MD5_Update(&ctx, hex_hash, MD5_HEX_SIZE);
// ":nonce:nc:cnonce:qop:hex_A2_hash
MD5_Update(&ctx, cur_salt->prehash_KD, cur_salt->prehash_KD_len);
MD5_Final((unsigned char*)crypt_key[index], &ctx);
}
return count;
}
/* Fast scan: does any candidate's first 32 digest bits match the target? */
static int cmp_all(void *binary, int count)
{
#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
int index;
uint32_t b = ((uint32_t*)binary)[0];
for (index = 0; index < count; index++)
if (crypt_key[index][0] == b)
return 1;
return 0;
#else
/* Single-candidate build: compare the lone result directly. */
return ((uint32_t*)binary)[0] == crypt_key[0][0];
#endif
}
/* Full 16-byte digest comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	return memcmp(crypt_key[index], binary, BINARY_SIZE) == 0;
}
/* cmp_one() already compared the full digest; nothing further to verify. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Partial-hash accessors: low bits of the first 32-bit word of each digest,
   used by the cracker's hash-table bucketing. */
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
/* John the Ripper format descriptor: parameters block first, then the
   method table wiring the functions above into the framework. */
struct fmt_main fmt_DMD5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
},
{
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
utils.h | // Copyright 2020 D-Wave Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef DIMOD_UTILS_H_
#define DIMOD_UTILS_H_
#include <cstddef>
#include <cstdlib>
#include <utility>
#define BLOCK_SIZE 64 // Block size for cache blocking.
#define CACHE_LINE_SIZE 64 // General cache line size in bytes.
namespace dimod {
namespace utils {
// Comparator for binary searches over (variable, bias) pairs ordered by the
// variable: true when the stored pair's variable precedes `v`.
template<class V, class B>
bool comp_v(std::pair<V, B> ub, V v) {
    const V& stored_variable = ub.first;
    return stored_variable < v;
}
// The aligned_malloc and aligned_free functions were written with the help of this link:
// https://stackoverflow.com/questions/38088732/explanation-to-aligned-malloc-implementation
// Allocate memory and make sure the returned pointer address
// is a multiple of the given alignment
// Allocate `required_bytes` of heap memory whose returned address is a
// multiple of `alignment` (default: one cache line).  The original malloc()
// pointer is stashed in the word immediately before the returned address so
// aligned_free() can recover it.  Returns NULL on allocation failure.
// FIX: declared `inline` -- this function is defined in a header, so a
// non-inline definition violates the ODR when included from multiple TUs.
// Based on:
// https://stackoverflow.com/questions/38088732/explanation-to-aligned-malloc-implementation
inline void* aligned_malloc(size_t required_bytes, size_t alignment = 0) {
    if (!alignment) {
        alignment = CACHE_LINE_SIZE;
    }
    void* p1;   // pointer returned by malloc()
    void** p2;  // aligned pointer handed back to the caller
    // Worst-case slack: (alignment - 1) to reach the next boundary plus one
    // pointer-sized slot to remember the original allocation (size_t, not
    // int, to avoid narrowing for large alignments).
    size_t extra_bytes = alignment - 1 + sizeof(void*);
    if ((p1 = (void*)malloc(required_bytes + extra_bytes)) == NULL) {
        return NULL;
    }
    // Round (p1 + extra_bytes) down to a multiple of `alignment`; the result
    // is >= p1 + sizeof(void*), so the slot at p2[-1] lies inside the block.
    p2 = (void**)(alignment * (((size_t)(p1) + extra_bytes) / alignment));
    p2[-1] = p1;  // remember the original pointer for aligned_free()
    return p2;
}
// Corresponding aligned free for the aligned malloc
void aligned_free(void* p) { free(((void**)p)[-1]); }
// Allocate memory and fill it with zeroes but also make sure
// the returned address is a multiple of the given alignment
// Allocate num*size zero-initialized bytes at the given alignment (default:
// one cache line).  Zero-filling uses word-sized stores (parallelized with
// OpenMP when enabled) followed by a byte tail.
// FIX: `inline` (header-defined, ODR) and a NULL check so an allocation
// failure is propagated instead of dereferencing NULL during the fill.
inline void* aligned_calloc(size_t num, size_t size, size_t alignment = 0) {
    if (!alignment) {
        alignment = CACHE_LINE_SIZE;
    }
    size_t required_bytes = num * size;
    void* ptr = aligned_malloc(required_bytes, alignment);
    if (ptr == NULL) {
        return NULL;
    }
    long long int* ptr_ll = (long long int*)ptr;
    size_t numfill_by_ll = required_bytes / sizeof(long long int);
#pragma omp parallel for schedule(static)
    for (size_t i_ll = 0; i_ll < numfill_by_ll; i_ll++) {
        ptr_ll[i_ll] = 0;
    }
    // Zero any remaining bytes that do not fill a whole long long.
    char* ptr_char = (char*)(ptr_ll + numfill_by_ll);
    size_t bytes_left = required_bytes - (numfill_by_ll * sizeof(long long int));
    for (size_t i_char = 0; i_char < bytes_left; i_char++) {
        ptr_char[i_char] = 0;
    }
    return ptr;
}
} // namespace utils
} // namespace dimod
#endif // DIMOD_UTILS_H_
|
GB_binop__isne_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_fp64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__isne_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_fp64)
// A*D function (colscale): GB (_AxD__isne_fp64)
// D*A function (rowscale): GB (_DxB__isne_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_fp64)
// C=scalar+B GB (_bind1st__isne_fp64)
// C=scalar+B' GB (_bind1st_tran__isne_fp64)
// C=A+scalar GB (_bind2nd__isne_fp64)
// C=A'+scalar GB (_bind2nd_tran__isne_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_FP64 || GxB_NO_ISNE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled for ISNE: this dense-accumulation kernel exists only for the
// operators listed below, so the generator emitted it under "#if 0".
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; entries computed as (aij != bij)
// by the included template.  Returns GrB_NO_VALUE if compiled out.
GrB_Info GB (_Cdense_ewise3_noaccum__isne_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into dense C with the isne
// operator, over the prebuilt B task slicing.
GrB_Info GB (_Cdense_accumB__isne_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (type double) into every entry of dense C.
GrB_Info GB (_Cdense_accumb__isne_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above already returned; harmless
// generator artifact.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D with the isne
// operator; Cx is written in place via the colscale template.
GrB_Info GB (_AxD__isne_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D with the isne
// operator; Cx is written in place via the rowscale template.
GrB_Info GB (_DxB__isne_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the isne operator.  Slicing
// workspaces are declared here and released by GB_FREE_WORK.
GrB_Info GB (_AaddB__isne_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (general case) with the isne operator,
// delegated to the emult_01 meta template.
GrB_Info GB (_AemultB_01__isne_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for isne (commutative), so only the unflipped path is
// compiled in.
GrB_Info GB (_AemultB_02__isne_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A and B are
// bitmap/full, with the isne operator.
GrB_Info GB (_AemultB_03__isne_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap, with the
// isne operator.
GrB_Info GB (_AemultB_bitmap__isne_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for every entry present in B: binary operator with
// the scalar bound to the first argument.
GrB_Info GB (_bind1st__isne_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from B's bitmap
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for every entry present in A: binary operator with
// the scalar bound to the second argument.
GrB_Info GB (_bind2nd__isne_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from A's bitmap
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x != aij), using GB_CAST_OP
// defined just above.  GB_ATYPE is temporarily redefined because the
// transpose template reads the operator's SECOND input from A.
GrB_Info GB (_bind1st_tran__isne_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij != y), using GB_CAST_OP
// defined just above.
GrB_Info GB (_bind2nd_tran__isne_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Nonlocal_TV_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC and Diamond Light Source Ltd.
*
* Copyright 2017 Daniil Kazantsev
* Copyright 2017 Srikanth Nagella, Edoardo Pasca
* Copyright 2018 Diamond Light Source Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Nonlocal_TV_core.h"
/* C-OMP implementation of non-local regulariser
* Weights and associated indices must be given as an input.
* Gauss-Seidel fixed point iteration requires ~ 3 iterations, so the main effort
* goes in pre-calculation of weights and selection of patches
*
*
* Input Parameters:
* 1. 2D/3D grayscale image/volume
* 2. AR_i - indeces of i neighbours
* 3. AR_j - indeces of j neighbours
* 4. AR_k - indeces of k neighbours (0 - for 2D case)
* 5. Weights_ij(k) - associated weights
* 6. regularisation parameter
* 7. iterations number
* Output:
* 1. denoised image/volume
* Elmoataz, Abderrahim, Olivier Lezoray, and Sébastien Bougleux. "Nonlocal discrete regularization on weighted graphs: a framework for image and manifold processing." IEEE Trans. Image Processing 17, no. 7 (2008): 1047-1060.
*/
/*****************************************************************************/
/*
 * Nonlocal-TV denoising driver: copies the input into Output, then runs
 * IterNumb Gauss-Seidel sweeps, updating each pixel (2D path when dimZ == 0)
 * or voxel (3D path) from its precomputed nonlocal neighbours.
 * Note: lambdaReg is inverted once here (1/lambda) before being passed down.
 * NOTE(review): returns *Output, i.e. only the first element of the result
 * volume -- presumably a convention of the calling wrapper; confirm.
 */
float Nonlocal_TV_CPU_main(float *A_orig, float *Output, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, int dimX, int dimY, int dimZ, int NumNeighb, float lambdaReg, int IterNumb)
{
long i, j, k;
int iter;
lambdaReg = 1.0f/lambdaReg;
/*****2D INPUT *****/
if (dimZ == 0) {
copyIm(A_orig, Output, (long)(dimX), (long)(dimY), 1l);
/* for each pixel store indeces of the most similar neighbours (patches) */
for(iter=0; iter<IterNumb; iter++) {
#pragma omp parallel for shared (A_orig, Output, Weights, H_i, H_j, iter) private(i,j)
for(i=0; i<(long)(dimX); i++) {
for(j=0; j<(long)(dimY); j++) {
/*NLM_H1_2D(Output, A_orig, H_i, H_j, Weights, i, j, (long)(dimX), (long)(dimY), NumNeighb, lambdaReg);*/ /* NLM - H1 penalty */
NLM_TV_2D(Output, A_orig, H_i, H_j, Weights, i, j, (long)(dimX), (long)(dimY), NumNeighb, lambdaReg); /* NLM - TV penalty */
}}
}
}
else {
/*****3D INPUT *****/
copyIm(A_orig, Output, (long)(dimX), (long)(dimY), (long)(dimZ));
/* for each pixel store indeces of the most similar neighbours (patches) */
for(iter=0; iter<IterNumb; iter++) {
#pragma omp parallel for shared (A_orig, Output, Weights, H_i, H_j, H_k, iter) private(i,j,k)
for(i=0; i<(long)(dimX); i++) {
for(j=0; j<(long)(dimY); j++) {
for(k=0; k<(long)(dimZ); k++) {
/* NLM_H1_3D(Output, A_orig, H_i, H_j, H_k, Weights, i, j, k, dimX, dimY, dimZ, NumNeighb, lambdaReg); */ /* NLM - H1 penalty */
NLM_TV_3D(Output, A_orig, H_i, H_j, H_k, Weights, i, j, k, (long)(dimX), (long)(dimY), (long)(dimZ), NumNeighb, lambdaReg); /* NLM - TV penalty */
}}}
}
}
return *Output;
}
/***********<<<<Main Function for NLM - H1 penalty>>>>**********/
/*
 * Gauss-Seidel update of pixel (i,j) under the nonlocal H1 penalty (2D):
 * replace the pixel by a lambda-weighted blend of the original data value
 * and the weighted average of its stored nonlocal neighbours.
 * Returns *A (the first array element), matching the historical interface.
 */
float NLM_H1_2D(float *A, float *A_orig, unsigned short *H_i, unsigned short *H_j, float *Weights, long i, long j, long dimX, long dimY, int NumNeighb, float lambdaReg)
{
    const long center = j*dimX + i;
    float weighted_sum = 0.0f;
    float weight_total = 0.0f;
    long n;

    /* Weighted average over the stored nonlocal neighbours. */
    for (n = 0; n < NumNeighb; n++) {
        const long idx = dimX*dimY*n + center;
        const long ni = H_i[idx];
        const long nj = H_j[idx];

        weighted_sum += A[nj*dimX + ni]*Weights[idx];
        weight_total += Weights[idx];
    }

    /* Fixed-point step: blend the data term with the neighbour average. */
    A[center] = (lambdaReg*A_orig[center] + weighted_sum)/(lambdaReg + weight_total);
    return *A;
}
/*3D version*/
/*
 * Gauss-Seidel update of voxel (i,j,k) under the nonlocal H1 penalty (3D):
 * 3D analogue of NLM_H1_2D with neighbour coordinates (H_i, H_j, H_k).
 * Returns *A (the first array element), matching the historical interface.
 */
float NLM_H1_3D(float *A, float *A_orig, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, long i, long j, long k, long dimX, long dimY, long dimZ, int NumNeighb, float lambdaReg)
{
    const long slice = dimX*dimY;
    const long center = slice*k + j*dimX + i;
    float weighted_sum = 0.0f;
    float weight_total = 0.0f;
    long n;

    /* Weighted average over the stored nonlocal neighbours. */
    for (n = 0; n < NumNeighb; n++) {
        const long idx = slice*dimZ*n + center;
        const long ni = H_i[idx];
        const long nj = H_j[idx];
        const long nk = H_k[idx];

        weighted_sum += A[slice*nk + nj*dimX + ni]*Weights[idx];
        weight_total += Weights[idx];
    }

    /* Fixed-point step: blend the data term with the neighbour average. */
    A[center] = (lambdaReg*A_orig[center] + weighted_sum)/(lambdaReg + weight_total);
    return *A;
}
/***********<<<<Main Function for NLM - TV penalty>>>>**********/
/*
 * Gauss-Seidel update of pixel (i,j) under the nonlocal TV penalty (2D).
 * Pass 1 accumulates the weighted squared nonlocal gradient magnitude at
 * the pixel; pass 2 applies the TV coefficient 2/(|grad|+EPS) to the
 * neighbour average before blending with the data term.
 * Returns *A (the first array element), matching the historical interface.
 */
float NLM_TV_2D(float *A, float *A_orig, unsigned short *H_i, unsigned short *H_j, float *Weights, long i, long j, long dimX, long dimY, int NumNeighb, float lambdaReg)
{
    const long center = j*dimX + i;
    float grad_sq = 0.0f;
    float weighted_sum = 0.0f;
    float weight_total = 0.0f;
    float coeff;
    long n;

    /* Pass 1: weighted squared nonlocal gradient magnitude. */
    for (n = 0; n < NumNeighb; n++) {
        const long idx = dimX*dimY*n + center;
        const long ni = H_i[idx];
        const long nj = H_j[idx];

        grad_sq += powf((A[nj*dimX + ni] - A[center]), 2)*Weights[idx];
    }
    coeff = 2.0f*(1.0f/(sqrtf(grad_sq) + EPS));

    /* Pass 2: TV-weighted average of the stored neighbours. */
    for (n = 0; n < NumNeighb; n++) {
        const long idx = dimX*dimY*n + center;
        const long ni = H_i[idx];
        const long nj = H_j[idx];

        weighted_sum += A[nj*dimX + ni]*coeff*Weights[idx];
        weight_total += Weights[idx]*coeff;
    }
    A[center] = (lambdaReg*A_orig[center] + weighted_sum)/(lambdaReg + weight_total);
    return *A;
}
/*3D version*/
/*
 * Gauss-Seidel update of voxel (i,j,k) under the nonlocal TV penalty (3D):
 * 3D analogue of NLM_TV_2D.
 * Returns *A (the first array element), matching the historical interface.
 */
float NLM_TV_3D(float *A, float *A_orig, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, long i, long j, long k, long dimX, long dimY, long dimZ, int NumNeighb, float lambdaReg)
{
    long x, i1, j1, k1, index, index_m;
    float value = 0.0f, normweight = 0.0f, NLgrad_magn = 0.0f, NLCoeff;

    index_m = (dimX*dimY*k) + j*dimX+i; /* linear index of the current voxel */

    /* Weighted squared nonlocal gradient magnitude at the current voxel. */
    for(x=0; x < NumNeighb; x++) {
        index = dimX*dimY*dimZ*x + index_m;
        i1 = H_i[index];
        j1 = H_j[index];
        k1 = H_k[index];
        /* BUG FIX: the gradient is neighbour minus the CURRENT voxel
           A[index_m] (slice k).  The old code subtracted the voxel at (i,j)
           in the neighbour's slice k1, which disagrees with the 2D variant
           (it uses A[index_m] as the centre). */
        NLgrad_magn += powf((A[(dimX*dimY*k1) + j1*dimX+i1] - A[index_m]),2)*Weights[index];
    }
    NLgrad_magn = sqrtf(NLgrad_magn); /*Non Local Gradients Magnitude */
    NLCoeff = 2.0f*(1.0f/(NLgrad_magn + EPS));

    /* TV-weighted average of the stored neighbours. */
    for(x=0; x < NumNeighb; x++) {
        index = dimX*dimY*dimZ*x + index_m;
        i1 = H_i[index];
        j1 = H_j[index];
        k1 = H_k[index];
        value += A[(dimX*dimY*k1) + j1*dimX+i1]*NLCoeff*Weights[index];
        normweight += Weights[index]*NLCoeff;
    }
    /* Blend the data term with the TV-weighted neighbour average. */
    A[index_m] = (lambdaReg*A_orig[index_m] + value)/(lambdaReg + normweight);
    return *A;
}
|
GB_binop__bor_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bor_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int8)
// C=scalar+B GB (_bind1st__bor_int8)
// C=scalar+B' GB (_bind1st_tran__bor_int8)
// C=A+scalar GB (_bind2nd__bor_int8)
// C=A'+scalar GB (_bind2nd_tran__bor_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) | (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_INT8 || GxB_NO_BOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense, using the int8 BITOR operator
// elementwise; the actual loop lives in the included template.
void GB (_Cdense_ewise3_noaccum__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C with the int8 BITOR operator.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// the accumulation loop is provided by the shared template
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into dense C with the int8 BITOR
// operator. Returns GrB_NO_VALUE when this operator is compiled out.
GrB_Info GB (_Cdense_accumb__bor_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): this second return is unreachable (the block above always
// returns); harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (or masked variants) with the int8 BITOR operator;
// when is_eWiseUnion, alpha/beta scalars substitute for missing entries.
// The numerical work is in the included GB_add_template.c.
GrB_Info GB (_AaddB__bor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace declared here; released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
// unwrap the typed scalars only when eWiseUnion actually uses them
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B (or masked variants) with the int8 BITOR
// operator, where C is sparse/hypersparse; work is in GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__bor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B with the int8 BITOR operator, where A is
// sparse/hypersparse and B is bitmap/full. BITOR is commutative
// (GB_BINOP_FLIP is 0 above), so only the non-flipped template is compiled.
GrB_Info GB (_AemultB_02__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with the int8 BITOR operator, where M is
// sparse/hypersparse and A, B are bitmap/full; work is in the template.
GrB_Info GB (_AemultB_04__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (or masked variants) with the int8 BITOR operator,
// where C is bitmap; work is in the included bitmap template.
GrB_Info GB (_AemultB_bitmap__bor_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x | Bx [p] for every present entry: apply the int8 BITOR
// operator with the scalar x bound as its first input.
GrB_Info GB (_bind1st__bor_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Bx = (int8_t *) Bx_input ;
int8_t xval = (*((int8_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only entries present in the bitmap (if any) are computed
if (GBB (Bb, p))
{
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (xval) | (bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] | y for every present entry: apply the int8 BITOR
// operator with the scalar y bound as its second input.
GrB_Info GB (_bind2nd__bor_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t yval = (*((int8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only entries present in the bitmap (if any) are computed
if (GBB (Ab, p))
{
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) | (yval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) | (aij) ; \
}
// C = op (x, A'): transpose A while applying cij = x | aij (scalar bound
// first). GB_ATYPE is temporarily redefined because the transpose template
// reads it for A, which here is the operator's second input.
GrB_Info GB (_bind1st_tran__bor_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) | (y) ; \
}
// C = op (A', y): transpose A while applying cij = aij | y (scalar bound
// second), via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
util.h | #ifndef _C_UTIL_
#define _C_UTIL_
#include <math.h>
#include <iostream>
//#include <omp.h>
//-------------------------------------------------------------------
//--initialize array with maximum limit
//-------------------------------------------------------------------
// Fill A[0..n) with pseudo-random values in [0, maxi), drawn from rand().
// Note: rand() is not seeded here; the sequence is the library default.
template<typename datatype>
void fill(datatype *A, const int n, const datatype maxi){
    for (int idx = 0; idx < n; ++idx) {
        const float u = rand() / (RAND_MAX + 1.0f);  // uniform in [0, 1)
        A[idx] = (datatype) (maxi * u);
    }
}
//--print matrix
// Print a height x width row-major matrix to stdout, one row per line,
// elements separated by single spaces.
template<typename datatype>
void print_matrix(datatype *A, int height, int width){
    for (int r = 0; r < height; ++r) {
        const datatype *row = A + r * width;
        for (int c = 0; c < width; ++c) {
            std::cout << row[c] << " ";
        }
        std::cout << std::endl;
    }
    return;
}
//-------------------------------------------------------------------
//--verify results
//-------------------------------------------------------------------
#define MAX_RELATIVE_ERROR .002
// Compare CPU and GPU results with a relative-error tolerance and print a
// pass/fail line. FIX: when cpuResults[i] == 0 the old code divided by zero
// (0/0 -> NaN, and NaN > tol is false, so a zero-vs-zero-noise element could
// never fail meaningfully); fall back to absolute error in that case.
template<typename datatype>
void verify_array(const datatype *cpuResults, const datatype *gpuResults, const int size){
    bool passed = true;
    //#pragma omp parallel for
    for (int i = 0; i < size; i++){
        const double diff  = fabs((double)cpuResults[i] - (double)gpuResults[i]);
        const double denom = fabs((double)cpuResults[i]);
        // relative error when the reference is nonzero, absolute error otherwise
        const double err = (denom > 0.0) ? diff / denom : diff;
        if (err > MAX_RELATIVE_ERROR){
            passed = false;
        }
    }
    if (passed){
        std::cout << "--cambine:passed:-)" << std::endl;
    }
    else{
        std::cout << "--cambine: failed:-(" << std::endl;
    }
    return ;
}
// Exact element-wise comparison of CPU vs GPU outputs; prints a pass/fail
// line to stdout (no tolerance — intended for integer/bitwise results).
template<typename datatype>
void compare_results(const datatype *cpu_results, const datatype *gpu_results, const int size){
    char passed = true;
    //#pragma omp parallel for
    for (int i = 0; i < size; i++){
        if (!(cpu_results[i] == gpu_results[i])){
            passed = false;
        }
    }
    const char *msg = passed ? "--cambine:passed:-)" : "--cambine: failed:-(";
    std::cout << msg << std::endl;
    return ;
}
#endif
|
statistical_mechanics.c | /* Generated by Cython 0.29.12 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [],
"extra_compile_args": [
"/openmp"
],
"name": "quantas.utils.physics.statistical_mechanics",
"sources": [
"quantas/utils/physics/statistical_mechanics.pyx"
]
},
"module_name": "quantas.utils.physics.statistical_mechanics"
}
END: Cython Metadata */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_12"
#define CYTHON_HEX_VERSION 0x001D0CF0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
// Compatibility shims (Cython-generated): emulate the PyThread_tss_* API on
// top of the legacy PyThread_*_key API when the real one is unavailable.
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;   // the legacy API gives no failure indication here
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
// heap-allocate a key slot, pre-marked as needing initialization
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
// created iff the slot no longer holds the sentinel value
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;   // reset so is_created() reports false again
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#define PyObject_Unicode PyObject_Str
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
/* Fallback layout for the async protocol slot table (await/aiter/anext),
   used when the surrounding #ifndef finds no definition; matches the
   shape __Pyx_PyType_AsAsync casts to above (PyAsyncMethods on >=3.5). */
typedef struct {
    unaryfunc am_await;
    unaryfunc am_aiter;
    unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
/* Fallback NaN constructor for platforms where <math.h> does not define
 * the NAN macro (this branch is only compiled under that #else): a float
 * whose bytes are all ones, which is a quiet-NaN bit pattern on IEEE-754
 * targets. */
static CYTHON_INLINE float __PYX_NAN() {
  float result;
  memset(&result, 0xFF, sizeof(result));
  return result;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__quantas__utils__physics__statistical_mechanics
#define __PYX_HAVE_API__quantas__utils__physics__statistical_mechanics
/* Early includes */
#include "math.h"
#include "pythread.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
/* Bounds check: true iff i is a usable index for a container of length
 * `limit`. A single unsigned comparison covers both bounds, because a
 * negative i wraps to a huge unsigned value — equivalent to
 * (0 <= i && i < limit) whenever limit is non-negative. */
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
    const size_t u_index = (size_t) i;
    const size_t u_limit = (size_t) limit;
    return u_index < u_limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
/* strlen() analogue for zero-terminated Py_UNICODE buffers: returns the
 * number of code units before the terminating zero (the terminator is
 * not counted). */
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
    size_t n = 0;
    while (u[n] != 0)
        ++n;
    return n;
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
/* (Compiled only for Python 2 builds with c_string_encoding=ascii.)
 * Probe sys.getdefaultencoding() and record in
 * __Pyx_sys_getdefaultencoding_not_ascii whether it differs from "ascii".
 * When it does differ, verify the default encoding is a superset of ASCII
 * by round-tripping all 128 ASCII code points through it, raising
 * ValueError if the bytes do not match.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    PyObject* ascii_chars_u = NULL;
    PyObject* ascii_chars_b = NULL;
    const char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);  /* the sys module is no longer needed */
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    if (strcmp(default_encoding_c, "ascii") == 0) {
        __Pyx_sys_getdefaultencoding_not_ascii = 0;
    } else {
        /* Non-"ascii" default: build the 128 ASCII characters and check
           that encoding them via the default encoding reproduces the
           identical byte sequence. */
        char ascii_chars[128];
        int c;
        for (c = 0; c < 128; c++) {
            ascii_chars[c] = c;
        }
        __Pyx_sys_getdefaultencoding_not_ascii = 1;
        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
        if (!ascii_chars_u) goto bad;
        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
            PyErr_Format(
                PyExc_ValueError,
                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
                default_encoding_c);
            goto bad;
        }
        Py_DECREF(ascii_chars_u);
        Py_DECREF(ascii_chars_b);
    }
    Py_DECREF(default_encoding);
    return 0;
bad:
    /* Error path: drop whatever was created (XDECREF tolerates NULL);
       every jump here leaves a Python exception set. */
    Py_XDECREF(default_encoding);
    Py_XDECREF(ascii_chars_u);
    Py_XDECREF(ascii_chars_b);
    return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
/* (Compiled only when the module's default string encoding tracks the
 * interpreter default.) Cache a private, heap-allocated copy of
 * sys.getdefaultencoding() in __PYX_DEFAULT_STRING_ENCODING; the buffer
 * is never freed and apparently lives for the process lifetime.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);  /* the sys module is no longer needed */
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
    if (!__PYX_DEFAULT_STRING_ENCODING) {
        /* Fix: the original returned -1 here without setting an exception,
         * which makes CPython raise "SystemError: error return without
         * exception set" in the caller instead of a proper MemoryError. */
        PyErr_NoMemory();
        goto bad;
    }
    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
/* No-op that discards its argument; the name suggests it exists so the
   compiler treats the pointed-to variable as written (silencing
   maybe-uninitialized warnings) — TODO confirm against the callers. */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"quantas\\utils\\physics\\statistical_mechanics.pyx",
"stringsource",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
/* A slice view into a memoryview's buffer: the owning memoryview object,
   the base data pointer, and per-dimension shape/strides/suboffsets
   arrays with a fixed capacity of 8 dimensions. */
typedef struct {
  struct __pyx_memoryview_obj *memview;  /* owner; keeps the buffer alive */
  char *data;                            /* start of the viewed data */
  Py_ssize_t shape[8];
  Py_ssize_t strides[8];
  Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m) (m.shape[0])
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
(__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\
!defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
/* Static description of one element type in a buffer format
   (name, nested fields for structs, size, array dims, sign/flags). */
typedef struct {
  const char* name;
  struct __Pyx_StructField_* fields;
  size_t size;
  size_t arraysize[8];
  int ndim;
  char typegroup;
  char is_unsigned;
  int flags;
} __Pyx_TypeInfo;
/* One named member of a struct-typed buffer element, with its offset. */
typedef struct __Pyx_StructField_ {
  __Pyx_TypeInfo* type;
  const char* name;
  size_t offset;
} __Pyx_StructField;
/* Stack element used while walking nested struct fields. */
typedef struct {
  __Pyx_StructField* field;
  size_t parent_offset;
} __Pyx_BufFmt_StackElem;
/* Parser state for matching a PEP-3118 buffer format string against the
   expected type layout (counts, packing mode, alignment bookkeeping). */
typedef struct {
  __Pyx_StructField root;
  __Pyx_BufFmt_StackElem* head;
  size_t fmt_offset;
  size_t new_count, enc_count;
  size_t struct_alignment;
  int is_complex;
  char enc_type;
  char new_packmode;
  char enc_packmode;
  char is_valid_array;
} __Pyx_BufFmt_Context;
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
/* C layout of the View.MemoryView `array` extension type: an owned
   buffer plus its format string and per-dimension shape/strides. */
struct __pyx_array_obj {
  PyObject_HEAD
  struct __pyx_vtabstruct_array *__pyx_vtab;  /* see __pyx_vtabstruct_array below */
  char *data;               /* underlying buffer */
  Py_ssize_t len;           /* buffer length */
  char *format;             /* buffer format string (C copy) */
  int ndim;                 /* number of dimensions */
  Py_ssize_t *_shape;       /* per-dimension extents */
  Py_ssize_t *_strides;     /* per-dimension strides */
  Py_ssize_t itemsize;      /* size of one element */
  PyObject *mode;           /* Python-level mode attribute */
  PyObject *_format;        /* Python-level format attribute */
  void (*callback_free_data)(void *);  /* custom deallocator, if any */
  int free_data;            /* whether data should be released on dealloc */
  int dtype_is_object;      /* elements hold PyObject* references */
};
/* "View.MemoryView":279
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
/* C layout of the View.MemoryView `Enum` class: a plain object that
   stores only its `name` (set in __init__ per the quoted source). */
struct __pyx_MemviewEnum_obj {
  PyObject_HEAD
  PyObject *name;
};
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
/* C layout of the View.MemoryView `memoryview` class: wraps an exporting
   object's Py_buffer together with a lock and slice acquisition counts
   (manipulated through the __pyx_add/sub_acquisition_count macros). */
struct __pyx_memoryview_obj {
  PyObject_HEAD
  struct __pyx_vtabstruct_memoryview *__pyx_vtab;  /* method vtable */
  PyObject *obj;                 /* object exporting the buffer */
  PyObject *_size;               /* cached size attribute */
  PyObject *_array_interface;    /* cached array-interface attribute */
  PyThread_type_lock lock;       /* protects the acquisition count when atomics are off */
  __pyx_atomic_int acquisition_count[2];
  __pyx_atomic_int *acquisition_count_aligned_p;  /* points into acquisition_count; used by the slice-count macros */
  Py_buffer view;                /* the underlying buffer view */
  int flags;                     /* buffer request flags */
  int dtype_is_object;           /* elements hold PyObject* references */
  __Pyx_TypeInfo *typeinfo;      /* element type description */
};
/* "View.MemoryView":961
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
/* C layout of `_memoryviewslice` — per the quoted source, an internal
   memoryview subclass for passing memoryview slices to Python; adds the
   originating slice/object and element conversion callbacks. */
struct __pyx_memoryviewslice_obj {
  struct __pyx_memoryview_obj __pyx_base;    /* base class: memoryview */
  __Pyx_memviewslice from_slice;             /* the slice this object wraps */
  PyObject *from_object;                     /* object the slice came from */
  PyObject *(*to_object_func)(char *);       /* element bytes -> Python object */
  int (*to_dtype_func)(char *, PyObject *);  /* Python object -> element bytes */
};
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
/* Virtual method table for the `array` type (cdef methods). */
struct __pyx_vtabstruct_array {
  PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
/* Virtual method table for the `memoryview` type (cdef methods for
   indexing, slice assignment, and element conversion). */
struct __pyx_vtabstruct_memoryview {
  char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
  PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":961
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
/* Virtual method table for `_memoryviewslice`: inherits the memoryview
   vtable unchanged (no additional cdef methods). */
struct __pyx_vtabstruct__memoryviewslice {
  struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
/* Function table of the refnanny debug module (imported at runtime by
   __Pyx_RefNannyImportAPI below); the __Pyx_INCREF/DECREF/... macros
   in this #if branch dispatch through these pointers. */
typedef struct {
    void (*INCREF)(void*, PyObject*, int);
    void (*DECREF)(void*, PyObject*, int);
    void (*GOTREF)(void*, PyObject*, int);
    void (*GIVEREF)(void*, PyObject*, int);
    void* (*SetupContext)(const char*, int, const char*);
    void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* WriteUnraisableException.proto */
static void __Pyx_WriteUnraisable(const char *name, int clineno,
int lineno, const char *filename,
int full_traceback, int nogil);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* None.proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* decode_c_string_utf16.proto */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
    /* Decode UTF-16 using native byte order (0); a BOM in the data, if
       present, may override the detected order per PyUnicode_DecodeUTF16. */
    int native_order = 0;
    return PyUnicode_DecodeUTF16(s, size, errors, &native_order);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
    /* Decode UTF-16 forcing little-endian byte order (-1 per the
       PyUnicode_DecodeUTF16 convention). */
    int little_endian = -1;
    return PyUnicode_DecodeUTF16(s, size, errors, &little_endian);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
    /* Decode UTF-16 forcing big-endian byte order (+1 per the
       PyUnicode_DecodeUTF16 convention). */
    int big_endian = 1;
    return PyUnicode_DecodeUTF16(s, size, errors, &big_endian);
}
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
/* Fast-path append for list comprehensions: if the list still has spare
 * capacity, store the item directly and bump ob_size; otherwise fall back
 * to PyList_Append. Returns 0 on success, -1 on error (as PyList_Append). */
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len)) {
        Py_INCREF(x);
        PyList_SET_ITEM(list, len, x);
        /* Py_SIZE is no longer an lvalue on CPython >= 3.10; use the
         * Py_SET_SIZE accessor introduced in 3.9.0a4. */
#if PY_VERSION_HEX >= 0x030900A4
        Py_SET_SIZE(list, len + 1);
#else
        Py_SIZE(list) = len + 1;
#endif
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
    /* _PyList_Extend returns None on success and NULL on error. */
    PyObject* result = _PyList_Extend((PyListObject*)L, v);
    if (likely(result)) {
        Py_DECREF(result);
        return 0;
    }
    return -1;
#else
    /* Portable fallback: assigning to the empty slice at the very end of
     * the list appends all items of v. */
    return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
/* Fast-path list append: store the item directly when the list has spare
 * capacity and is more than half full (the extra check avoids keeping a
 * mostly-empty over-allocation alive); otherwise use PyList_Append, which
 * also handles resizing. Returns 0 on success, -1 on error.
 * Note: `&` instead of `&&` is deliberate — both operands are cheap 0/1
 * values and this avoids a second branch. */
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
        Py_INCREF(x);
        PyList_SET_ITEM(list, len, x);
        /* Py_SIZE is no longer an lvalue on CPython >= 3.10; use the
         * Py_SET_SIZE accessor introduced in 3.9.0a4. */
#if PY_VERSION_HEX >= 0x030900A4
        Py_SET_SIZE(list, len + 1);
#else
        Py_SIZE(list) = len + 1;
#endif
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
/* None.proto */
static CYTHON_INLINE long __Pyx_div_long(long, long);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag);
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'quantas.utils.physics.statistical_mechanics' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static double __pyx_v_7quantas_5utils_7physics_21statistical_mechanics_H;
static double __pyx_v_7quantas_5utils_7physics_21statistical_mechanics_KB;
static double __pyx_v_7quantas_5utils_7physics_21statistical_mechanics_NA;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
static CYTHON_INLINE double __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_zpe(double, double); /*proto*/
static PyObject *__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_zero_point_energy(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/
static CYTHON_INLINE double __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_eth(double, double); /*proto*/
static PyObject *__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_thermal_energy(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_internal_energy(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/
static double __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_S(double, double); /*proto*/
static PyObject *__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_entropy(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/
static CYTHON_INLINE double __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_F(double, double); /*proto*/
static PyObject *__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_vibrational_free_energy(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/
static PyObject *__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_free_energy(__Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/
static CYTHON_INLINE double __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_Cv(double, double); /*proto*/
static PyObject *__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_isochoric_heat_capacity(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
#define __Pyx_MODULE_NAME "quantas.utils.physics.statistical_mechanics"
extern int __pyx_module_is_main_quantas__utils__physics__statistical_mechanics;
int __pyx_module_is_main_quantas__utils__physics__statistical_mechanics = 0;
/* Implementation of 'quantas.utils.physics.statistical_mechanics' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static const char __pyx_k_O[] = "O";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_U0[] = "U0";
static const char __pyx_k_cs[] = "cs";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_Uth[] = "Uth";
static const char __pyx_k_Uzp[] = "Uzp";
static const char __pyx_k__19[] = "*";
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_Fvib[] = "Fvib";
static const char __pyx_k_band[] = "band";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_dtype[] = "dtype";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_zeros[] = "zeros";
static const char __pyx_k_Planck[] = "Planck";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_float64[] = "float64";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_weights[] = "weights";
static const char __pyx_k_Avogadro[] = "Avogadro";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_Boltzmann[] = "Boltzmann";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_temperature[] = "temperature";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_scipy_constants[] = "scipy.constants";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_n_s_Avogadro;
static PyObject *__pyx_n_s_Boltzmann;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_n_s_Fvib;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_n_s_Planck;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_n_s_U0;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_Uth;
static PyObject *__pyx_n_s_Uzp;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_View_MemoryView;
static PyObject *__pyx_n_s__19;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_band;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_cs;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_dtype;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_float64;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_Enum;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_scipy_constants;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_temperature;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_n_s_weights;
static PyObject *__pyx_n_s_zeros;
static PyObject *__pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_zero_point_energy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights); /* proto */
static PyObject *__pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_2thermal_energy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights); /* proto */
static PyObject *__pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_4internal_energy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_U0, __Pyx_memviewslice __pyx_v_Uzp, __Pyx_memviewslice __pyx_v_Uth); /* proto */
static PyObject *__pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_6entropy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights); /* proto */
static PyObject *__pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_8vibrational_free_energy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights); /* proto */
static PyObject *__pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_10free_energy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_U0, __Pyx_memviewslice __pyx_v_Fvib); /* proto */
static PyObject *__pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_12isochoric_heat_capacity(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_184977713;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__15;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__20;
static PyObject *__pyx_tuple__21;
static PyObject *__pyx_tuple__22;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__24;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_codeobj__26;
/* Late includes */
/* "quantas/utils/physics/statistical_mechanics.pyx":28
*
*
* cdef inline double ho_zpe(double temperature, double omega) nogil: # <<<<<<<<<<<<<<
* """ Zero-point energy of single harmonic oscillator. """
* if omega <= 0.:
*/
/* C translation of `cdef inline double ho_zpe(temperature, omega) nogil`:
 * zero-point energy of a single harmonic oscillator,
 *   1e-3 * NA * H * (omega / 2),
 * or 0. when omega <= 0 (acoustic/imaginary modes contribute nothing).
 * `temperature` is CYTHON_UNUSED: zero-point energy is temperature
 * independent; the parameter only keeps the signature uniform with the
 * other per-oscillator helpers (e.g. ho_eth below).
 * NOTE(review): per the module's zero_point_energy docstring, H is the
 * Planck constant; NA is presumably Avogadro's number and the 1e-3
 * factor converts J/mol to kJ/mol -- confirm against the .pyx source.
 * Declared nogil in Cython, so no Python C-API is used here. */
static CYTHON_INLINE double __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_zpe(CYTHON_UNUSED double __pyx_v_temperature, double __pyx_v_omega) {
double __pyx_r;
int __pyx_t_1;
/* "quantas/utils/physics/statistical_mechanics.pyx":30
* cdef inline double ho_zpe(double temperature, double omega) nogil:
* """ Zero-point energy of single harmonic oscillator. """
* if omega <= 0.: # <<<<<<<<<<<<<<
* return 0.
* else:
*/
__pyx_t_1 = ((__pyx_v_omega <= 0.) != 0);
if (__pyx_t_1) {
/* "quantas/utils/physics/statistical_mechanics.pyx":31
* """ Zero-point energy of single harmonic oscillator. """
* if omega <= 0.:
* return 0. # <<<<<<<<<<<<<<
* else:
* return pow(10., -3.) * NA * H * (omega * 0.5)
*/
__pyx_r = 0.;
goto __pyx_L0;
/* "quantas/utils/physics/statistical_mechanics.pyx":30
* cdef inline double ho_zpe(double temperature, double omega) nogil:
* """ Zero-point energy of single harmonic oscillator. """
* if omega <= 0.: # <<<<<<<<<<<<<<
* return 0.
* else:
*/
}
/* "quantas/utils/physics/statistical_mechanics.pyx":33
* return 0.
* else:
* return pow(10., -3.) * NA * H * (omega * 0.5) # <<<<<<<<<<<<<<
*
* @cython.boundscheck(False)
*/
/*else*/ {
/* NA and H are module-level cdef globals (declared elsewhere in this
 * generated file); pow(10., -3.) is folded to 1e-3 by the compiler. */
__pyx_r = (((pow(10., -3.) * __pyx_v_7quantas_5utils_7physics_21statistical_mechanics_NA) * __pyx_v_7quantas_5utils_7physics_21statistical_mechanics_H) * (__pyx_v_omega * 0.5));
goto __pyx_L0;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":28
*
*
* cdef inline double ho_zpe(double temperature, double omega) nogil: # <<<<<<<<<<<<<<
* """ Zero-point energy of single harmonic oscillator. """
* if omega <= 0.:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":37
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cpdef zero_point_energy(double[::1] temperature, double[:, :, ::1] band, double[::1] weights): # <<<<<<<<<<<<<<
* """
* Calculate the zero-point energy for a complete phonon band structure as:
*/
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_1zero_point_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
/* C-level body of `cpdef zero_point_energy(temperature, band, weights)`.
 * Allocates a (nt, nv) float64 numpy array of zeros, then accumulates
 *   result[i, n] += ho_zpe(temperature[i], band[j, k, n]) * weights[j]
 * over all bands j, frequencies k, and volumes n, for every temperature
 * index i.  The i-loop is a Cython `prange(nt, nogil=True)`: the GIL is
 * released and the loop is parallelized with `#pragma omp for`.  Each
 * OpenMP iteration owns a distinct i, and each thread only writes
 * result_view[i, n] for its own i, so no two threads touch the same
 * output element (no synchronization needed).
 * Since ho_zpe ignores its temperature argument (CYTHON_UNUSED above),
 * every row of the result is numerically identical -- the nt dimension
 * exists only for interface symmetry with the temperature-dependent
 * siblings (thermal_energy etc.).
 * Memoryview element access is boundscheck(False)/wraparound(False):
 * raw pointer arithmetic with no index validation. */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_1zero_point_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_zero_point_energy(__Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights, CYTHON_UNUSED int __pyx_skip_dispatch) {
Py_ssize_t __pyx_v_nt;
Py_ssize_t __pyx_v_nb;
Py_ssize_t __pyx_v_nf;
Py_ssize_t __pyx_v_nv;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_k;
int __pyx_v_n;
PyObject *__pyx_v_result = NULL;
__Pyx_memviewslice __pyx_v_result_view = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
int __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
int __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
int __pyx_t_18;
Py_ssize_t __pyx_t_19;
Py_ssize_t __pyx_t_20;
Py_ssize_t __pyx_t_21;
Py_ssize_t __pyx_t_22;
Py_ssize_t __pyx_t_23;
Py_ssize_t __pyx_t_24;
Py_ssize_t __pyx_t_25;
__Pyx_RefNannySetupContext("zero_point_energy", 0);
/* "quantas/utils/physics/statistical_mechanics.pyx":72
*
* """
* cdef Py_ssize_t nt = temperature.shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t nb = band.shape[0]
* cdef Py_ssize_t nf = band.shape[1]
*/
__pyx_v_nt = (__pyx_v_temperature.shape[0]);
/* "quantas/utils/physics/statistical_mechanics.pyx":73
* """
* cdef Py_ssize_t nt = temperature.shape[0]
* cdef Py_ssize_t nb = band.shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t nf = band.shape[1]
* cdef Py_ssize_t nv = band.shape[2]
*/
__pyx_v_nb = (__pyx_v_band.shape[0]);
/* "quantas/utils/physics/statistical_mechanics.pyx":74
* cdef Py_ssize_t nt = temperature.shape[0]
* cdef Py_ssize_t nb = band.shape[0]
* cdef Py_ssize_t nf = band.shape[1] # <<<<<<<<<<<<<<
* cdef Py_ssize_t nv = band.shape[2]
* cdef int i, j, k, n
*/
__pyx_v_nf = (__pyx_v_band.shape[1]);
/* "quantas/utils/physics/statistical_mechanics.pyx":75
* cdef Py_ssize_t nb = band.shape[0]
* cdef Py_ssize_t nf = band.shape[1]
* cdef Py_ssize_t nv = band.shape[2] # <<<<<<<<<<<<<<
* cdef int i, j, k, n
*
*/
__pyx_v_nv = (__pyx_v_band.shape[2]);
/* "quantas/utils/physics/statistical_mechanics.pyx":78
* cdef int i, j, k, n
*
* result = np.zeros( (nt,nv), dtype=np.float64 ) # <<<<<<<<<<<<<<
* cdef double[:,::1] result_view = result
*
*/
/* Build np.zeros((nt, nv), dtype=np.float64) by hand: fetch np.zeros,
 * assemble the shape tuple and the dtype kwarg dict, then call it. */
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_nt); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_nv); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_result = __pyx_t_5;
__pyx_t_5 = 0;
/* "quantas/utils/physics/statistical_mechanics.pyx":79
*
* result = np.zeros( (nt,nv), dtype=np.float64 )
* cdef double[:,::1] result_view = result # <<<<<<<<<<<<<<
*
*
*/
/* Acquire a C-contiguous writable double[:, ::1] view on the result
 * array; the raw loop below writes through this view, not the object. */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_result, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 79, __pyx_L1_error)
__pyx_v_result_view = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "quantas/utils/physics/statistical_mechanics.pyx":82
*
*
* for i in prange(nt, nogil=True): # <<<<<<<<<<<<<<
* for j in range(nb):
* for k in range(nf):
*/
/* prange(nt, nogil=True): release the GIL, then parallelize the outer
 * temperature loop across OpenMP threads; j/k/n are thread-private. */
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_7 = __pyx_v_nt;
if (1 == 0) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
/* Iteration count for step=1, start=0: ceil((nt - 0) / 1). */
__pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_9 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) lastprivate(__pyx_v_n)
#endif /* _OPENMP */
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_8);
/* Initialize private variables to invalid values */
__pyx_v_j = ((int)0xbad0bad0);
__pyx_v_k = ((int)0xbad0bad0);
__pyx_v_n = ((int)0xbad0bad0);
/* "quantas/utils/physics/statistical_mechanics.pyx":83
*
* for i in prange(nt, nogil=True):
* for j in range(nb): # <<<<<<<<<<<<<<
* for k in range(nf):
* for n in range(nv):
*/
__pyx_t_10 = __pyx_v_nb;
__pyx_t_11 = __pyx_t_10;
for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_j = __pyx_t_12;
/* "quantas/utils/physics/statistical_mechanics.pyx":84
* for i in prange(nt, nogil=True):
* for j in range(nb):
* for k in range(nf): # <<<<<<<<<<<<<<
* for n in range(nv):
* result_view[i, n] += ho_zpe(temperature[i], band[j,k,n]) * \
*/
__pyx_t_13 = __pyx_v_nf;
__pyx_t_14 = __pyx_t_13;
for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) {
__pyx_v_k = __pyx_t_15;
/* "quantas/utils/physics/statistical_mechanics.pyx":85
* for j in range(nb):
* for k in range(nf):
* for n in range(nv): # <<<<<<<<<<<<<<
* result_view[i, n] += ho_zpe(temperature[i], band[j,k,n]) * \
* weights[j]
*/
__pyx_t_16 = __pyx_v_nv;
__pyx_t_17 = __pyx_t_16;
for (__pyx_t_18 = 0; __pyx_t_18 < __pyx_t_17; __pyx_t_18+=1) {
__pyx_v_n = __pyx_t_18;
/* "quantas/utils/physics/statistical_mechanics.pyx":86
* for k in range(nf):
* for n in range(nv):
* result_view[i, n] += ho_zpe(temperature[i], band[j,k,n]) * \ # <<<<<<<<<<<<<<
* weights[j]
*
*/
__pyx_t_19 = __pyx_v_i;
__pyx_t_20 = __pyx_v_j;
__pyx_t_21 = __pyx_v_k;
__pyx_t_22 = __pyx_v_n;
/* "quantas/utils/physics/statistical_mechanics.pyx":87
* for n in range(nv):
* result_view[i, n] += ho_zpe(temperature[i], band[j,k,n]) * \
* weights[j] # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_t_23 = __pyx_v_j;
/* "quantas/utils/physics/statistical_mechanics.pyx":86
* for k in range(nf):
* for n in range(nv):
* result_view[i, n] += ho_zpe(temperature[i], band[j,k,n]) * \ # <<<<<<<<<<<<<<
* weights[j]
*
*/
__pyx_t_24 = __pyx_v_i;
__pyx_t_25 = __pyx_v_n;
/* Unchecked (boundscheck off) accumulation:
 *   result_view[i, n] += ho_zpe(temperature[i], band[j,k,n]) * weights[j]
 * Dim-0 of each slice uses the stored stride; the contiguous last
 * dimension is addressed as a plain double* offset. */
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_result_view.data + __pyx_t_24 * __pyx_v_result_view.strides[0]) )) + __pyx_t_25)) )) += (__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_zpe((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_temperature.data) + __pyx_t_19)) ))), (*((double *) ( /* dim=2 */ ((char *) (((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_band.data + __pyx_t_20 * __pyx_v_band.strides[0]) ) + __pyx_t_21 * __pyx_v_band.strides[1]) )) + __pyx_t_22)) )))) * (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_weights.data) + __pyx_t_23)) ))));
}
}
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "quantas/utils/physics/statistical_mechanics.pyx":82
*
*
* for i in prange(nt, nogil=True): # <<<<<<<<<<<<<<
* for j in range(nb):
* for k in range(nf):
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "quantas/utils/physics/statistical_mechanics.pyx":89
* weights[j]
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "quantas/utils/physics/statistical_mechanics.pyx":37
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cpdef zero_point_energy(double[::1] temperature, double[:, :, ::1] band, double[::1] weights): # <<<<<<<<<<<<<<
* """
* Calculate the zero-point energy for a complete phonon band structure as:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.zero_point_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__PYX_XDEC_MEMVIEW(&__pyx_v_result_view, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_1zero_point_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_7quantas_5utils_7physics_21statistical_mechanics_zero_point_energy[] = "\n Calculate the zero-point energy for a complete phonon band structure as:\n\n .. math::\n\n U_{zp}\\big(V \\big) = \\sum_{\\vec{k}} \\sum_{i=0}^{3N}\n \\frac{1}{2} h \\nu_i(\\vec{k})\n\n with :math:`\\vec{k}` the sampled *k*-points, :math:`N` the number of\n atoms in the considered unit cell and :math:`h` the Planck constant.\n\n .. note::\n\n Zero-point energy is independent on temperature.\n\n Parameters\n ----------\n\n temperature: ndarray\n Array of temperature values with `float` type.\n\n band: ndarray(ndim=3)\n Array of phonon band frequencies with `float` type.\n\n weights: ndarray\n Array of weights for each phonon band with `float` type.\n\n Returns\n -------\n\n result: ndarray\n Array of zero-point energy values with `float` type.\n\n ";
/* Python-callable wrapper for zero_point_energy(temperature, band, weights).
 * Unpacks exactly three positional/keyword arguments (the switch cases
 * deliberately fall through so that positional args fill `values` first
 * and keywords fill the remainder), converts each to the required
 * memoryview type (1-D, 3-D, 1-D C-contiguous double, all requested
 * writable), and dispatches to the pf trampoline below.  Any unpacking
 * or conversion failure raises through __pyx_L3_error / the argtuple
 * error path with a module-qualified traceback. */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_1zero_point_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_temperature = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_band = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_weights = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("zero_point_energy (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_temperature,&__pyx_n_s_band,&__pyx_n_s_weights,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* Copy however many positional args were given; cases fall through. */
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fill the remaining slots from keywords; a missing required keyword
 * raises "takes exactly 3 arguments" via RaiseArgtupleInvalid. */
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_temperature)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_band)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("zero_point_energy", 1, 3, 3, 1); __PYX_ERR(0, 37, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("zero_point_energy", 1, 3, 3, 2); __PYX_ERR(0, 37, __pyx_L3_error)
}
}
/* Leftover keywords at this point are unexpected/duplicate names. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "zero_point_energy") < 0)) __PYX_ERR(0, 37, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
/* Convert to typed memoryviews (double[::1], double[:,:,::1], double[::1]). */
__pyx_v_temperature = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_temperature.memview)) __PYX_ERR(0, 37, __pyx_L3_error)
__pyx_v_band = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_band.memview)) __PYX_ERR(0, 37, __pyx_L3_error)
__pyx_v_weights = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_weights.memview)) __PYX_ERR(0, 37, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("zero_point_energy", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 37, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.zero_point_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_zero_point_energy(__pyx_self, __pyx_v_temperature, __pyx_v_band, __pyx_v_weights);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* pf trampoline for zero_point_energy: validates that all three
 * memoryview arguments were successfully bound (raising
 * UnboundLocalError otherwise), forwards to the c-level implementation
 * with skip_dispatch=0, and releases the argument memoryview references
 * on every exit path. */
static PyObject *__pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_zero_point_energy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("zero_point_energy", 0);
__Pyx_XDECREF(__pyx_r);
if (unlikely(!__pyx_v_temperature.memview)) { __Pyx_RaiseUnboundLocalError("temperature"); __PYX_ERR(0, 37, __pyx_L1_error) }
if (unlikely(!__pyx_v_band.memview)) { __Pyx_RaiseUnboundLocalError("band"); __PYX_ERR(0, 37, __pyx_L1_error) }
if (unlikely(!__pyx_v_weights.memview)) { __Pyx_RaiseUnboundLocalError("weights"); __PYX_ERR(0, 37, __pyx_L1_error) }
__pyx_t_1 = __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_zero_point_energy(__pyx_v_temperature, __pyx_v_band, __pyx_v_weights, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.zero_point_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_temperature, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_band, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_weights, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":92
*
*
* cdef inline double ho_eth(double temperature, double omega) nogil: # <<<<<<<<<<<<<<
* """ Thermal internal energy of single harmonic oscillator. """
* cdef double x, n, l
*/
/* C translation of `cdef inline double ho_eth(temperature, omega) nogil`:
 * thermal internal energy of a single harmonic oscillator,
 *   1e-3 * NA * (H*omega) / expm1(H*omega / (KB*temperature)),
 * or 0. when omega <= 0 or temperature == 0 (both would otherwise
 * divide by zero / be unphysical).  NA, H, KB are module-level cdef
 * globals (per the zero_point_energy docstring, H is the Planck
 * constant; KB is presumably Boltzmann's constant -- confirm in .pyx).
 * Because the function is nogil, the two runtime division-by-zero
 * checks must re-acquire the GIL before setting ZeroDivisionError;
 * the error is then reported via WriteUnraisable (the caller cannot
 * propagate it) and 0 is returned. */
static CYTHON_INLINE double __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_eth(double __pyx_v_temperature, double __pyx_v_omega) {
double __pyx_v_x;
double __pyx_v_n;
double __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
double __pyx_t_3;
/* "quantas/utils/physics/statistical_mechanics.pyx":95
* """ Thermal internal energy of single harmonic oscillator. """
* cdef double x, n, l
* if omega <= 0. or temperature == 0.: # <<<<<<<<<<<<<<
* return 0.
* else:
*/
/* Short-circuit `or`: evaluate `temperature == 0.` only if needed. */
__pyx_t_2 = ((__pyx_v_omega <= 0.) != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_temperature == 0.) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "quantas/utils/physics/statistical_mechanics.pyx":96
* cdef double x, n, l
* if omega <= 0. or temperature == 0.:
* return 0. # <<<<<<<<<<<<<<
* else:
* n = H * omega
*/
__pyx_r = 0.;
goto __pyx_L0;
/* "quantas/utils/physics/statistical_mechanics.pyx":95
* """ Thermal internal energy of single harmonic oscillator. """
* cdef double x, n, l
* if omega <= 0. or temperature == 0.: # <<<<<<<<<<<<<<
* return 0.
* else:
*/
}
/* "quantas/utils/physics/statistical_mechanics.pyx":98
* return 0.
* else:
* n = H * omega # <<<<<<<<<<<<<<
* x = n / (KB * temperature)
* return pow(10., -3.) * NA * (n/expm1(x))
*/
/*else*/ {
__pyx_v_n = (__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_H * __pyx_v_omega);
/* "quantas/utils/physics/statistical_mechanics.pyx":99
* else:
* n = H * omega
* x = n / (KB * temperature) # <<<<<<<<<<<<<<
* return pow(10., -3.) * NA * (n/expm1(x))
*
*/
__pyx_t_3 = (__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_KB * __pyx_v_temperature);
if (unlikely(__pyx_t_3 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 99, __pyx_L1_error)
}
__pyx_v_x = (__pyx_v_n / __pyx_t_3);
/* "quantas/utils/physics/statistical_mechanics.pyx":100
* n = H * omega
* x = n / (KB * temperature)
* return pow(10., -3.) * NA * (n/expm1(x)) # <<<<<<<<<<<<<<
*
*
*/
/* expm1(x) = exp(x) - 1, accurate for small x (low-frequency modes
 * at high temperature, where exp(x) is close to 1). */
__pyx_t_3 = expm1(__pyx_v_x);
if (unlikely(__pyx_t_3 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 100, __pyx_L1_error)
}
__pyx_r = ((pow(10., -3.) * __pyx_v_7quantas_5utils_7physics_21statistical_mechanics_NA) * (__pyx_v_n / __pyx_t_3));
goto __pyx_L0;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":92
*
*
* cdef inline double ho_eth(double temperature, double omega) nogil: # <<<<<<<<<<<<<<
* """ Thermal internal energy of single harmonic oscillator. """
* cdef double x, n, l
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_WriteUnraisable("quantas.utils.physics.statistical_mechanics.ho_eth", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1);
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":105
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cpdef thermal_energy(double[::1] temperature, double[:, :, ::1] band, double[::1] weights): # <<<<<<<<<<<<<<
* """
* Calculate the thermal internal energy for a complete phonon band
*/
/* NOTE(review): Cython-generated C for the cpdef `thermal_energy` shown in the
 * embedded .pyx comments.  Do not hand-edit the logic -- fix the .pyx source and
 * regenerate.  C-level implementation: allocates an (nt, nv) float64 numpy array
 * of zeros, then, in an OpenMP-parallel loop over temperature index i (GIL
 * released), accumulates ho_eth(temperature[i], band[j,k,n]) * weights[j] into
 * result_view[i, n] over all bands j and frequencies k.  Each parallel iteration
 * owns a distinct row i of result_view, so the `+=` writes cannot race. */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_3thermal_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_thermal_energy(__Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights, CYTHON_UNUSED int __pyx_skip_dispatch) {
Py_ssize_t __pyx_v_nt;
Py_ssize_t __pyx_v_nb;
Py_ssize_t __pyx_v_nf;
Py_ssize_t __pyx_v_nv;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_k;
int __pyx_v_n;
PyObject *__pyx_v_result = NULL;
__Pyx_memviewslice __pyx_v_result_view = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
int __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
int __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
int __pyx_t_18;
Py_ssize_t __pyx_t_19;
Py_ssize_t __pyx_t_20;
Py_ssize_t __pyx_t_21;
Py_ssize_t __pyx_t_22;
Py_ssize_t __pyx_t_23;
Py_ssize_t __pyx_t_24;
Py_ssize_t __pyx_t_25;
__Pyx_RefNannySetupContext("thermal_energy", 0);
/* "quantas/utils/physics/statistical_mechanics.pyx":139
 *
 * """
 * cdef Py_ssize_t nt = temperature.shape[0]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t nb = band.shape[0]
 * cdef Py_ssize_t nf = band.shape[1]
 */
__pyx_v_nt = (__pyx_v_temperature.shape[0]);
/* "quantas/utils/physics/statistical_mechanics.pyx":140
 * """
 * cdef Py_ssize_t nt = temperature.shape[0]
 * cdef Py_ssize_t nb = band.shape[0]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t nf = band.shape[1]
 * cdef Py_ssize_t nv = band.shape[2]
 */
__pyx_v_nb = (__pyx_v_band.shape[0]);
/* "quantas/utils/physics/statistical_mechanics.pyx":141
 * cdef Py_ssize_t nt = temperature.shape[0]
 * cdef Py_ssize_t nb = band.shape[0]
 * cdef Py_ssize_t nf = band.shape[1]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t nv = band.shape[2]
 * cdef int i, j, k, n
 */
__pyx_v_nf = (__pyx_v_band.shape[1]);
/* "quantas/utils/physics/statistical_mechanics.pyx":142
 * cdef Py_ssize_t nb = band.shape[0]
 * cdef Py_ssize_t nf = band.shape[1]
 * cdef Py_ssize_t nv = band.shape[2]             # <<<<<<<<<<<<<<
 * cdef int i, j, k, n
 *
 */
__pyx_v_nv = (__pyx_v_band.shape[2]);
/* "quantas/utils/physics/statistical_mechanics.pyx":145
 * cdef int i, j, k, n
 *
 * result = np.zeros( (nt,nv), dtype=np.float64 )             # <<<<<<<<<<<<<<
 * cdef double[:,::1] result_view = result
 *
 */
/* Build and invoke np.zeros((nt, nv), dtype=np.float64); requires the GIL. */
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_nt); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_nv); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_result = __pyx_t_5;
__pyx_t_5 = 0;
/* "quantas/utils/physics/statistical_mechanics.pyx":146
 *
 * result = np.zeros( (nt,nv), dtype=np.float64 )
 * cdef double[:,::1] result_view = result             # <<<<<<<<<<<<<<
 *
 *
 */
/* Acquire a writable C-contiguous double[:, ::1] view over the result array. */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_result, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 146, __pyx_L1_error)
__pyx_v_result_view = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "quantas/utils/physics/statistical_mechanics.pyx":149
 *
 *
 * for i in prange(nt, nogil=True):             # <<<<<<<<<<<<<<
 * for j in range(nb):
 * for k in range(nf):
 */
/* prange(nt, nogil=True): GIL is released (Py_UNBLOCK_THREADS) around the
 * OpenMP-parallel region below and re-acquired on exit. */
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_7 = __pyx_v_nt;
if (1 == 0) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x)   (x)
#define unlikely(x) (x)
#endif
__pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_9 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) lastprivate(__pyx_v_n)
#endif /* _OPENMP */
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_8);
/* Initialize private variables to invalid values */
__pyx_v_j = ((int)0xbad0bad0);
__pyx_v_k = ((int)0xbad0bad0);
__pyx_v_n = ((int)0xbad0bad0);
/* "quantas/utils/physics/statistical_mechanics.pyx":150
 *
 * for i in prange(nt, nogil=True):
 * for j in range(nb):             # <<<<<<<<<<<<<<
 * for k in range(nf):
 * for n in range(nv):
 */
__pyx_t_10 = __pyx_v_nb;
__pyx_t_11 = __pyx_t_10;
for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_j = __pyx_t_12;
/* "quantas/utils/physics/statistical_mechanics.pyx":151
 * for i in prange(nt, nogil=True):
 * for j in range(nb):
 * for k in range(nf):             # <<<<<<<<<<<<<<
 * for n in range(nv):
 * result_view[i, n] += ho_eth(temperature[i], band[j,k,n]) * \
 */
__pyx_t_13 = __pyx_v_nf;
__pyx_t_14 = __pyx_t_13;
for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) {
__pyx_v_k = __pyx_t_15;
/* "quantas/utils/physics/statistical_mechanics.pyx":152
 * for j in range(nb):
 * for k in range(nf):
 * for n in range(nv):             # <<<<<<<<<<<<<<
 * result_view[i, n] += ho_eth(temperature[i], band[j,k,n]) * \
 * weights[j]
 */
__pyx_t_16 = __pyx_v_nv;
__pyx_t_17 = __pyx_t_16;
for (__pyx_t_18 = 0; __pyx_t_18 < __pyx_t_17; __pyx_t_18+=1) {
__pyx_v_n = __pyx_t_18;
/* "quantas/utils/physics/statistical_mechanics.pyx":153
 * for k in range(nf):
 * for n in range(nv):
 * result_view[i, n] += ho_eth(temperature[i], band[j,k,n]) * \             # <<<<<<<<<<<<<<
 * weights[j]
 *
 */
__pyx_t_19 = __pyx_v_i;
__pyx_t_20 = __pyx_v_j;
__pyx_t_21 = __pyx_v_k;
__pyx_t_22 = __pyx_v_n;
/* "quantas/utils/physics/statistical_mechanics.pyx":154
 * for n in range(nv):
 * result_view[i, n] += ho_eth(temperature[i], band[j,k,n]) * \
 * weights[j]             # <<<<<<<<<<<<<<
 *
 * return result
 */
__pyx_t_23 = __pyx_v_j;
/* "quantas/utils/physics/statistical_mechanics.pyx":153
 * for k in range(nf):
 * for n in range(nv):
 * result_view[i, n] += ho_eth(temperature[i], band[j,k,n]) * \             # <<<<<<<<<<<<<<
 * weights[j]
 *
 */
__pyx_t_24 = __pyx_v_i;
__pyx_t_25 = __pyx_v_n;
/* Accumulation kernel: raw pointer arithmetic into the contiguous
 * memoryview buffers (boundscheck/wraparound are disabled in the .pyx). */
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_result_view.data + __pyx_t_24 * __pyx_v_result_view.strides[0]) )) + __pyx_t_25)) )) += (__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_eth((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_temperature.data) + __pyx_t_19)) ))), (*((double *) ( /* dim=2 */ ((char *) (((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_band.data + __pyx_t_20 * __pyx_v_band.strides[0]) ) + __pyx_t_21 * __pyx_v_band.strides[1]) )) + __pyx_t_22)) )))) * (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_weights.data) + __pyx_t_23)) ))));
}
}
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "quantas/utils/physics/statistical_mechanics.pyx":149
 *
 *
 * for i in prange(nt, nogil=True):             # <<<<<<<<<<<<<<
 * for j in range(nb):
 * for k in range(nf):
 */
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "quantas/utils/physics/statistical_mechanics.pyx":156
 * weights[j]
 *
 * return result             # <<<<<<<<<<<<<<
 *
 * @cython.boundscheck(False)
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "quantas/utils/physics/statistical_mechanics.pyx":105
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * cpdef thermal_energy(double[::1] temperature, double[:, :, ::1] band, double[::1] weights):             # <<<<<<<<<<<<<<
 * """
 * Calculate the thermal internal energy for a complete phonon band
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.thermal_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__PYX_XDEC_MEMVIEW(&__pyx_v_result_view, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/* NOTE(review): Cython-generated argument-parsing wrapper for `thermal_energy`.
 * Accepts exactly three positional-or-keyword arguments (temperature, band,
 * weights), converts each to the required contiguous double memoryview
 * (1-D, 3-D, 1-D respectively), and delegates to the pf trampoline below.
 * Raises TypeError via __Pyx_RaiseArgtupleInvalid on arity/keyword errors. */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_3thermal_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_7quantas_5utils_7physics_21statistical_mechanics_2thermal_energy[] = "\n    Calculate the thermal internal energy for a complete phonon band\n    structure as:\n\n    .. math::\n\n        U_{th}\\big(T, V \\big) = \\sum_{\\vec{k}} \\sum_{i=0}^{3N}\n        \\frac{\\nu_i(\\vec{k})}\n        {e^{\\frac{h \\nu_i(\\vec{k})}{k_B T}} -1}\n\n    with :math:`\\vec{k}` the sampled *k*-points, :math:`N` the number of\n    atoms in the considered unit cell, :math:`h` the Planck constant and\n    :math:`k_B` the Boltzmann constant.\n\n    Parameters\n    ----------\n\n    temperature: ndarray\n        Array of temperature values with `float` type.\n\n    band: ndarray(ndim=3)\n        Array of phonon band frequencies with `float` type.\n\n    weights: ndarray\n        Array of weights for each phonon band with `float` type.\n\n    Returns\n    -------\n\n    result: ndarray\n        Array of thermal internal energy values with `float` type.\n\n    ";
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_3thermal_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_temperature = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_band = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_weights = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("thermal_energy (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_temperature,&__pyx_n_s_band,&__pyx_n_s_weights,0};
PyObject* values[3] = {0,0,0};
/* Keyword path: merge positional args, then fill remaining slots by name. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case  0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case  0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_temperature)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case  1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_band)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("thermal_energy", 1, 3, 3, 1); __PYX_ERR(0, 105, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case  2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("thermal_energy", 1, 3, 3, 2); __PYX_ERR(0, 105, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "thermal_energy") < 0)) __PYX_ERR(0, 105, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
/* Buffer acquisition: each conversion raises (and jumps to L3) on a
 * non-conforming argument (wrong dtype/ndim/contiguity). */
__pyx_v_temperature = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_temperature.memview)) __PYX_ERR(0, 105, __pyx_L3_error)
__pyx_v_band = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_band.memview)) __PYX_ERR(0, 105, __pyx_L3_error)
__pyx_v_weights = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_weights.memview)) __PYX_ERR(0, 105, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("thermal_energy", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 105, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.thermal_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_2thermal_energy(__pyx_self, __pyx_v_temperature, __pyx_v_band, __pyx_v_weights);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-generated pf trampoline for `thermal_energy`.
 * Validates that all three memoryview arguments were successfully bound,
 * then calls the C-level implementation (skip_dispatch=0) and releases the
 * memoryview references on exit via the shared L0 epilogue. */
static PyObject *__pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_2thermal_energy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("thermal_energy", 0);
__Pyx_XDECREF(__pyx_r);
if (unlikely(!__pyx_v_temperature.memview)) { __Pyx_RaiseUnboundLocalError("temperature"); __PYX_ERR(0, 105, __pyx_L1_error) }
if (unlikely(!__pyx_v_band.memview)) { __Pyx_RaiseUnboundLocalError("band"); __PYX_ERR(0, 105, __pyx_L1_error) }
if (unlikely(!__pyx_v_weights.memview)) { __Pyx_RaiseUnboundLocalError("weights"); __PYX_ERR(0, 105, __pyx_L1_error) }
__pyx_t_1 = __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_thermal_energy(__pyx_v_temperature, __pyx_v_band, __pyx_v_weights, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 105, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.thermal_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_temperature, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_band, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_weights, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":160
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * cpdef internal_energy(double[::1] U0, double[::1] Uzp, double[:,::1] Uth):             # <<<<<<<<<<<<<<
 * """
 * Calculate the total internal energy of the system as:
 */
/* NOTE(review): Cython-generated C for the cpdef `internal_energy`.  Do not
 * hand-edit -- regenerate from the .pyx.  Allocates an (nt, nv) float64 zeros
 * array and, in an OpenMP prange over temperature index i (GIL released),
 * sets result_view[i, j] += U0[j] + Uzp[j] + Uth[i, j] for every volume j.
 * Since the array starts at zero, `+=` here amounts to plain assignment.
 * Each parallel iteration writes only its own row i, so there is no race. */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_5internal_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_internal_energy(__Pyx_memviewslice __pyx_v_U0, __Pyx_memviewslice __pyx_v_Uzp, __Pyx_memviewslice __pyx_v_Uth, CYTHON_UNUSED int __pyx_skip_dispatch) {
Py_ssize_t __pyx_v_nt;
Py_ssize_t __pyx_v_nv;
int __pyx_v_i;
int __pyx_v_j;
__Pyx_memviewslice __pyx_v_U0_v = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_Uzp_v = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_Uth_v = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_v_result = NULL;
__Pyx_memviewslice __pyx_v_result_view = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
int __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
Py_ssize_t __pyx_t_18;
__Pyx_RefNannySetupContext("internal_energy", 0);
/* "quantas/utils/physics/statistical_mechanics.pyx":176
 *
 * """
 * cdef Py_ssize_t nt = Uth.shape[0]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t nv = U0.shape[0]
 * cdef int i, j
 */
__pyx_v_nt = (__pyx_v_Uth.shape[0]);
/* "quantas/utils/physics/statistical_mechanics.pyx":177
 * """
 * cdef Py_ssize_t nt = Uth.shape[0]
 * cdef Py_ssize_t nv = U0.shape[0]             # <<<<<<<<<<<<<<
 * cdef int i, j
 *
 */
__pyx_v_nv = (__pyx_v_U0.shape[0]);
/* "quantas/utils/physics/statistical_mechanics.pyx":180
 * cdef int i, j
 *
 * cdef double[::1] U0_v = U0             # <<<<<<<<<<<<<<
 * cdef double[::1] Uzp_v = Uzp
 * cdef double[:,::1] Uth_v = Uth
 */
/* Local memoryview aliases of the inputs; each takes an extra reference. */
__PYX_INC_MEMVIEW(&__pyx_v_U0, 0);
__pyx_v_U0_v = __pyx_v_U0;
/* "quantas/utils/physics/statistical_mechanics.pyx":181
 *
 * cdef double[::1] U0_v = U0
 * cdef double[::1] Uzp_v = Uzp             # <<<<<<<<<<<<<<
 * cdef double[:,::1] Uth_v = Uth
 *
 */
__PYX_INC_MEMVIEW(&__pyx_v_Uzp, 0);
__pyx_v_Uzp_v = __pyx_v_Uzp;
/* "quantas/utils/physics/statistical_mechanics.pyx":182
 * cdef double[::1] U0_v = U0
 * cdef double[::1] Uzp_v = Uzp
 * cdef double[:,::1] Uth_v = Uth             # <<<<<<<<<<<<<<
 *
 * result = np.zeros( (nt,nv), dtype=np.float64 )
 */
__PYX_INC_MEMVIEW(&__pyx_v_Uth, 0);
__pyx_v_Uth_v = __pyx_v_Uth;
/* "quantas/utils/physics/statistical_mechanics.pyx":184
 * cdef double[:,::1] Uth_v = Uth
 *
 * result = np.zeros( (nt,nv), dtype=np.float64 )             # <<<<<<<<<<<<<<
 * cdef double[:,::1] result_view = result
 *
 */
/* Build and invoke np.zeros((nt, nv), dtype=np.float64); requires the GIL. */
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 184, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 184, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_nt); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 184, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_nv); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 184, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 184, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 184, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 184, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 184, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 184, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 184, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 184, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_result = __pyx_t_5;
__pyx_t_5 = 0;
/* "quantas/utils/physics/statistical_mechanics.pyx":185
 *
 * result = np.zeros( (nt,nv), dtype=np.float64 )
 * cdef double[:,::1] result_view = result             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_result, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 185, __pyx_L1_error)
__pyx_v_result_view = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "quantas/utils/physics/statistical_mechanics.pyx":188
 *
 *
 * for i in prange(nt, nogil=True):             # <<<<<<<<<<<<<<
 * for j in range(nv):
 * result_view[i, j] += U0_v[j] + Uzp_v[j] + Uth_v[i,j]
 */
/* prange(nt, nogil=True): GIL released around the OpenMP region below. */
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_7 = __pyx_v_nt;
if (1 == 0) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x)   (x)
#define unlikely(x) (x)
#endif
__pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_9 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j)
#endif /* _OPENMP */
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_8);
/* Initialize private variables to invalid values */
__pyx_v_j = ((int)0xbad0bad0);
/* "quantas/utils/physics/statistical_mechanics.pyx":189
 *
 * for i in prange(nt, nogil=True):
 * for j in range(nv):             # <<<<<<<<<<<<<<
 * result_view[i, j] += U0_v[j] + Uzp_v[j] + Uth_v[i,j]
 *
 */
__pyx_t_10 = __pyx_v_nv;
__pyx_t_11 = __pyx_t_10;
for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_j = __pyx_t_12;
/* "quantas/utils/physics/statistical_mechanics.pyx":190
 * for i in prange(nt, nogil=True):
 * for j in range(nv):
 * result_view[i, j] += U0_v[j] + Uzp_v[j] + Uth_v[i,j]             # <<<<<<<<<<<<<<
 *
 * return result
 */
__pyx_t_13 = __pyx_v_j;
__pyx_t_14 = __pyx_v_j;
__pyx_t_15 = __pyx_v_i;
__pyx_t_16 = __pyx_v_j;
__pyx_t_17 = __pyx_v_i;
__pyx_t_18 = __pyx_v_j;
/* Elementwise sum written through raw pointers into the contiguous views. */
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_result_view.data + __pyx_t_17 * __pyx_v_result_view.strides[0]) )) + __pyx_t_18)) )) += (((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_U0_v.data) + __pyx_t_13)) ))) + (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_Uzp_v.data) + __pyx_t_14)) )))) + (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_Uth_v.data + __pyx_t_15 * __pyx_v_Uth_v.strides[0]) )) + __pyx_t_16)) ))));
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "quantas/utils/physics/statistical_mechanics.pyx":188
 *
 *
 * for i in prange(nt, nogil=True):             # <<<<<<<<<<<<<<
 * for j in range(nv):
 * result_view[i, j] += U0_v[j] + Uzp_v[j] + Uth_v[i,j]
 */
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "quantas/utils/physics/statistical_mechanics.pyx":192
 * result_view[i, j] += U0_v[j] + Uzp_v[j] + Uth_v[i,j]
 *
 * return result             # <<<<<<<<<<<<<<
 *
 * cdef double ho_S(double temperature, double omega) nogil:
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "quantas/utils/physics/statistical_mechanics.pyx":160
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * cpdef internal_energy(double[::1] U0, double[::1] Uzp, double[:,::1] Uth):             # <<<<<<<<<<<<<<
 * """
 * Calculate the total internal energy of the system as:
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.internal_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_U0_v, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_Uzp_v, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_Uth_v, 1);
__Pyx_XDECREF(__pyx_v_result);
__PYX_XDEC_MEMVIEW(&__pyx_v_result_view, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/* NOTE(review): Cython-generated argument-parsing wrapper for `internal_energy`.
 * Accepts exactly three positional-or-keyword arguments (U0, Uzp, Uth),
 * converts them to contiguous double memoryviews (1-D, 1-D, 2-D), and
 * delegates to the pf trampoline.  Raises TypeError on arity/keyword errors. */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_5internal_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_7quantas_5utils_7physics_21statistical_mechanics_4internal_energy[] = "\n    Calculate the total internal energy of the system as:\n\n    .. math::\n\n        U\\big(T, V \\big) = U_{zp}\\big(V \\big) + U_{th}\\big(T, V \\big) +\n        U_0\\big(V \\big)\n\n    Returns\n    -------\n\n    result: ndarray\n        Array of total internal energy values with `float` type.\n\n    ";
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_5internal_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_U0 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_Uzp = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_Uth = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("internal_energy (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_U0,&__pyx_n_s_Uzp,&__pyx_n_s_Uth,0};
PyObject* values[3] = {0,0,0};
/* Keyword path: merge positional args, then fill remaining slots by name. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case  0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case  0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_U0)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case  1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Uzp)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("internal_energy", 1, 3, 3, 1); __PYX_ERR(0, 160, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case  2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Uth)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("internal_energy", 1, 3, 3, 2); __PYX_ERR(0, 160, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "internal_energy") < 0)) __PYX_ERR(0, 160, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
/* Buffer acquisition: raises (and jumps to L3) on non-conforming input. */
__pyx_v_U0 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_U0.memview)) __PYX_ERR(0, 160, __pyx_L3_error)
__pyx_v_Uzp = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_Uzp.memview)) __PYX_ERR(0, 160, __pyx_L3_error)
__pyx_v_Uth = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_Uth.memview)) __PYX_ERR(0, 160, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("internal_energy", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 160, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.internal_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_4internal_energy(__pyx_self, __pyx_v_U0, __pyx_v_Uzp, __pyx_v_Uth);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-generated pf trampoline for `internal_energy`.
 * Validates memoryview bindings, calls the C-level implementation
 * (skip_dispatch=0), and releases the memoryviews in the L0 epilogue. */
static PyObject *__pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_4internal_energy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_U0, __Pyx_memviewslice __pyx_v_Uzp, __Pyx_memviewslice __pyx_v_Uth) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("internal_energy", 0);
__Pyx_XDECREF(__pyx_r);
if (unlikely(!__pyx_v_U0.memview)) { __Pyx_RaiseUnboundLocalError("U0"); __PYX_ERR(0, 160, __pyx_L1_error) }
if (unlikely(!__pyx_v_Uzp.memview)) { __Pyx_RaiseUnboundLocalError("Uzp"); __PYX_ERR(0, 160, __pyx_L1_error) }
if (unlikely(!__pyx_v_Uth.memview)) { __Pyx_RaiseUnboundLocalError("Uth"); __PYX_ERR(0, 160, __pyx_L1_error) }
__pyx_t_1 = __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_internal_energy(__pyx_v_U0, __pyx_v_Uzp, __pyx_v_Uth, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.internal_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_U0, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_Uzp, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_Uth, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":194
 * return result
 *
 * cdef double ho_S(double temperature, double omega) nogil:             # <<<<<<<<<<<<<<
 * """ Entropy of single harmonic oscillator. """
 * cdef double x, n, l
 */
/* NOTE(review): Cython-generated C for the nogil cdef helper `ho_S` --
 * entropy of a single harmonic oscillator.  Returns 0 when omega <= 0 or
 * temperature == 0; otherwise computes x = H*omega/(KB*T), n = 1/expm1(x),
 * l = log(1 - exp(-x)) and returns NA*KB*(n*x - l), with H, KB and NA the
 * module-level constants.  The guard `temperature == 0.` makes the divisor
 * KB*temperature non-zero for positive T, but Cython still emits runtime
 * ZeroDivisionError checks (which briefly re-acquire the GIL to raise);
 * errors from this nogil function are reported via __Pyx_WriteUnraisable. */
static double __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_S(double __pyx_v_temperature, double __pyx_v_omega) {
double __pyx_v_x;
double __pyx_v_n;
double __pyx_v_l;
double __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
double __pyx_t_3;
double __pyx_t_4;
/* "quantas/utils/physics/statistical_mechanics.pyx":197
 * """ Entropy of single harmonic oscillator. """
 * cdef double x, n, l
 * if omega <= 0. or temperature == 0.:             # <<<<<<<<<<<<<<
 * return 0.
 * else:
 */
/* Short-circuit `or`: the second operand is only evaluated if omega > 0. */
__pyx_t_2 = ((__pyx_v_omega <= 0.) != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_temperature == 0.) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "quantas/utils/physics/statistical_mechanics.pyx":198
 * cdef double x, n, l
 * if omega <= 0. or temperature == 0.:
 * return 0.             # <<<<<<<<<<<<<<
 * else:
 * x = H * omega / (KB * temperature)
 */
__pyx_r = 0.;
goto __pyx_L0;
/* "quantas/utils/physics/statistical_mechanics.pyx":197
 * """ Entropy of single harmonic oscillator. """
 * cdef double x, n, l
 * if omega <= 0. or temperature == 0.:             # <<<<<<<<<<<<<<
 * return 0.
 * else:
 */
}
/* "quantas/utils/physics/statistical_mechanics.pyx":200
 * return 0.
 * else:
 * x = H * omega / (KB * temperature)             # <<<<<<<<<<<<<<
 * n = 1./expm1(x)
 * l = log(1. - exp(-x))
 */
/*else*/ {
__pyx_t_3 = (__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_H * __pyx_v_omega);
__pyx_t_4 = (__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_KB * __pyx_v_temperature);
if (unlikely(__pyx_t_4 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 200, __pyx_L1_error)
}
__pyx_v_x = (__pyx_t_3 / __pyx_t_4);
/* "quantas/utils/physics/statistical_mechanics.pyx":201
 * else:
 * x = H * omega / (KB * temperature)
 * n = 1./expm1(x)             # <<<<<<<<<<<<<<
 * l = log(1. - exp(-x))
 * return NA * KB * (n * x - l)
 */
__pyx_t_4 = expm1(__pyx_v_x);
if (unlikely(__pyx_t_4 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 201, __pyx_L1_error)
}
__pyx_v_n = (1. / __pyx_t_4);
/* "quantas/utils/physics/statistical_mechanics.pyx":202
 * x = H * omega / (KB * temperature)
 * n = 1./expm1(x)
 * l = log(1. - exp(-x))             # <<<<<<<<<<<<<<
 * return NA * KB * (n * x - l)
 *
 */
__pyx_v_l = log((1. - exp((-__pyx_v_x))));
/* "quantas/utils/physics/statistical_mechanics.pyx":203
 * n = 1./expm1(x)
 * l = log(1. - exp(-x))
 * return NA * KB * (n * x - l)             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_r = ((__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_NA * __pyx_v_7quantas_5utils_7physics_21statistical_mechanics_KB) * ((__pyx_v_n * __pyx_v_x) - __pyx_v_l));
goto __pyx_L0;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":194
 * return result
 *
 * cdef double ho_S(double temperature, double omega) nogil:             # <<<<<<<<<<<<<<
 * """ Entropy of single harmonic oscillator. """
 * cdef double x, n, l
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_WriteUnraisable("quantas.utils.physics.statistical_mechanics.ho_S", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1);
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":208
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cpdef entropy(double[::1] temperature, double[:, :, ::1] band, double[::1] weights): # <<<<<<<<<<<<<<
* """
* Calculate the entropy for a complete phonon band structure according to:
*/
/*
 * Cython-generated C-level body of the cpdef function
 * `entropy(double[::1] temperature, double[:, :, ::1] band, double[::1] weights)`
 * (statistical_mechanics.pyx:208).
 *
 * Allocates a (nt, nv) float64 numpy array of zeros, then — with the GIL
 * released — accumulates ho_S(temperature[i], band[j,k,n]) * weights[j]
 * into result_view[i, n] over an OpenMP-parallel loop on the temperature
 * index i (Cython `prange`). Returns the numpy array.
 *
 * NOTE(review): boundscheck/wraparound are disabled in the .pyx, so the raw
 * pointer arithmetic below performs no index validation.
 * This file is generated; fix defects in the .pyx, not here.
 */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_7entropy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_entropy(__Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights, CYTHON_UNUSED int __pyx_skip_dispatch) {
Py_ssize_t __pyx_v_nt;
Py_ssize_t __pyx_v_nb;
Py_ssize_t __pyx_v_nf;
Py_ssize_t __pyx_v_nv;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_k;
int __pyx_v_n;
PyObject *__pyx_v_result = NULL;
__Pyx_memviewslice __pyx_v_result_view = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
int __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
int __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
int __pyx_t_18;
Py_ssize_t __pyx_t_19;
Py_ssize_t __pyx_t_20;
Py_ssize_t __pyx_t_21;
Py_ssize_t __pyx_t_22;
Py_ssize_t __pyx_t_23;
Py_ssize_t __pyx_t_24;
Py_ssize_t __pyx_t_25;
__Pyx_RefNannySetupContext("entropy", 0);
/* Problem dimensions, read from the memoryview shapes:
 * nt = #temperatures, nb = #bands, nf = #frequencies per band,
 * nv = trailing axis of `band` (also the result's second axis). */
/* "quantas/utils/physics/statistical_mechanics.pyx":243
 *
 * """
 * cdef Py_ssize_t nt = temperature.shape[0]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t nb = band.shape[0]
 * cdef Py_ssize_t nf = band.shape[1]
 */
__pyx_v_nt = (__pyx_v_temperature.shape[0]);
/* "quantas/utils/physics/statistical_mechanics.pyx":244
 * """
 * cdef Py_ssize_t nt = temperature.shape[0]
 * cdef Py_ssize_t nb = band.shape[0]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t nf = band.shape[1]
 * cdef Py_ssize_t nv = band.shape[2]
 */
__pyx_v_nb = (__pyx_v_band.shape[0]);
/* "quantas/utils/physics/statistical_mechanics.pyx":245
 * cdef Py_ssize_t nt = temperature.shape[0]
 * cdef Py_ssize_t nb = band.shape[0]
 * cdef Py_ssize_t nf = band.shape[1]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t nv = band.shape[2]
 * cdef int i, j, k, n
 */
__pyx_v_nf = (__pyx_v_band.shape[1]);
/* "quantas/utils/physics/statistical_mechanics.pyx":246
 * cdef Py_ssize_t nb = band.shape[0]
 * cdef Py_ssize_t nf = band.shape[1]
 * cdef Py_ssize_t nv = band.shape[2]             # <<<<<<<<<<<<<<
 * cdef int i, j, k, n
 *
 */
__pyx_v_nv = (__pyx_v_band.shape[2]);
/* Build `np.zeros((nt, nv), dtype=np.float64)` through the Python C-API:
 * look up np.zeros, pack the (nt, nv) tuple as the positional args, and
 * pass dtype=np.float64 via a one-entry kwargs dict. */
/* "quantas/utils/physics/statistical_mechanics.pyx":249
 * cdef int i, j, k, n
 *
 * result = np.zeros( (nt,nv), dtype=np.float64 )             # <<<<<<<<<<<<<<
 * cdef double[:,::1] result_view = result
 *
 */
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_nt); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_nv); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 249, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_result = __pyx_t_5;
__pyx_t_5 = 0;
/* Acquire a writable C-contiguous double[:, ::1] view of the result so the
 * nogil loop below can write through raw pointers. */
/* "quantas/utils/physics/statistical_mechanics.pyx":250
 *
 * result = np.zeros( (nt,nv), dtype=np.float64 )
 * cdef double[:,::1] result_view = result             # <<<<<<<<<<<<<<
 *
 * for i in prange(nt, nogil=True):
 */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_result, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 250, __pyx_L1_error)
__pyx_v_result_view = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* `prange(nt, nogil=True)`: release the GIL, then run the i-loop under
 * `#pragma omp parallel for`. Each parallel iteration owns a distinct i,
 * and all writes target row result_view[i, :], so threads never touch the
 * same element — no reduction/atomic is needed. */
/* "quantas/utils/physics/statistical_mechanics.pyx":252
 * cdef double[:,::1] result_view = result
 *
 * for i in prange(nt, nogil=True):             # <<<<<<<<<<<<<<
 * for j in range(nb):
 * for k in range(nf):
 */
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_7 = __pyx_v_nt;
if (1 == 0) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
/* Trip count for a step-1 prange: (nt - 0 + 1 - 1/|1|) / 1 == nt.
 * The expression is the generic generated form for arbitrary start/stop/step. */
__pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_9 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) lastprivate(__pyx_v_n)
#endif /* _OPENMP */
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_8);
/* Initialize private variables to invalid values */
__pyx_v_j = ((int)0xbad0bad0);
__pyx_v_k = ((int)0xbad0bad0);
__pyx_v_n = ((int)0xbad0bad0);
/* "quantas/utils/physics/statistical_mechanics.pyx":253
 *
 * for i in prange(nt, nogil=True):
 * for j in range(nb):             # <<<<<<<<<<<<<<
 * for k in range(nf):
 * for n in range(nv):
 */
__pyx_t_10 = __pyx_v_nb;
__pyx_t_11 = __pyx_t_10;
for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_j = __pyx_t_12;
/* "quantas/utils/physics/statistical_mechanics.pyx":254
 * for i in prange(nt, nogil=True):
 * for j in range(nb):
 * for k in range(nf):             # <<<<<<<<<<<<<<
 * for n in range(nv):
 * result_view[i, n] += ho_S(temperature[i], band[j,k,n]) * \
 */
__pyx_t_13 = __pyx_v_nf;
__pyx_t_14 = __pyx_t_13;
for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) {
__pyx_v_k = __pyx_t_15;
/* "quantas/utils/physics/statistical_mechanics.pyx":255
 * for j in range(nb):
 * for k in range(nf):
 * for n in range(nv):             # <<<<<<<<<<<<<<
 * result_view[i, n] += ho_S(temperature[i], band[j,k,n]) * \
 * weights[j]
 */
__pyx_t_16 = __pyx_v_nv;
__pyx_t_17 = __pyx_t_16;
for (__pyx_t_18 = 0; __pyx_t_18 < __pyx_t_17; __pyx_t_18+=1) {
__pyx_v_n = __pyx_t_18;
/* "quantas/utils/physics/statistical_mechanics.pyx":256
 * for k in range(nf):
 * for n in range(nv):
 * result_view[i, n] += ho_S(temperature[i], band[j,k,n]) * \             # <<<<<<<<<<<<<<
 * weights[j]
 *
 */
__pyx_t_19 = __pyx_v_i;
__pyx_t_20 = __pyx_v_j;
__pyx_t_21 = __pyx_v_k;
__pyx_t_22 = __pyx_v_n;
/* "quantas/utils/physics/statistical_mechanics.pyx":257
 * for n in range(nv):
 * result_view[i, n] += ho_S(temperature[i], band[j,k,n]) * \
 * weights[j]             # <<<<<<<<<<<<<<
 *
 * return result
 */
__pyx_t_23 = __pyx_v_j;
/* "quantas/utils/physics/statistical_mechanics.pyx":256
 * for k in range(nf):
 * for n in range(nv):
 * result_view[i, n] += ho_S(temperature[i], band[j,k,n]) * \             # <<<<<<<<<<<<<<
 * weights[j]
 *
 */
__pyx_t_24 = __pyx_v_i;
__pyx_t_25 = __pyx_v_n;
/* Unchecked strided access (boundscheck(False)):
 * result_view[i, n] += ho_S(temperature[i], band[j, k, n]) * weights[j].
 * The last axis of each view is contiguous, so its offset is a plain
 * element index; outer axes use the stored byte strides. */
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_result_view.data + __pyx_t_24 * __pyx_v_result_view.strides[0]) )) + __pyx_t_25)) )) += (__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_S((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_temperature.data) + __pyx_t_19)) ))), (*((double *) ( /* dim=2 */ ((char *) (((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_band.data + __pyx_t_20 * __pyx_v_band.strides[0]) ) + __pyx_t_21 * __pyx_v_band.strides[1]) )) + __pyx_t_22)) )))) * (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_weights.data) + __pyx_t_23)) ))));
}
}
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "quantas/utils/physics/statistical_mechanics.pyx":252
 * cdef double[:,::1] result_view = result
 *
 * for i in prange(nt, nogil=True):             # <<<<<<<<<<<<<<
 * for j in range(nb):
 * for k in range(nf):
 */
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
/* Re-acquire the GIL on leaving the nogil block. */
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "quantas/utils/physics/statistical_mechanics.pyx":259
 * weights[j]
 *
 * return result             # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "quantas/utils/physics/statistical_mechanics.pyx":208
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * cpdef entropy(double[::1] temperature, double[:, :, ::1] band, double[::1] weights):             # <<<<<<<<<<<<<<
 * """
 * Calculate the entropy for a complete phonon band structure according to:
 */
/* function exit code */
/* Error path: drop all temporaries, record a traceback, return NULL (0). */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.entropy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
/* Common cleanup: release the result reference and its memoryview. */
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__PYX_XDEC_MEMVIEW(&__pyx_v_result_view, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/*
 * Python-visible wrapper for `entropy`: unpacks exactly three arguments
 * (temperature, band, weights), accepting them positionally or by keyword,
 * coerces each to the required C-contiguous double memoryview, and forwards
 * to the pf trampoline. Raises TypeError on wrong arity/keywords and
 * ValueError/TypeError on buffer coercion failure. Generated by Cython —
 * do not edit by hand.
 */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_7entropy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_7quantas_5utils_7physics_21statistical_mechanics_6entropy[] = "\n    Calculate the entropy for a complete phonon band structure according to:\n\n    .. math::\n\n        S\\big(T, V \\big) = N_A k_B \\sum_{\\vec{k}} \\sum_{i=0}^{3N}{\n        \\Bigg[\n        \\frac{1}{k_B T} \\frac{h \\nu_i(\\vec{k})}\n        {e^{\\frac{h \\nu_i(\\vec{k})}{k_B T}} - 1} -\n        ln \\big(1 - e^{- \\frac{h \\nu_i(\\vec{k})}{k_B T}} \\big)\n        \\Bigg]}\n\n    with :math:`\\vec{k}` the sampled *k*-points, :math:`N` the number of\n    atoms in the considered unit cell, :math:`h` the Planck constant and\n    :math:`k_B` the Boltzmann constant.\n\n    Parameters\n    ----------\n\n    temperature: ndarray\n        Array of temperature values with `float` type.\n\n    band: ndarray(ndim=3)\n        Array of phonon band frequencies with `float` type.\n\n    weights: ndarray\n        Array of weights for each phonon band with `float` type.\n\n    Returns\n    -------\n    result: ndarray(ndim=2)\n        2D matrix containing the entropy values (in J/mol).\n\n    ";
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_7entropy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_temperature = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_band = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_weights = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("entropy (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_temperature,&__pyx_n_s_band,&__pyx_n_s_weights,0};
PyObject* values[3] = {0,0,0};
/* Slow path: keyword dict present. Collect positionals first (fallthrough
 * switch), then fill the remaining slots from keywords. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_temperature)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_band)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("entropy", 1, 3, 3, 1); __PYX_ERR(0, 208, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("entropy", 1, 3, 3, 2); __PYX_ERR(0, 208, __pyx_L3_error)
}
}
/* Leftover keywords mean an unexpected/duplicate name — raise TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "entropy") < 0)) __PYX_ERR(0, 208, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
/* Fast path: exactly three positional arguments, no keywords. */
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
/* Coerce to writable C-contiguous memoryviews: double[::1], double[:,:,::1],
 * double[::1]. Failure raises and jumps to the error label. */
__pyx_v_temperature = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_temperature.memview)) __PYX_ERR(0, 208, __pyx_L3_error)
__pyx_v_band = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_band.memview)) __PYX_ERR(0, 208, __pyx_L3_error)
__pyx_v_weights = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_weights.memview)) __PYX_ERR(0, 208, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("entropy", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 208, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.entropy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_6entropy(__pyx_self, __pyx_v_temperature, __pyx_v_band, __pyx_v_weights);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * pf-level trampoline for `entropy`: validates that all three memoryview
 * slices were successfully bound (raises UnboundLocalError otherwise) and
 * delegates to the c-level implementation with skip_dispatch=0.
 * Releases the argument memoryviews on exit. Generated by Cython.
 */
static PyObject *__pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_6entropy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("entropy", 0);
__Pyx_XDECREF(__pyx_r);
if (unlikely(!__pyx_v_temperature.memview)) { __Pyx_RaiseUnboundLocalError("temperature"); __PYX_ERR(0, 208, __pyx_L1_error) }
if (unlikely(!__pyx_v_band.memview)) { __Pyx_RaiseUnboundLocalError("band"); __PYX_ERR(0, 208, __pyx_L1_error) }
if (unlikely(!__pyx_v_weights.memview)) { __Pyx_RaiseUnboundLocalError("weights"); __PYX_ERR(0, 208, __pyx_L1_error) }
__pyx_t_1 = __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_entropy(__pyx_v_temperature, __pyx_v_band, __pyx_v_weights, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 208, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.entropy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
/* Release the borrowed argument memoryviews acquired by the pw wrapper. */
__PYX_XDEC_MEMVIEW(&__pyx_v_temperature, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_band, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_weights, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":262
*
*
* cdef inline double ho_F(double temperature, double omega) nogil: # <<<<<<<<<<<<<<
* """ Vibrational free energy of single harmonic oscillator. """
* cdef double x, hw, kt
*/
/*
 * `cdef inline double ho_F(double temperature, double omega) nogil`:
 * vibrational free energy of a single harmonic oscillator.
 *   omega <= 0        -> 0.
 *   temperature == 0  -> 1e-3 * NA * (0.5 * H * omega)        (zero-point term)
 *   otherwise         -> 1e-3 * NA * (0.5*hw + kT*log(1 - exp(-hw/kT)))
 * H, KB, NA are module-level constants (presumably Planck, Boltzmann and
 * Avogadro — defined outside this chunk; confirm in the .pyx).
 * The temperature==0 case is tested before the division, so the generated
 * ZeroDivisionError guard below is unreachable in practice; if it ever
 * fired, the nogil context means the error is reported via WriteUnraisable
 * and 0 is returned. Generated by Cython.
 */
static CYTHON_INLINE double __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_F(double __pyx_v_temperature, double __pyx_v_omega) {
double __pyx_v_x;
double __pyx_v_hw;
double __pyx_v_kt;
double __pyx_r;
int __pyx_t_1;
double __pyx_t_2;
double __pyx_t_3;
/* "quantas/utils/physics/statistical_mechanics.pyx":265
 * """ Vibrational free energy of single harmonic oscillator. """
 * cdef double x, hw, kt
 * if omega <= 0.:             # <<<<<<<<<<<<<<
 * return 0.
 *
 */
__pyx_t_1 = ((__pyx_v_omega <= 0.) != 0);
if (__pyx_t_1) {
/* "quantas/utils/physics/statistical_mechanics.pyx":266
 * cdef double x, hw, kt
 * if omega <= 0.:
 * return 0.             # <<<<<<<<<<<<<<
 *
 * hw = H * omega
 */
__pyx_r = 0.;
goto __pyx_L0;
/* "quantas/utils/physics/statistical_mechanics.pyx":265
 * """ Vibrational free energy of single harmonic oscillator. """
 * cdef double x, hw, kt
 * if omega <= 0.:             # <<<<<<<<<<<<<<
 * return 0.
 *
 */
}
/* "quantas/utils/physics/statistical_mechanics.pyx":268
 * return 0.
 *
 * hw = H * omega             # <<<<<<<<<<<<<<
 * if temperature == 0.:
 * return pow(10.,-3) * NA * (0.5 * hw)
 */
__pyx_v_hw = (__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_H * __pyx_v_omega);
/* "quantas/utils/physics/statistical_mechanics.pyx":269
 *
 * hw = H * omega
 * if temperature == 0.:             # <<<<<<<<<<<<<<
 * return pow(10.,-3) * NA * (0.5 * hw)
 * else:
 */
__pyx_t_1 = ((__pyx_v_temperature == 0.) != 0);
if (__pyx_t_1) {
/* T == 0: only the zero-point energy 0.5*hw contributes. */
/* "quantas/utils/physics/statistical_mechanics.pyx":270
 * hw = H * omega
 * if temperature == 0.:
 * return pow(10.,-3) * NA * (0.5 * hw)             # <<<<<<<<<<<<<<
 * else:
 * x = H * omega / (KB * temperature)
 */
__pyx_r = ((pow(10., -3.0) * __pyx_v_7quantas_5utils_7physics_21statistical_mechanics_NA) * (0.5 * __pyx_v_hw));
goto __pyx_L0;
/* "quantas/utils/physics/statistical_mechanics.pyx":269
 *
 * hw = H * omega
 * if temperature == 0.:             # <<<<<<<<<<<<<<
 * return pow(10.,-3) * NA * (0.5 * hw)
 * else:
 */
}
/* "quantas/utils/physics/statistical_mechanics.pyx":272
 * return pow(10.,-3) * NA * (0.5 * hw)
 * else:
 * x = H * omega / (KB * temperature)             # <<<<<<<<<<<<<<
 * kt = KB * temperature
 * return pow(10.,-3) * NA * (0.5 * hw + kt * log(1.-exp(-x)))
 */
/*else*/ {
__pyx_t_2 = (__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_H * __pyx_v_omega);
__pyx_t_3 = (__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_KB * __pyx_v_temperature);
/* Generated guard for division by zero; inside nogil it must briefly
 * re-acquire the GIL to set the Python exception. */
if (unlikely(__pyx_t_3 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 272, __pyx_L1_error)
}
__pyx_v_x = (__pyx_t_2 / __pyx_t_3);
/* "quantas/utils/physics/statistical_mechanics.pyx":273
 * else:
 * x = H * omega / (KB * temperature)
 * kt = KB * temperature             # <<<<<<<<<<<<<<
 * return pow(10.,-3) * NA * (0.5 * hw + kt * log(1.-exp(-x)))
 *
 */
__pyx_v_kt = (__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_KB * __pyx_v_temperature);
/* "quantas/utils/physics/statistical_mechanics.pyx":274
 * x = H * omega / (KB * temperature)
 * kt = KB * temperature
 * return pow(10.,-3) * NA * (0.5 * hw + kt * log(1.-exp(-x)))             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_r = ((pow(10., -3.0) * __pyx_v_7quantas_5utils_7physics_21statistical_mechanics_NA) * ((0.5 * __pyx_v_hw) + (__pyx_v_kt * log((1. - exp((-__pyx_v_x)))))));
goto __pyx_L0;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":262
 *
 *
 * cdef inline double ho_F(double temperature, double omega) nogil:             # <<<<<<<<<<<<<<
 * """ Vibrational free energy of single harmonic oscillator. """
 * cdef double x, hw, kt
 */
/* function exit code */
/* nogil + non-object return: errors cannot propagate as exceptions, so
 * they are reported as unraisable and 0 is returned instead. */
__pyx_L1_error:;
__Pyx_WriteUnraisable("quantas.utils.physics.statistical_mechanics.ho_F", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1);
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":279
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cpdef vibrational_free_energy(double[::1] temperature, double[:, :, ::1] band, double[::1] weights): # <<<<<<<<<<<<<<
* """
* Calculate the vibrational free energy for a complete phonon band
*/
/*
 * Cython-generated C-level body of the cpdef function
 * `vibrational_free_energy(temperature, band, weights)`
 * (statistical_mechanics.pyx:279). Structurally identical to the generated
 * `entropy` implementation in this file, but accumulates
 * ho_F(temperature[i], band[j,k,n]) * weights[j] into a zero-initialized
 * (nt, nv) float64 array inside a GIL-free OpenMP `prange` over the
 * temperature index i. Each parallel iteration writes only its own row
 * result_view[i, :], so no synchronization is required.
 * This file is generated; fix defects in the .pyx, not here.
 */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_9vibrational_free_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_vibrational_free_energy(__Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights, CYTHON_UNUSED int __pyx_skip_dispatch) {
Py_ssize_t __pyx_v_nt;
Py_ssize_t __pyx_v_nb;
Py_ssize_t __pyx_v_nf;
Py_ssize_t __pyx_v_nv;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_k;
int __pyx_v_n;
PyObject *__pyx_v_result = NULL;
__Pyx_memviewslice __pyx_v_result_view = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
int __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
int __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
int __pyx_t_18;
Py_ssize_t __pyx_t_19;
Py_ssize_t __pyx_t_20;
Py_ssize_t __pyx_t_21;
Py_ssize_t __pyx_t_22;
Py_ssize_t __pyx_t_23;
Py_ssize_t __pyx_t_24;
Py_ssize_t __pyx_t_25;
__Pyx_RefNannySetupContext("vibrational_free_energy", 0);
/* Dimensions: nt temperatures, band is (nb, nf, nv). */
/* "quantas/utils/physics/statistical_mechanics.pyx":313
 *
 * """
 * cdef Py_ssize_t nt = temperature.shape[0]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t nb = band.shape[0]
 * cdef Py_ssize_t nf = band.shape[1]
 */
__pyx_v_nt = (__pyx_v_temperature.shape[0]);
/* "quantas/utils/physics/statistical_mechanics.pyx":314
 * """
 * cdef Py_ssize_t nt = temperature.shape[0]
 * cdef Py_ssize_t nb = band.shape[0]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t nf = band.shape[1]
 * cdef Py_ssize_t nv = band.shape[2]
 */
__pyx_v_nb = (__pyx_v_band.shape[0]);
/* "quantas/utils/physics/statistical_mechanics.pyx":315
 * cdef Py_ssize_t nt = temperature.shape[0]
 * cdef Py_ssize_t nb = band.shape[0]
 * cdef Py_ssize_t nf = band.shape[1]             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t nv = band.shape[2]
 * cdef int i, j, k, n
 */
__pyx_v_nf = (__pyx_v_band.shape[1]);
/* "quantas/utils/physics/statistical_mechanics.pyx":316
 * cdef Py_ssize_t nb = band.shape[0]
 * cdef Py_ssize_t nf = band.shape[1]
 * cdef Py_ssize_t nv = band.shape[2]             # <<<<<<<<<<<<<<
 * cdef int i, j, k, n
 *
 */
__pyx_v_nv = (__pyx_v_band.shape[2]);
/* Build `np.zeros((nt, nv), dtype=np.float64)` through the Python C-API. */
/* "quantas/utils/physics/statistical_mechanics.pyx":319
 * cdef int i, j, k, n
 *
 * result = np.zeros( (nt,nv), dtype=np.float64 )             # <<<<<<<<<<<<<<
 * cdef double[:,::1] result_view = result
 *
 */
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 319, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 319, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_nt); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 319, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_nv); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 319, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 319, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 319, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 319, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 319, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 319, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 319, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 319, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_result = __pyx_t_5;
__pyx_t_5 = 0;
/* Writable C-contiguous view of the result for the nogil loop. */
/* "quantas/utils/physics/statistical_mechanics.pyx":320
 *
 * result = np.zeros( (nt,nv), dtype=np.float64 )
 * cdef double[:,::1] result_view = result             # <<<<<<<<<<<<<<
 *
 * for i in prange(nt, nogil=True):
 */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_result, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 320, __pyx_L1_error)
__pyx_v_result_view = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* `prange(nt, nogil=True)`: GIL released, OpenMP parallel over i. */
/* "quantas/utils/physics/statistical_mechanics.pyx":322
 * cdef double[:,::1] result_view = result
 *
 * for i in prange(nt, nogil=True):             # <<<<<<<<<<<<<<
 * for j in range(nb):
 * for k in range(nf):
 */
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_7 = __pyx_v_nt;
if (1 == 0) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
/* Trip count for a step-1 prange: equals nt. */
__pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_9 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) lastprivate(__pyx_v_n)
#endif /* _OPENMP */
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_8);
/* Initialize private variables to invalid values */
__pyx_v_j = ((int)0xbad0bad0);
__pyx_v_k = ((int)0xbad0bad0);
__pyx_v_n = ((int)0xbad0bad0);
/* "quantas/utils/physics/statistical_mechanics.pyx":323
 *
 * for i in prange(nt, nogil=True):
 * for j in range(nb):             # <<<<<<<<<<<<<<
 * for k in range(nf):
 * for n in range(nv):
 */
__pyx_t_10 = __pyx_v_nb;
__pyx_t_11 = __pyx_t_10;
for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_j = __pyx_t_12;
/* "quantas/utils/physics/statistical_mechanics.pyx":324
 * for i in prange(nt, nogil=True):
 * for j in range(nb):
 * for k in range(nf):             # <<<<<<<<<<<<<<
 * for n in range(nv):
 * result_view[i, n] += ho_F(temperature[i], band[j,k,n]) * \
 */
__pyx_t_13 = __pyx_v_nf;
__pyx_t_14 = __pyx_t_13;
for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) {
__pyx_v_k = __pyx_t_15;
/* "quantas/utils/physics/statistical_mechanics.pyx":325
 * for j in range(nb):
 * for k in range(nf):
 * for n in range(nv):             # <<<<<<<<<<<<<<
 * result_view[i, n] += ho_F(temperature[i], band[j,k,n]) * \
 * weights[j]
 */
__pyx_t_16 = __pyx_v_nv;
__pyx_t_17 = __pyx_t_16;
for (__pyx_t_18 = 0; __pyx_t_18 < __pyx_t_17; __pyx_t_18+=1) {
__pyx_v_n = __pyx_t_18;
/* "quantas/utils/physics/statistical_mechanics.pyx":326
 * for k in range(nf):
 * for n in range(nv):
 * result_view[i, n] += ho_F(temperature[i], band[j,k,n]) * \             # <<<<<<<<<<<<<<
 * weights[j]
 *
 */
__pyx_t_19 = __pyx_v_i;
__pyx_t_20 = __pyx_v_j;
__pyx_t_21 = __pyx_v_k;
__pyx_t_22 = __pyx_v_n;
/* "quantas/utils/physics/statistical_mechanics.pyx":327
 * for n in range(nv):
 * result_view[i, n] += ho_F(temperature[i], band[j,k,n]) * \
 * weights[j]             # <<<<<<<<<<<<<<
 *
 * return result
 */
__pyx_t_23 = __pyx_v_j;
/* "quantas/utils/physics/statistical_mechanics.pyx":326
 * for k in range(nf):
 * for n in range(nv):
 * result_view[i, n] += ho_F(temperature[i], band[j,k,n]) * \             # <<<<<<<<<<<<<<
 * weights[j]
 *
 */
__pyx_t_24 = __pyx_v_i;
__pyx_t_25 = __pyx_v_n;
/* Unchecked strided access (boundscheck(False)):
 * result_view[i, n] += ho_F(temperature[i], band[j, k, n]) * weights[j]. */
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_result_view.data + __pyx_t_24 * __pyx_v_result_view.strides[0]) )) + __pyx_t_25)) )) += (__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_F((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_temperature.data) + __pyx_t_19)) ))), (*((double *) ( /* dim=2 */ ((char *) (((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_band.data + __pyx_t_20 * __pyx_v_band.strides[0]) ) + __pyx_t_21 * __pyx_v_band.strides[1]) )) + __pyx_t_22)) )))) * (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_weights.data) + __pyx_t_23)) ))));
}
}
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "quantas/utils/physics/statistical_mechanics.pyx":322
 * cdef double[:,::1] result_view = result
 *
 * for i in prange(nt, nogil=True):             # <<<<<<<<<<<<<<
 * for j in range(nb):
 * for k in range(nf):
 */
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
/* Re-acquire the GIL on leaving the nogil block. */
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "quantas/utils/physics/statistical_mechanics.pyx":329
 * weights[j]
 *
 * return result             # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "quantas/utils/physics/statistical_mechanics.pyx":279
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * cpdef vibrational_free_energy(double[::1] temperature, double[:, :, ::1] band, double[::1] weights):             # <<<<<<<<<<<<<<
 * """
 * Calculate the vibrational free energy for a complete phonon band
 */
/* function exit code */
/* Error path: drop all temporaries, record a traceback, return NULL (0). */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.vibrational_free_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__PYX_XDEC_MEMVIEW(&__pyx_v_result_view, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/*
 * NOTE(review): the docstring formula below writes the log term as
 * ln(1 - e^{h nu / kB T}), but the ho_F implementation in this file
 * computes log(1 - exp(-x)) with x = H*omega/(KB*T) — the exponent is
 * negative. The docstring appears to be missing the minus sign; presumably
 * a typo in the .pyx (this file is generated — fix it there, not here).
 */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_9vibrational_free_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_7quantas_5utils_7physics_21statistical_mechanics_8vibrational_free_energy[] = "\n    Calculate the vibrational free energy for a complete phonon band\n    structure as:\n\n    .. math::\n\n        F_{vib}^{QHA}\\big(T, V \\big) = k_B T \\sum_{\\vec{k}}\n        \\sum_{i=0}^{3N}\n        \\Bigg[ ln \\big(1 - e^{\\frac{h \\nu_i(\\vec{k})}{k_B T}} \\big) \\Bigg]\n\n    with :math:`\\vec{k}` the sampled *k*-points, :math:`N` the number of\n    atoms in the considered unit cell, :math:`h` the Planck constant and\n    :math:`k_B` the Boltzmann constant.\n\n    Parameters\n    ----------\n\n    temperature: ndarray\n        Array of temperature values with `float` type.\n\n    band: ndarray(ndim=3)\n        Array of phonon band frequencies with `float` type.\n\n    weights: ndarray\n        Array of weights for each phonon band with `float` type.\n\n    Returns\n    -------\n\n    result: ndarray\n        Array of vibrational free energy values with `float` type.\n\n    ";
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_9vibrational_free_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_temperature = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_band = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_weights = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("vibrational_free_energy (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_temperature,&__pyx_n_s_band,&__pyx_n_s_weights,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_temperature)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_band)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("vibrational_free_energy", 1, 3, 3, 1); __PYX_ERR(0, 279, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("vibrational_free_energy", 1, 3, 3, 2); __PYX_ERR(0, 279, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vibrational_free_energy") < 0)) __PYX_ERR(0, 279, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v_temperature = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_temperature.memview)) __PYX_ERR(0, 279, __pyx_L3_error)
__pyx_v_band = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_band.memview)) __PYX_ERR(0, 279, __pyx_L3_error)
__pyx_v_weights = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_weights.memview)) __PYX_ERR(0, 279, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("vibrational_free_energy", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 279, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.vibrational_free_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_8vibrational_free_energy(__pyx_self, __pyx_v_temperature, __pyx_v_band, __pyx_v_weights);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Cython-generated impl shim for vibrational_free_energy: validates that the
 * three memoryview arguments are bound, forwards them to the C-level cpdef
 * function (skip_dispatch=0), and releases the memoryview references on exit.
 * Generated file -- edit statistical_mechanics.pyx, not this code.
 */
static PyObject *__pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_8vibrational_free_energy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("vibrational_free_energy", 0);
__Pyx_XDECREF(__pyx_r);
/* Guard against unbound (NULL) memoryview slices before dereferencing. */
if (unlikely(!__pyx_v_temperature.memview)) { __Pyx_RaiseUnboundLocalError("temperature"); __PYX_ERR(0, 279, __pyx_L1_error) }
if (unlikely(!__pyx_v_band.memview)) { __Pyx_RaiseUnboundLocalError("band"); __PYX_ERR(0, 279, __pyx_L1_error) }
if (unlikely(!__pyx_v_weights.memview)) { __Pyx_RaiseUnboundLocalError("weights"); __PYX_ERR(0, 279, __pyx_L1_error) }
__pyx_t_1 = __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_vibrational_free_energy(__pyx_v_temperature, __pyx_v_band, __pyx_v_weights, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 279, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.vibrational_free_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_temperature, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_band, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_weights, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":334
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * cpdef free_energy(double[::1] U0, double[:,::1] Fvib):             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t nt = Fvib.shape[0]
 *     cdef Py_ssize_t nv = U0.shape[0]
 */
/*
 * Cython-generated C body of cpdef free_energy(U0, Fvib).
 * Allocates a zeroed (nt, nv) float64 ndarray and accumulates
 * result[i, j] += U0[j] + Fvib[i, j] in a GIL-released OpenMP prange
 * over i (the embedded .pyx comments below show the original source).
 * Generated file -- edit statistical_mechanics.pyx, not this code.
 * NOTE(review): `Fvib_v` is declared but CYTHON_UNUSED because the .pyx
 * body reads `Fvib[i,j]` directly instead of the `Fvib_v` alias.
 */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_11free_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_free_energy(__Pyx_memviewslice __pyx_v_U0, __Pyx_memviewslice __pyx_v_Fvib, CYTHON_UNUSED int __pyx_skip_dispatch) {
Py_ssize_t __pyx_v_nt;
Py_ssize_t __pyx_v_nv;
int __pyx_v_i;
int __pyx_v_j;
__Pyx_memviewslice __pyx_v_U0_v = { 0, 0, { 0 }, { 0 }, { 0 } };
CYTHON_UNUSED __Pyx_memviewslice __pyx_v_Fvib_v = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_v_result = NULL;
__Pyx_memviewslice __pyx_v_result_view = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
int __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
__Pyx_RefNannySetupContext("free_energy", 0);
/* "quantas/utils/physics/statistical_mechanics.pyx":335
 * @cython.wraparound(False)
 * cpdef free_energy(double[::1] U0, double[:,::1] Fvib):
 *     cdef Py_ssize_t nt = Fvib.shape[0]             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t nv = U0.shape[0]
 *     cdef int i, j
 */
__pyx_v_nt = (__pyx_v_Fvib.shape[0]);
/* "quantas/utils/physics/statistical_mechanics.pyx":336
 * cpdef free_energy(double[::1] U0, double[:,::1] Fvib):
 *     cdef Py_ssize_t nt = Fvib.shape[0]
 *     cdef Py_ssize_t nv = U0.shape[0]             # <<<<<<<<<<<<<<
 *     cdef int i, j
 *
 */
__pyx_v_nv = (__pyx_v_U0.shape[0]);
/* "quantas/utils/physics/statistical_mechanics.pyx":339
 *     cdef int i, j
 *
 *     cdef double[::1] U0_v = U0             # <<<<<<<<<<<<<<
 *     cdef double[:,::1] Fvib_v = Fvib
 *
 */
__PYX_INC_MEMVIEW(&__pyx_v_U0, 0);
__pyx_v_U0_v = __pyx_v_U0;
/* "quantas/utils/physics/statistical_mechanics.pyx":340
 *
 *     cdef double[::1] U0_v = U0
 *     cdef double[:,::1] Fvib_v = Fvib             # <<<<<<<<<<<<<<
 *
 *     result = np.zeros( (nt,nv), dtype=np.float64 )
 */
__PYX_INC_MEMVIEW(&__pyx_v_Fvib, 0);
__pyx_v_Fvib_v = __pyx_v_Fvib;
/* "quantas/utils/physics/statistical_mechanics.pyx":342
 *     cdef double[:,::1] Fvib_v = Fvib
 *
 *     result = np.zeros( (nt,nv), dtype=np.float64 )             # <<<<<<<<<<<<<<
 *     cdef double[:,::1] result_view = result
 *
 */
/* Build and invoke np.zeros((nt, nv), dtype=np.float64) via the C API. */
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 342, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 342, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_nt); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 342, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_nv); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 342, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 342, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 342, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 342, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 342, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 342, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 342, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 342, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_result = __pyx_t_5;
__pyx_t_5 = 0;
/* "quantas/utils/physics/statistical_mechanics.pyx":343
 *
 *     result = np.zeros( (nt,nv), dtype=np.float64 )
 *     cdef double[:,::1] result_view = result             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_result, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 343, __pyx_L1_error)
__pyx_v_result_view = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "quantas/utils/physics/statistical_mechanics.pyx":346
 *
 *
 *     for i in prange(nt, nogil=True):             # <<<<<<<<<<<<<<
 *         for j in range(nv):
 *             result_view[i, j] += U0_v[j] + Fvib[i,j]
 */
/* GIL is released for the whole prange region and re-acquired on exit. */
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_7 = __pyx_v_nt;
/* Constant-false guard emitted by Cython's prange expansion; never taken. */
if (1 == 0) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x)   (x)
#define unlikely(x) (x)
#endif
/* Iteration count for start=0, stop=nt, step=1. */
__pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_9 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j)
#endif /* _OPENMP */
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_8);
/* Initialize private variables to invalid values */
__pyx_v_j = ((int)0xbad0bad0);
/* "quantas/utils/physics/statistical_mechanics.pyx":347
 *
 *     for i in prange(nt, nogil=True):
 *         for j in range(nv):             # <<<<<<<<<<<<<<
 *             result_view[i, j] += U0_v[j] + Fvib[i,j]
 *
 */
__pyx_t_10 = __pyx_v_nv;
__pyx_t_11 = __pyx_t_10;
for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_j = __pyx_t_12;
/* "quantas/utils/physics/statistical_mechanics.pyx":348
 *     for i in prange(nt, nogil=True):
 *         for j in range(nv):
 *             result_view[i, j] += U0_v[j] + Fvib[i,j]             # <<<<<<<<<<<<<<
 *
 *     return result
 */
__pyx_t_13 = __pyx_v_j;
__pyx_t_14 = __pyx_v_i;
__pyx_t_15 = __pyx_v_j;
__pyx_t_16 = __pyx_v_i;
__pyx_t_17 = __pyx_v_j;
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_result_view.data + __pyx_t_16 * __pyx_v_result_view.strides[0]) )) + __pyx_t_17)) )) += ((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_U0_v.data) + __pyx_t_13)) ))) + (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_Fvib.data + __pyx_t_14 * __pyx_v_Fvib.strides[0]) )) + __pyx_t_15)) ))));
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "quantas/utils/physics/statistical_mechanics.pyx":346
 *
 *
 *     for i in prange(nt, nogil=True):             # <<<<<<<<<<<<<<
 *         for j in range(nv):
 *             result_view[i, j] += U0_v[j] + Fvib[i,j]
 */
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "quantas/utils/physics/statistical_mechanics.pyx":350
 *             result_view[i, j] += U0_v[j] + Fvib[i,j]
 *
 *     return result             # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "quantas/utils/physics/statistical_mechanics.pyx":334
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * cpdef free_energy(double[::1] U0, double[:,::1] Fvib):             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t nt = Fvib.shape[0]
 *     cdef Py_ssize_t nv = U0.shape[0]
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.free_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_U0_v, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_Fvib_v, 1);
__Pyx_XDECREF(__pyx_v_result);
__PYX_XDEC_MEMVIEW(&__pyx_v_result_view, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/*
 * CPython entry point for free_energy(U0, Fvib): parses the two arguments
 * (positional and/or keyword), converts them to C-contiguous double
 * memoryview slices, and dispatches to the impl shim.
 * Generated by Cython -- edit statistical_mechanics.pyx, not this code.
 */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_11free_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_11free_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_U0 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_Fvib = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("free_energy (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_U0,&__pyx_n_s_Fvib,0};
PyObject* values[2] = {0,0};
/* Slow path: keyword arguments present. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_U0)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Fvib)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("free_energy", 1, 2, 2, 1); __PYX_ERR(0, 334, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "free_energy") < 0)) __PYX_ERR(0, 334, __pyx_L3_error)
}
/* Fast path: both arguments passed positionally. */
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
/* NOTE(review): PyBUF_WRITABLE means read-only input buffers are rejected. */
__pyx_v_U0 = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_U0.memview)) __PYX_ERR(0, 334, __pyx_L3_error)
__pyx_v_Fvib = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_Fvib.memview)) __PYX_ERR(0, 334, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("free_energy", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 334, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.free_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_10free_energy(__pyx_self, __pyx_v_U0, __pyx_v_Fvib);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Cython-generated impl shim for free_energy: checks the memoryview
 * arguments are bound, forwards to the C-level cpdef function
 * (skip_dispatch=0), and releases the memoryview references on exit.
 * Generated file -- edit statistical_mechanics.pyx, not this code.
 */
static PyObject *__pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_10free_energy(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_U0, __Pyx_memviewslice __pyx_v_Fvib) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("free_energy", 0);
__Pyx_XDECREF(__pyx_r);
/* Guard against unbound (NULL) memoryview slices before dereferencing. */
if (unlikely(!__pyx_v_U0.memview)) { __Pyx_RaiseUnboundLocalError("U0"); __PYX_ERR(0, 334, __pyx_L1_error) }
if (unlikely(!__pyx_v_Fvib.memview)) { __Pyx_RaiseUnboundLocalError("Fvib"); __PYX_ERR(0, 334, __pyx_L1_error) }
__pyx_t_1 = __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_free_energy(__pyx_v_U0, __pyx_v_Fvib, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 334, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.free_energy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_U0, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_Fvib, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":353
 *
 *
 * cdef inline double ho_Cv(double temperature, double omega) nogil:             # <<<<<<<<<<<<<<
 *     """ Isochoric heat capacity of single harmonic oscillator. """
 *     cdef double x, e, n
 */
/*
 * Cython-generated C body of the inline nogil helper ho_Cv(temperature, omega):
 * isochoric heat capacity of a single harmonic oscillator.
 * Returns 0 when omega <= 0 or temperature == 0; otherwise computes
 * x = H*omega/(KB*temperature) and returns NA*KB*exp(x)*(x/expm1(x))^2
 * (H, KB, NA are module-level constants defined elsewhere in this file).
 * Division-by-zero checks temporarily re-acquire the GIL to raise
 * ZeroDivisionError; because the function is nogil/non-Python, such an error
 * is reported via __Pyx_WriteUnraisable and 0 is returned.
 * Generated file -- edit statistical_mechanics.pyx, not this code.
 */
static CYTHON_INLINE double __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_Cv(double __pyx_v_temperature, double __pyx_v_omega) {
double __pyx_v_x;
double __pyx_v_e;
double __pyx_v_n;
double __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
double __pyx_t_3;
double __pyx_t_4;
/* "quantas/utils/physics/statistical_mechanics.pyx":356
 *     """ Isochoric heat capacity of single harmonic oscillator. """
 *     cdef double x, e, n
 *     if omega <= 0. or temperature == 0.:             # <<<<<<<<<<<<<<
 *         return 0.
 *     else:
 */
__pyx_t_2 = ((__pyx_v_omega <= 0.) != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_temperature == 0.) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "quantas/utils/physics/statistical_mechanics.pyx":357
 *     cdef double x, e, n
 *     if omega <= 0. or temperature == 0.:
 *         return 0.             # <<<<<<<<<<<<<<
 *     else:
 *         x = H * omega / (KB * temperature)
 */
__pyx_r = 0.;
goto __pyx_L0;
/* "quantas/utils/physics/statistical_mechanics.pyx":356
 *     """ Isochoric heat capacity of single harmonic oscillator. """
 *     cdef double x, e, n
 *     if omega <= 0. or temperature == 0.:             # <<<<<<<<<<<<<<
 *         return 0.
 *     else:
 */
}
/* "quantas/utils/physics/statistical_mechanics.pyx":359
 *         return 0.
 *     else:
 *         x = H * omega / (KB * temperature)             # <<<<<<<<<<<<<<
 *         e = exp(x)
 *         n = pow((x/expm1(x)),2)
 */
/*else*/ {
__pyx_t_3 = (__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_H * __pyx_v_omega);
__pyx_t_4 = (__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_KB * __pyx_v_temperature);
/* Guard the division: re-acquire the GIL only to set ZeroDivisionError. */
if (unlikely(__pyx_t_4 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 359, __pyx_L1_error)
}
__pyx_v_x = (__pyx_t_3 / __pyx_t_4);
/* "quantas/utils/physics/statistical_mechanics.pyx":360
 *     else:
 *         x = H * omega / (KB * temperature)
 *         e = exp(x)             # <<<<<<<<<<<<<<
 *         n = pow((x/expm1(x)),2)
 *         if n == 0.:
 */
__pyx_v_e = exp(__pyx_v_x);
/* "quantas/utils/physics/statistical_mechanics.pyx":361
 *         x = H * omega / (KB * temperature)
 *         e = exp(x)
 *         n = pow((x/expm1(x)),2)             # <<<<<<<<<<<<<<
 *         if n == 0.:
 *             return 0.
 */
__pyx_t_4 = expm1(__pyx_v_x);
/* Same GIL-guarded zero check for the x/expm1(x) division. */
if (unlikely(__pyx_t_4 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 361, __pyx_L1_error)
}
__pyx_v_n = pow((__pyx_v_x / __pyx_t_4), 2.0);
/* "quantas/utils/physics/statistical_mechanics.pyx":362
 *         e = exp(x)
 *         n = pow((x/expm1(x)),2)
 *         if n == 0.:             # <<<<<<<<<<<<<<
 *             return 0.
 *         else:
 */
__pyx_t_1 = ((__pyx_v_n == 0.) != 0);
if (__pyx_t_1) {
/* "quantas/utils/physics/statistical_mechanics.pyx":363
 *         n = pow((x/expm1(x)),2)
 *         if n == 0.:
 *             return 0.             # <<<<<<<<<<<<<<
 *         else:
 *             return NA * KB * e * n
 */
__pyx_r = 0.;
goto __pyx_L0;
/* "quantas/utils/physics/statistical_mechanics.pyx":362
 *         e = exp(x)
 *         n = pow((x/expm1(x)),2)
 *         if n == 0.:             # <<<<<<<<<<<<<<
 *             return 0.
 *         else:
 */
}
/* "quantas/utils/physics/statistical_mechanics.pyx":365
 *             return 0.
 *         else:
 *             return NA * KB * e * n             # <<<<<<<<<<<<<<
 *
 * @cython.boundscheck(False)
 */
/*else*/ {
__pyx_r = (((__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_NA * __pyx_v_7quantas_5utils_7physics_21statistical_mechanics_KB) * __pyx_v_e) * __pyx_v_n);
goto __pyx_L0;
}
}
/* "quantas/utils/physics/statistical_mechanics.pyx":353
 *
 *
 * cdef inline double ho_Cv(double temperature, double omega) nogil:             # <<<<<<<<<<<<<<
 *     """ Isochoric heat capacity of single harmonic oscillator. """
 *     cdef double x, e, n
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_WriteUnraisable("quantas.utils.physics.statistical_mechanics.ho_Cv", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1);
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
/* "quantas/utils/physics/statistical_mechanics.pyx":369
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * cpdef isochoric_heat_capacity(double[::1] temperature, double[:, :, ::1] band, double[::1] weights):             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t nt = temperature.shape[0]
 *     cdef Py_ssize_t nb = band.shape[0]
 */
/*
 * Cython-generated C body of cpdef isochoric_heat_capacity(temperature, band,
 * weights). Allocates a zeroed (nt, nv) float64 ndarray and accumulates
 * result[i, n] += ho_Cv(temperature[i], band[j, k, n]) * weights[j]
 * over j in range(nb), k in range(nf), n in range(nv), inside a GIL-released
 * OpenMP prange over i (the embedded .pyx comments below show the original
 * source). Generated file -- edit statistical_mechanics.pyx, not this code.
 */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_13isochoric_heat_capacity(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_isochoric_heat_capacity(__Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights, CYTHON_UNUSED int __pyx_skip_dispatch) {
Py_ssize_t __pyx_v_nt;
Py_ssize_t __pyx_v_nb;
Py_ssize_t __pyx_v_nf;
Py_ssize_t __pyx_v_nv;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_k;
int __pyx_v_n;
PyObject *__pyx_v_result = NULL;
__Pyx_memviewslice __pyx_v_result_view = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
Py_ssize_t __pyx_t_7;
Py_ssize_t __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
int __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
int __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
int __pyx_t_18;
Py_ssize_t __pyx_t_19;
Py_ssize_t __pyx_t_20;
Py_ssize_t __pyx_t_21;
Py_ssize_t __pyx_t_22;
Py_ssize_t __pyx_t_23;
Py_ssize_t __pyx_t_24;
Py_ssize_t __pyx_t_25;
__Pyx_RefNannySetupContext("isochoric_heat_capacity", 0);
/* "quantas/utils/physics/statistical_mechanics.pyx":370
 * @cython.wraparound(False)
 * cpdef isochoric_heat_capacity(double[::1] temperature, double[:, :, ::1] band, double[::1] weights):
 *     cdef Py_ssize_t nt = temperature.shape[0]             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t nb = band.shape[0]
 *     cdef Py_ssize_t nf = band.shape[1]
 */
__pyx_v_nt = (__pyx_v_temperature.shape[0]);
/* "quantas/utils/physics/statistical_mechanics.pyx":371
 * cpdef isochoric_heat_capacity(double[::1] temperature, double[:, :, ::1] band, double[::1] weights):
 *     cdef Py_ssize_t nt = temperature.shape[0]
 *     cdef Py_ssize_t nb = band.shape[0]             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t nf = band.shape[1]
 *     cdef Py_ssize_t nv = band.shape[2]
 */
__pyx_v_nb = (__pyx_v_band.shape[0]);
/* "quantas/utils/physics/statistical_mechanics.pyx":372
 *     cdef Py_ssize_t nt = temperature.shape[0]
 *     cdef Py_ssize_t nb = band.shape[0]
 *     cdef Py_ssize_t nf = band.shape[1]             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t nv = band.shape[2]
 *     cdef int i, j, k, n
 */
__pyx_v_nf = (__pyx_v_band.shape[1]);
/* "quantas/utils/physics/statistical_mechanics.pyx":373
 *     cdef Py_ssize_t nb = band.shape[0]
 *     cdef Py_ssize_t nf = band.shape[1]
 *     cdef Py_ssize_t nv = band.shape[2]             # <<<<<<<<<<<<<<
 *     cdef int i, j, k, n
 *
 */
__pyx_v_nv = (__pyx_v_band.shape[2]);
/* "quantas/utils/physics/statistical_mechanics.pyx":376
 *     cdef int i, j, k, n
 *
 *     result = np.zeros( (nt,nv), dtype=np.float64 )             # <<<<<<<<<<<<<<
 *     cdef double[:,::1] result_view = result
 *
 */
/* Build and invoke np.zeros((nt, nv), dtype=np.float64) via the C API. */
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 376, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 376, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_nt); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 376, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_nv); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 376, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 376, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 376, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 376, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 376, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 376, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 376, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 376, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_result = __pyx_t_5;
__pyx_t_5 = 0;
/* "quantas/utils/physics/statistical_mechanics.pyx":377
 *
 *     result = np.zeros( (nt,nv), dtype=np.float64 )
 *     cdef double[:,::1] result_view = result             # <<<<<<<<<<<<<<
 *
 *     for i in prange(nt, nogil=True):
 */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_result, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 377, __pyx_L1_error)
__pyx_v_result_view = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "quantas/utils/physics/statistical_mechanics.pyx":379
 *     cdef double[:,::1] result_view = result
 *
 *     for i in prange(nt, nogil=True):             # <<<<<<<<<<<<<<
 *         for j in range(nb):
 *             for k in range(nf):
 */
/* GIL is released for the whole prange region and re-acquired on exit. */
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_7 = __pyx_v_nt;
/* Constant-false guard emitted by Cython's prange expansion; never taken. */
if (1 == 0) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x)   (x)
#define unlikely(x) (x)
#endif
/* Iteration count for start=0, stop=nt, step=1. */
__pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_9 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) lastprivate(__pyx_v_n)
#endif /* _OPENMP */
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_8);
/* Initialize private variables to invalid values */
__pyx_v_j = ((int)0xbad0bad0);
__pyx_v_k = ((int)0xbad0bad0);
__pyx_v_n = ((int)0xbad0bad0);
/* "quantas/utils/physics/statistical_mechanics.pyx":380
 *
 *     for i in prange(nt, nogil=True):
 *         for j in range(nb):             # <<<<<<<<<<<<<<
 *             for k in range(nf):
 *                 for n in range(nv):
 */
__pyx_t_10 = __pyx_v_nb;
__pyx_t_11 = __pyx_t_10;
for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_j = __pyx_t_12;
/* "quantas/utils/physics/statistical_mechanics.pyx":381
 *     for i in prange(nt, nogil=True):
 *         for j in range(nb):
 *             for k in range(nf):             # <<<<<<<<<<<<<<
 *                 for n in range(nv):
 *                     result_view[i, n] += ho_Cv(temperature[i], band[j,k,n]) * \
 */
__pyx_t_13 = __pyx_v_nf;
__pyx_t_14 = __pyx_t_13;
for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) {
__pyx_v_k = __pyx_t_15;
/* "quantas/utils/physics/statistical_mechanics.pyx":382
 *         for j in range(nb):
 *             for k in range(nf):
 *                 for n in range(nv):             # <<<<<<<<<<<<<<
 *                     result_view[i, n] += ho_Cv(temperature[i], band[j,k,n]) * \
 *                         weights[j]
 */
__pyx_t_16 = __pyx_v_nv;
__pyx_t_17 = __pyx_t_16;
for (__pyx_t_18 = 0; __pyx_t_18 < __pyx_t_17; __pyx_t_18+=1) {
__pyx_v_n = __pyx_t_18;
/* "quantas/utils/physics/statistical_mechanics.pyx":383
 *             for k in range(nf):
 *                 for n in range(nv):
 *                     result_view[i, n] += ho_Cv(temperature[i], band[j,k,n]) * \             # <<<<<<<<<<<<<<
 *                         weights[j]
 *
 */
__pyx_t_19 = __pyx_v_i;
__pyx_t_20 = __pyx_v_j;
__pyx_t_21 = __pyx_v_k;
__pyx_t_22 = __pyx_v_n;
/* "quantas/utils/physics/statistical_mechanics.pyx":384
 *                 for n in range(nv):
 *                     result_view[i, n] += ho_Cv(temperature[i], band[j,k,n]) * \
 *                         weights[j]             # <<<<<<<<<<<<<<
 *
 *     return result
 */
__pyx_t_23 = __pyx_v_j;
/* "quantas/utils/physics/statistical_mechanics.pyx":383
 *             for k in range(nf):
 *                 for n in range(nv):
 *                     result_view[i, n] += ho_Cv(temperature[i], band[j,k,n]) * \             # <<<<<<<<<<<<<<
 *                         weights[j]
 *
 */
__pyx_t_24 = __pyx_v_i;
__pyx_t_25 = __pyx_v_n;
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_result_view.data + __pyx_t_24 * __pyx_v_result_view.strides[0]) )) + __pyx_t_25)) )) += (__pyx_f_7quantas_5utils_7physics_21statistical_mechanics_ho_Cv((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_temperature.data) + __pyx_t_19)) ))), (*((double *) ( /* dim=2 */ ((char *) (((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_band.data + __pyx_t_20 * __pyx_v_band.strides[0]) ) + __pyx_t_21 * __pyx_v_band.strides[1]) )) + __pyx_t_22)) )))) * (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_weights.data) + __pyx_t_23)) ))));
}
}
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "quantas/utils/physics/statistical_mechanics.pyx":379
 *     cdef double[:,::1] result_view = result
 *
 *     for i in prange(nt, nogil=True):             # <<<<<<<<<<<<<<
 *         for j in range(nb):
 *             for k in range(nf):
 */
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "quantas/utils/physics/statistical_mechanics.pyx":386
 *                         weights[j]
 *
 *     return result             # <<<<<<<<<<<<<<
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "quantas/utils/physics/statistical_mechanics.pyx":369
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * cpdef isochoric_heat_capacity(double[::1] temperature, double[:, :, ::1] band, double[::1] weights):             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t nt = temperature.shape[0]
 *     cdef Py_ssize_t nb = band.shape[0]
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.isochoric_heat_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__PYX_XDEC_MEMVIEW(&__pyx_v_result_view, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/*
 * CPython entry point for
 * quantas.utils.physics.statistical_mechanics.isochoric_heat_capacity.
 * Parses (temperature, band, weights) from the positional tuple and/or
 * keyword dict, converts each to a typed C-contiguous double memoryview
 * slice, and delegates to the __pyx_pf_... implementation below.
 * Returns NULL with an exception set on any parsing/conversion failure.
 * NOTE: this file is Cython-generated output — do not edit by hand;
 * change the .pyx source and regenerate instead.
 */
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_13isochoric_heat_capacity(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_13isochoric_heat_capacity(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_temperature = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_band = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_weights = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("isochoric_heat_capacity (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_temperature,&__pyx_n_s_band,&__pyx_n_s_weights,0};
PyObject* values[3] = {0,0,0};
/* Keyword (or mixed positional+keyword) call: fill slots from the
 * positional tuple first, then look up the remaining names in kwargs. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_temperature)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_band)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("isochoric_heat_capacity", 1, 3, 3, 1); __PYX_ERR(0, 369, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_weights)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("isochoric_heat_capacity", 1, 3, 3, 2); __PYX_ERR(0, 369, __pyx_L3_error)
}
}
/* Any keyword left over at this point is unexpected (or a duplicate). */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "isochoric_heat_capacity") < 0)) __PYX_ERR(0, 369, __pyx_L3_error)
}
/* Positional-only fast path: exactly three arguments required. */
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
/* Acquire the buffers as C-contiguous double memoryview slices.
 * PyBUF_WRITABLE is requested for all three, so read-only arrays will
 * be rejected here even though the function only reads them. */
__pyx_v_temperature = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_temperature.memview)) __PYX_ERR(0, 369, __pyx_L3_error)
__pyx_v_band = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_band.memview)) __PYX_ERR(0, 369, __pyx_L3_error)
__pyx_v_weights = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_weights.memview)) __PYX_ERR(0, 369, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("isochoric_heat_capacity", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 369, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.isochoric_heat_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_12isochoric_heat_capacity(__pyx_self, __pyx_v_temperature, __pyx_v_band, __pyx_v_weights);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Python-level body of isochoric_heat_capacity.  Verifies that all three
 * memoryview arguments were successfully bound, then forwards to the
 * C-level cpdef implementation (__pyx_f_...) with its trailing int flag
 * set to 0, as generated by Cython for cpdef dispatch.  The memoryview
 * slices are released unconditionally on exit.
 */
static PyObject *__pyx_pf_7quantas_5utils_7physics_21statistical_mechanics_12isochoric_heat_capacity(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_temperature, __Pyx_memviewslice __pyx_v_band, __Pyx_memviewslice __pyx_v_weights) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("isochoric_heat_capacity", 0);
__Pyx_XDECREF(__pyx_r);
/* Guard against unbound (NULL-memview) slices before touching them. */
if (unlikely(!__pyx_v_temperature.memview)) { __Pyx_RaiseUnboundLocalError("temperature"); __PYX_ERR(0, 369, __pyx_L1_error) }
if (unlikely(!__pyx_v_band.memview)) { __Pyx_RaiseUnboundLocalError("band"); __PYX_ERR(0, 369, __pyx_L1_error) }
if (unlikely(!__pyx_v_weights.memview)) { __Pyx_RaiseUnboundLocalError("weights"); __PYX_ERR(0, 369, __pyx_L1_error) }
__pyx_t_1 = __pyx_f_7quantas_5utils_7physics_21statistical_mechanics_isochoric_heat_capacity(__pyx_v_temperature, __pyx_v_band, __pyx_v_weights, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 369, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("quantas.utils.physics.statistical_mechanics.isochoric_heat_capacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
/* Release the argument slices on both success and error paths. */
__PYX_XDEC_MEMVIEW(&__pyx_v_temperature, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_band, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_weights, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
/*
 * Argument-parsing wrapper for View.MemoryView.array.__cinit__
 * (Cython's internal cython.array type).  Unpacks
 * (shape, itemsize, format, mode="c", allocate_buffer=True) from
 * args/kwargs, applies the two defaults, type-checks shape (must be a
 * tuple) and format (must not be None), then calls the implementation
 * below.  Returns -1 with an exception set on failure, 0 on success.
 */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
/* Slot 3 (mode) defaults to the interned string "c". */
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
/* mode and allocate_buffer are optional: only consumed if present. */
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error)
}
} else {
/* Positional-only fast path: 3 to 5 arguments accepted. */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error)
} else {
/* "View.MemoryView":123
 *
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
 * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
 *
 * cdef int idx
 */
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
/* Enforce the declared argument types: shape must be a tuple and
 * format was declared "not None" in the .pyx signature. */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error)
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error)
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":122
 * cdef bint dtype_is_object
 *
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
 * mode="c", bint allocate_buffer=True):
 *
 */
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Implementation of View.MemoryView.array.__cinit__ (Cython-generated).
 * Validates shape, itemsize and format; stores a C-level copy of the
 * shape and strides in a single PyObject_Malloc'd buffer; selects C- or
 * Fortran-order strides from `mode`; and, when allocate_buffer is true,
 * malloc's self.len bytes of element storage — pre-filled with Py_None
 * references when the element format is b'O' (object dtype).
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
Py_ssize_t __pyx_t_11;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* `format` may be rebound below (encode step), so own a reference. */
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":129
 * cdef PyObject **p
 *
 * self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
 * self.itemsize = itemsize
 *
 */
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 129, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error)
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":130
 *
 * self.ndim = <int> len(shape)
 * self.itemsize = itemsize # <<<<<<<<<<<<<<
 *
 * if not self.ndim:
 */
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":132
 * self.itemsize = itemsize
 *
 * if not self.ndim: # <<<<<<<<<<<<<<
 * raise ValueError("Empty shape tuple for cython.array")
 *
 */
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":133
 *
 * if not self.ndim:
 * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
 *
 * if itemsize <= 0:
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 133, __pyx_L1_error)
/* "View.MemoryView":132
 * self.itemsize = itemsize
 *
 * if not self.ndim: # <<<<<<<<<<<<<<
 * raise ValueError("Empty shape tuple for cython.array")
 *
 */
}
/* "View.MemoryView":135
 * raise ValueError("Empty shape tuple for cython.array")
 *
 * if itemsize <= 0: # <<<<<<<<<<<<<<
 * raise ValueError("itemsize <= 0 for cython.array")
 *
 */
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":136
 *
 * if itemsize <= 0:
 * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
 *
 * if not isinstance(format, bytes):
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 136, __pyx_L1_error)
/* "View.MemoryView":135
 * raise ValueError("Empty shape tuple for cython.array")
 *
 * if itemsize <= 0: # <<<<<<<<<<<<<<
 * raise ValueError("itemsize <= 0 for cython.array")
 *
 */
}
/* "View.MemoryView":138
 * raise ValueError("itemsize <= 0 for cython.array")
 *
 * if not isinstance(format, bytes): # <<<<<<<<<<<<<<
 * format = format.encode('ASCII')
 * self._format = format # keep a reference to the byte string
 */
__pyx_t_2 = PyBytes_Check(__pyx_v_format);
__pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":139
 *
 * if not isinstance(format, bytes):
 * format = format.encode('ASCII') # <<<<<<<<<<<<<<
 * self._format = format # keep a reference to the byte string
 * self.format = self._format
 */
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = NULL;
/* Unpack a bound method to call the underlying function directly
 * with self as first argument (avoids a temporary bound-method call). */
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":138
 * raise ValueError("itemsize <= 0 for cython.array")
 *
 * if not isinstance(format, bytes): # <<<<<<<<<<<<<<
 * format = format.encode('ASCII')
 * self._format = format # keep a reference to the byte string
 */
}
/* "View.MemoryView":140
 * if not isinstance(format, bytes):
 * format = format.encode('ASCII')
 * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
 * self.format = self._format
 *
 */
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error)
__pyx_t_3 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":141
 * format = format.encode('ASCII')
 * self._format = format # keep a reference to the byte string
 * self.format = self._format # <<<<<<<<<<<<<<
 *
 *
 */
if (unlikely(__pyx_v_self->_format == Py_None)) {
PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
__PYX_ERR(1, 141, __pyx_L1_error)
}
/* self->format is a borrowed char* into the _format bytes object,
 * which self._format keeps alive for the array's lifetime. */
__pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error)
__pyx_v_self->format = __pyx_t_7;
/* "View.MemoryView":144
 *
 *
 * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
 * self._strides = self._shape + self.ndim
 *
 */
/* One allocation holds both arrays: shape occupies the first ndim
 * slots, strides the following ndim (see pointer arithmetic below). */
__pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":145
 *
 * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
 * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
 *
 * if not self._shape:
 */
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
/* "View.MemoryView":147
 * self._strides = self._shape + self.ndim
 *
 * if not self._shape: # <<<<<<<<<<<<<<
 * raise MemoryError("unable to allocate shape and strides.")
 *
 */
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":148
 *
 * if not self._shape:
 * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 148, __pyx_L1_error)
/* "View.MemoryView":147
 * self._strides = self._shape + self.ndim
 *
 * if not self._shape: # <<<<<<<<<<<<<<
 * raise MemoryError("unable to allocate shape and strides.")
 *
 */
}
/* "View.MemoryView":151
 *
 *
 * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 */
__pyx_t_8 = 0;
__pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_9;
__pyx_v_idx = __pyx_t_8;
__pyx_t_8 = (__pyx_t_8 + 1);
/* "View.MemoryView":152
 *
 * for idx, dim in enumerate(shape):
 * if dim <= 0: # <<<<<<<<<<<<<<
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 * self._shape[idx] = dim
 */
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":153
 * for idx, dim in enumerate(shape):
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
 * self._shape[idx] = dim
 *
 */
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
__pyx_t_5 = 0;
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 153, __pyx_L1_error)
/* "View.MemoryView":152
 *
 * for idx, dim in enumerate(shape):
 * if dim <= 0: # <<<<<<<<<<<<<<
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 * self._shape[idx] = dim
 */
}
/* "View.MemoryView":154
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 * self._shape[idx] = dim # <<<<<<<<<<<<<<
 *
 * cdef char order
 */
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":151
 *
 *
 * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 */
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":157
 *
 * cdef char order
 * if mode == 'fortran': # <<<<<<<<<<<<<<
 * order = b'F'
 * self.mode = u'fortran'
 */
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":158
 * cdef char order
 * if mode == 'fortran':
 * order = b'F' # <<<<<<<<<<<<<<
 * self.mode = u'fortran'
 * elif mode == 'c':
 */
__pyx_v_order = 'F';
/* "View.MemoryView":159
 * if mode == 'fortran':
 * order = b'F'
 * self.mode = u'fortran' # <<<<<<<<<<<<<<
 * elif mode == 'c':
 * order = b'C'
 */
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
/* "View.MemoryView":157
 *
 * cdef char order
 * if mode == 'fortran': # <<<<<<<<<<<<<<
 * order = b'F'
 * self.mode = u'fortran'
 */
goto __pyx_L10;
}
/* "View.MemoryView":160
 * order = b'F'
 * self.mode = u'fortran'
 * elif mode == 'c': # <<<<<<<<<<<<<<
 * order = b'C'
 * self.mode = u'c'
 */
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error)
if (likely(__pyx_t_4)) {
/* "View.MemoryView":161
 * self.mode = u'fortran'
 * elif mode == 'c':
 * order = b'C' # <<<<<<<<<<<<<<
 * self.mode = u'c'
 * else:
 */
__pyx_v_order = 'C';
/* "View.MemoryView":162
 * elif mode == 'c':
 * order = b'C'
 * self.mode = u'c' # <<<<<<<<<<<<<<
 * else:
 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
 */
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
/* "View.MemoryView":160
 * order = b'F'
 * self.mode = u'fortran'
 * elif mode == 'c': # <<<<<<<<<<<<<<
 * order = b'C'
 * self.mode = u'c'
 */
goto __pyx_L10;
}
/* "View.MemoryView":164
 * self.mode = u'c'
 * else:
 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
 *
 * self.len = fill_contig_strides_array(self._shape, self._strides,
 */
/*else*/ {
__pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 164, __pyx_L1_error)
}
__pyx_L10:;
/* "View.MemoryView":166
 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
 *
 * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
 * itemsize, self.ndim, order)
 *
 */
/* fill_contig_strides_array computes contiguous strides for the chosen
 * order and returns the total buffer length in bytes. */
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":169
 * itemsize, self.ndim, order)
 *
 * self.free_data = allocate_buffer # <<<<<<<<<<<<<<
 * self.dtype_is_object = format == b'O'
 * if allocate_buffer:
 */
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":170
 *
 * self.free_data = allocate_buffer
 * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
 * if allocate_buffer:
 *
 */
__pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":171
 * self.free_data = allocate_buffer
 * self.dtype_is_object = format == b'O'
 * if allocate_buffer: # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":174
 *
 *
 * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
 * if not self.data:
 * raise MemoryError("unable to allocate array data.")
 */
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":175
 *
 * self.data = <char *>malloc(self.len)
 * if not self.data: # <<<<<<<<<<<<<<
 * raise MemoryError("unable to allocate array data.")
 *
 */
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":176
 * self.data = <char *>malloc(self.len)
 * if not self.data:
 * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
 *
 * if self.dtype_is_object:
 */
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 176, __pyx_L1_error)
/* "View.MemoryView":175
 *
 * self.data = <char *>malloc(self.len)
 * if not self.data: # <<<<<<<<<<<<<<
 * raise MemoryError("unable to allocate array data.")
 *
 */
}
/* "View.MemoryView":178
 * raise MemoryError("unable to allocate array data.")
 *
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * p = <PyObject **> self.data
 * for i in range(self.len / itemsize):
 */
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* Object arrays must start with valid references: fill every slot
 * with Py_None, one INCREF per slot. */
/* "View.MemoryView":179
 *
 * if self.dtype_is_object:
 * p = <PyObject **> self.data # <<<<<<<<<<<<<<
 * for i in range(self.len / itemsize):
 * p[i] = Py_None
 */
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":180
 * if self.dtype_is_object:
 * p = <PyObject **> self.data
 * for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
 * p[i] = Py_None
 * Py_INCREF(Py_None)
 */
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 180, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 180, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
__pyx_t_9 = __pyx_t_1;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) {
__pyx_v_i = __pyx_t_11;
/* "View.MemoryView":181
 * p = <PyObject **> self.data
 * for i in range(self.len / itemsize):
 * p[i] = Py_None # <<<<<<<<<<<<<<
 * Py_INCREF(Py_None)
 *
 */
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":182
 * for i in range(self.len / itemsize):
 * p[i] = Py_None
 * Py_INCREF(Py_None) # <<<<<<<<<<<<<<
 *
 * @cname('getbuffer')
 */
Py_INCREF(Py_None);
}
/* "View.MemoryView":178
 * raise MemoryError("unable to allocate array data.")
 *
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * p = <PyObject **> self.data
 * for i in range(self.len / itemsize):
 */
}
/* "View.MemoryView":171
 * self.free_data = allocate_buffer
 * self.dtype_is_object = format == b'O'
 * if allocate_buffer: # <<<<<<<<<<<<<<
 *
 *
 */
}
/* "View.MemoryView":122
 * cdef bint dtype_is_object
 *
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
 * mode="c", bint allocate_buffer=True):
 *
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
/*
 * Buffer-protocol entry point (bf_getbuffer) for View.MemoryView.array.
 * Thin trampoline: casts self to the concrete array struct and forwards
 * to the __getbuffer__ implementation below.  Returns 0 on success,
 * -1 with an exception set on failure.
 */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed __getbuffer__ implementation for the Cython `array` class.
Validates that the caller's `flags` request a contiguity the array can
satisfy (C- or Fortran-contiguous per self->mode), then fills the
Py_buffer `info` fields (buf/len/ndim/shape/strides/itemsize/format)
straight from the array's members. Returns 0 on success, -1 on error.
On the error path info->obj is cleared so the caller never sees a
half-initialized view. NOTE(review): auto-generated by Cython; the
INCREF/DECREF ordering on info->obj is load-bearing — do not edit. */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
/* A NULL view argument is rejected up front per the buffer protocol. */
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
/* info->obj is provisionally set to None; it is replaced with `self`
on success (below) or dropped on the error path. */
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":186
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1 # <<<<<<<<<<<<<<
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = -1;
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error)
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":188
* cdef int bufmode = -1
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
goto __pyx_L3;
}
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error)
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
*/
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
}
__pyx_L3:;
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 192, __pyx_L1_error)
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
}
/* Populate the Py_buffer view directly from the array's members. */
/* "View.MemoryView":193
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data # <<<<<<<<<<<<<<
* info.len = self.len
* info.ndim = self.ndim
*/
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":194
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
* info.len = self.len # <<<<<<<<<<<<<<
* info.ndim = self.ndim
* info.shape = self._shape
*/
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":195
* info.buf = self.data
* info.len = self.len
* info.ndim = self.ndim # <<<<<<<<<<<<<<
* info.shape = self._shape
* info.strides = self._strides
*/
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":196
* info.len = self.len
* info.ndim = self.ndim
* info.shape = self._shape # <<<<<<<<<<<<<<
* info.strides = self._strides
* info.suboffsets = NULL
*/
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":197
* info.ndim = self.ndim
* info.shape = self._shape
* info.strides = self._strides # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = self.itemsize
*/
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":198
* info.shape = self._shape
* info.strides = self._strides
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = self.itemsize
* info.readonly = 0
*/
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":199
* info.strides = self._strides
* info.suboffsets = NULL
* info.itemsize = self.itemsize # <<<<<<<<<<<<<<
* info.readonly = 0
*
*/
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":200
* info.suboffsets = NULL
* info.itemsize = self.itemsize
* info.readonly = 0 # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
/* The format string is only exported when the consumer asked for it. */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":203
*
* if flags & PyBUF_FORMAT:
* info.format = self.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":205
* info.format = self.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.obj = self
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":207
* info.format = NULL
*
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*/
/* Replace the provisional None with `self` as the exporting object;
the new reference is taken before the old one is released. */
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
/* Error exit: drop whatever info->obj currently holds (None or self). */
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
/* Success exit: only clear info->obj if it is still the provisional None. */
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
/* Cython-generated tp_dealloc shim: casts the receiver and delegates to
the typed __dealloc__ implementation. NOTE(review): auto-generated —
kept byte-identical. */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* Typed __dealloc__ implementation for `array`.
Release policy, in order: (1) if a callback_free_data function pointer
was installed, it is called with self->data and owns the release;
(2) otherwise, if free_data is set, any PyObject* elements are
decref'd first (when dtype_is_object) and the buffer is free()d.
In all cases the _shape allocation is released via PyObject_Free.
NOTE(review): auto-generated by Cython — kept byte-identical. */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":213
* def __dealloc__(array self):
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data) # <<<<<<<<<<<<<<
* elif self.free_data:
* if self.dtype_is_object:
*/
__pyx_v_self->callback_free_data(__pyx_v_self->data);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
goto __pyx_L3;
}
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":216
* elif self.free_data:
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
* self._strides, self.ndim, False)
* free(self.data)
*/
/* Final argument 0 (False) selects decref of the stored objects. */
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
}
/* "View.MemoryView":218
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
* free(self.data) # <<<<<<<<<<<<<<
* PyObject_Free(self._shape)
*
*/
free(__pyx_v_self->data);
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
}
__pyx_L3:;
/* "View.MemoryView":219
* self._strides, self.ndim, False)
* free(self.data)
* PyObject_Free(self._shape) # <<<<<<<<<<<<<<
*
* @property
*/
PyObject_Free(__pyx_v_self->_shape);
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* Python wrapper */
/* Cython-generated getter shim for the `memview` property: casts the
receiver and delegates. NOTE(review): auto-generated — kept byte-identical. */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* `memview` property implementation: dispatches through the class vtable
to get_memview() and returns the resulting memoryview (new reference),
or NULL with an exception set on failure. NOTE(review): auto-generated
by Cython — kept byte-identical. */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":223
* @property
* def memview(self):
* return self.get_memview() # <<<<<<<<<<<<<<
*
* @cname('get_memview')
*/
__Pyx_XDECREF(__pyx_r);
/* cdef-method call goes through the vtable rather than attribute lookup. */
__pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* cdef get_memview(): builds the 3-tuple (self, flags, dtype_is_object)
with flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE and calls
the Cython memoryview type on it; returns the new memoryview or NULL
on error. NOTE(review): auto-generated — kept byte-identical. */
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("get_memview", 0);
/* "View.MemoryView":227
* @cname('get_memview')
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
* return memoryview(self, flags, self.dtype_is_object)
*
*/
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":228
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* Python wrapper */
/* Cython-generated sq_length shim for array.__len__.
NOTE(review): auto-generated — kept byte-identical. */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __len__ implementation: length of the first dimension, _shape[0].
Cannot fail. NOTE(review): auto-generated — kept byte-identical. */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":231
*
* def __len__(self):
* return self._shape[0] # <<<<<<<<<<<<<<
*
* def __getattr__(self, attr):
*/
__pyx_r = (__pyx_v_self->_shape[0]);
goto __pyx_L0;
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
/* Cython-generated tp_getattro fallback shim for array.__getattr__.
NOTE(review): auto-generated — kept byte-identical. */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __getattr__ implementation: fetches self.memview (the property above)
and forwards the attribute lookup to it, i.e. getattr(self.memview, attr).
Returns a new reference or NULL with an exception set.
NOTE(review): auto-generated — kept byte-identical. */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":234
*
* def __getattr__(self, attr):
* return getattr(self.memview, attr) # <<<<<<<<<<<<<<
*
* def __getitem__(self, item):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
/* Cython-generated mp_subscript shim for array.__getitem__.
NOTE(review): auto-generated — kept byte-identical. */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __getitem__ implementation: indexing is delegated to self.memview,
i.e. returns self.memview[item]. New reference or NULL on error.
NOTE(review): auto-generated — kept byte-identical. */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":237
*
* def __getitem__(self, item):
* return self.memview[item] # <<<<<<<<<<<<<<
*
* def __setitem__(self, item, value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
/* Cython-generated mp_ass_subscript shim for array.__setitem__.
NOTE(review): auto-generated — kept byte-identical. */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __setitem__ implementation: item assignment is delegated to
self.memview, i.e. self.memview[item] = value. Returns 0 on success,
-1 with an exception set on failure.
NOTE(review): auto-generated — kept byte-identical. */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":240
*
* def __setitem__(self, item, value):
* self.memview[item] = value # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
/* Cython-generated shim for array.__reduce_cython__ (pickle support stub).
NOTE(review): auto-generated — kept byte-identical. */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __reduce_cython__ implementation: unconditionally raises TypeError —
the array class cannot be pickled because its __cinit__ is non-trivial.
Always returns NULL with the exception set.
NOTE(review): auto-generated — kept byte-identical. */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
/* Cython-generated shim for array.__setstate_cython__ (pickle support stub).
NOTE(review): auto-generated — kept byte-identical. */
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __setstate_cython__ implementation: unconditionally raises TypeError,
mirroring __reduce_cython__ above — unpickling is unsupported.
Always returns NULL with the exception set.
NOTE(review): auto-generated — kept byte-identical. */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* C-level factory for the Cython array type (cname __pyx_array_new).
Two paths: if buf == NULL, calls array(shape, itemsize, format,
mode.decode('ASCII')) which allocates its own buffer; otherwise calls
the same constructor with allocate_buffer=False and then points
result->data at the caller-supplied buf (the array does NOT own buf on
this path). Returns a new reference, or 0 on error.
NOTE(review): auto-generated by Cython — kept byte-identical. */
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":249
*
* if buf == NULL:
* result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
/*else*/ {
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":252
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False) # <<<<<<<<<<<<<<
* result.data = buf
*
*/
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error)
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":253
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False)
* result.data = buf # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":255
* result.data = buf
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
/* Python-level entry point for Enum.__init__(self, name).
 * Unpacks the single required argument "name" (positional or keyword)
 * from args/kwargs and forwards it to the typed implementation below.
 * Returns -1 with a TypeError set when the argument list is invalid. */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_name = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
    PyObject* values[1] = {0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* Record the positional arguments first (at most one accepted). */
      switch (pos_args) {
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* Fill any slot not supplied positionally from the keyword dict. */
      switch (pos_args) {
        case 0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
      }
      /* Leftover keywords are duplicates or unknown names -> TypeError. */
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
    }
    __pyx_v_name = values[0];
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of Enum.__init__: stores `name` on the instance.
 * The new value is incref'd before the old one is decref'd, so the
 * assignment is safe even if `name` is the value already stored. */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__", 0);
  /* "View.MemoryView":282
 * cdef object name
 * def __init__(self, name):
 * self.name = name             # <<<<<<<<<<<<<<
 * def __repr__(self):
 * return self.name
 */
  __Pyx_INCREF(__pyx_v_name);
  __Pyx_GIVEREF(__pyx_v_name);
  __Pyx_GOTREF(__pyx_v_self->name);
  __Pyx_DECREF(__pyx_v_self->name);
  __pyx_v_self->name = __pyx_v_name;
  /* "View.MemoryView":281
 * cdef class Enum(object):
 * cdef object name
 * def __init__(self, name):             # <<<<<<<<<<<<<<
 * self.name = name
 * def __repr__(self):
 */
  /* function exit code */
  __pyx_r = 0;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
/* tp_repr slot trampoline: casts self to the Enum struct type and
 * delegates to the implementation; no argument parsing is needed. */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of Enum.__repr__: returns a NEW reference to the
 * stored `name` object (incref'd before being handed to the caller). */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__", 0);
  /* "View.MemoryView":284
 * self.name = name
 * def __repr__(self):
 * return self.name             # <<<<<<<<<<<<<<
 * 
 * cdef generic = Enum("<strided and direct or indirect>")
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->name);
  __pyx_r = __pyx_v_self->name;
  goto __pyx_L0;
  /* "View.MemoryView":283
 * def __init__(self, name):
 * self.name = name
 * def __repr__(self):             # <<<<<<<<<<<<<<
 * return self.name
 * 
 */
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
/* Pickle-support wrapper for Enum.__reduce_cython__ (takes no
 * arguments; the unused positional tuple is ignored). */
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of Enum.__reduce_cython__ for pickling.
 * Builds state = (self.name,) (+ the instance __dict__, if any) and
 * returns either
 *   (__pyx_unpickle_Enum, (type(self), 0xb068931, None), state)   -- setstate form
 * or
 *   (__pyx_unpickle_Enum, (type(self), 0xb068931, state))         -- ctor form
 * 0xb068931 (== 184977713) is the generated layout checksum that
 * __pyx_unpickle_Enum verifies on load. */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
  PyObject *__pyx_v_state = 0;
  PyObject *__pyx_v__dict = 0;
  int __pyx_v_use_setstate;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);
  /* "(tree fragment)":5
 * cdef object _dict
 * cdef bint use_setstate
 * state = (self.name,)             # <<<<<<<<<<<<<<
 * _dict = getattr(self, '__dict__', None)
 * if _dict is not None:
 */
  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_v_self->name);
  __Pyx_GIVEREF(__pyx_v_self->name);
  PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
  __pyx_v_state = ((PyObject*)__pyx_t_1);
  __pyx_t_1 = 0;
  /* "(tree fragment)":6
 * cdef bint use_setstate
 * state = (self.name,)
 * _dict = getattr(self, '__dict__', None)             # <<<<<<<<<<<<<<
 * if _dict is not None:
 * state += (_dict,)
 */
  __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v__dict = __pyx_t_1;
  __pyx_t_1 = 0;
  /* "(tree fragment)":7
 * state = (self.name,)
 * _dict = getattr(self, '__dict__', None)
 * if _dict is not None:             # <<<<<<<<<<<<<<
 * state += (_dict,)
 * use_setstate = True
 */
  __pyx_t_2 = (__pyx_v__dict != Py_None);
  __pyx_t_3 = (__pyx_t_2 != 0);
  if (__pyx_t_3) {
    /* "(tree fragment)":8
 * _dict = getattr(self, '__dict__', None)
 * if _dict is not None:
 * state += (_dict,)             # <<<<<<<<<<<<<<
 * use_setstate = True
 * else:
 */
    __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(__pyx_v__dict);
    __Pyx_GIVEREF(__pyx_v__dict);
    PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
    __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
    __pyx_t_4 = 0;
    /* "(tree fragment)":9
 * if _dict is not None:
 * state += (_dict,)
 * use_setstate = True             # <<<<<<<<<<<<<<
 * else:
 * use_setstate = self.name is not None
 */
    __pyx_v_use_setstate = 1;
    /* "(tree fragment)":7
 * state = (self.name,)
 * _dict = getattr(self, '__dict__', None)
 * if _dict is not None:             # <<<<<<<<<<<<<<
 * state += (_dict,)
 * use_setstate = True
 */
    goto __pyx_L3;
  }
  /* "(tree fragment)":11
 * use_setstate = True
 * else:
 * use_setstate = self.name is not None             # <<<<<<<<<<<<<<
 * if use_setstate:
 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
 */
  /*else*/ {
    __pyx_t_3 = (__pyx_v_self->name != Py_None);
    __pyx_v_use_setstate = __pyx_t_3;
  }
  __pyx_L3:;
  /* "(tree fragment)":12
 * else:
 * use_setstate = self.name is not None
 * if use_setstate:             # <<<<<<<<<<<<<<
 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
 * else:
 */
  __pyx_t_3 = (__pyx_v_use_setstate != 0);
  if (__pyx_t_3) {
    /* "(tree fragment)":13
 * use_setstate = self.name is not None
 * if use_setstate:
 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state             # <<<<<<<<<<<<<<
 * else:
 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    /* Inner args tuple: (type(self), checksum, None) -- state passed
     * separately so __setstate__ is invoked on unpickle. */
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_INCREF(__pyx_int_184977713);
    __Pyx_GIVEREF(__pyx_int_184977713);
    PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
    __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_4);
    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_1);
    PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
    __pyx_t_4 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_5;
    __pyx_t_5 = 0;
    goto __pyx_L0;
    /* "(tree fragment)":12
 * else:
 * use_setstate = self.name is not None
 * if use_setstate:             # <<<<<<<<<<<<<<
 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
 * else:
 */
  }
  /* "(tree fragment)":15
 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
 * else:
 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state)             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 * __pyx_unpickle_Enum__set_state(self, __pyx_state)
 */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    /* Inner args tuple: (type(self), checksum, state) -- state handed
     * straight to the unpickle factory, no __setstate__ call needed. */
    __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
    __Pyx_INCREF(__pyx_int_184977713);
    __Pyx_GIVEREF(__pyx_int_184977713);
    PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
    __Pyx_INCREF(__pyx_v_state);
    __Pyx_GIVEREF(__pyx_v_state);
    PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_1);
    PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
    __pyx_t_5 = 0;
    __pyx_t_1 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;
  }
  /* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 * cdef tuple state
 * cdef object _dict
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_state);
  __Pyx_XDECREF(__pyx_v__dict);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* Python wrapper */
/* Pickle-support wrapper for Enum.__setstate_cython__: forwards the
 * state object to the typed implementation. */
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of Enum.__setstate_cython__: type-checks that the
 * state is a tuple (or None) and delegates the actual attribute
 * restoration to __pyx_unpickle_Enum__set_state.  Returns None. */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);
  /* "(tree fragment)":17
 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
 * def __setstate_cython__(self, __pyx_state):
 * __pyx_unpickle_Enum__set_state(self, __pyx_state)             # <<<<<<<<<<<<<<
 */
  if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
  __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* "(tree fragment)":16
 * else:
 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 * __pyx_unpickle_Enum__set_state(self, __pyx_state)
 */
  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* Round `memory` up to the next multiple of `alignment`.
 * Generated from View.MemoryView:298 align_pointer(); the pointer is
 * reinterpreted as an integer, and when it does not already sit on an
 * `alignment` boundary the distance to the next boundary is added.
 * `alignment` must be non-zero (callers pass sizeof(__pyx_atomic_int)). */
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
  Py_intptr_t __pyx_addr = ((Py_intptr_t)__pyx_v_memory);
  size_t __pyx_rem = (size_t)(__pyx_addr % __pyx_v_alignment);

  if (__pyx_rem > 0)
    __pyx_addr += (Py_intptr_t)(__pyx_v_alignment - __pyx_rem);

  return ((void *)__pyx_addr);
}
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
/* Python-level entry point for memoryview.__cinit__(obj, flags,
 * dtype_is_object=False): unpacks two required arguments plus one
 * optional bool, converts flags to C int, then calls the typed
 * implementation.  Returns -1 with an exception set on bad arguments
 * or failed conversion. */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_obj = 0;
  int __pyx_v_flags;
  int __pyx_v_dtype_is_object;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
    PyObject* values[3] = {0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* Record positional arguments first (0..3 accepted here). */
      switch (pos_args) {
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* Fill remaining slots from keywords; `obj` and `flags` are
       * required, `dtype_is_object` is optional. */
      switch (pos_args) {
        case 0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case 1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case 2:
        if (kw_args > 0) {
          PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object);
          if (value) { values[2] = value; kw_args--; }
        }
      }
      /* Leftover keywords are duplicates or unknown names -> TypeError. */
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error)
      }
    } else {
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_obj = values[0];
    __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
    if (values[2]) {
      __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
    } else {
      /* Default for dtype_is_object is False. */
      __pyx_v_dtype_is_object = ((int)0);
    }
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of memoryview.__cinit__.
 * Steps, in order:
 *   1. store `obj` and `flags` on self;
 *   2. acquire the underlying buffer via __Pyx_GetBuffer (unless self
 *      is a subclass constructed with obj=None);
 *   3. grab a thread lock -- preferably one of the 8 preallocated
 *      pool entries, otherwise a freshly allocated PyThread lock
 *      (raising MemoryError if allocation fails);
 *   4. decide dtype_is_object from the buffer format ("O\0") when
 *      PyBUF_FORMAT was requested, else from the argument;
 *   5. align the acquisition counter and clear typeinfo.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  __Pyx_RefNannySetupContext("__cinit__", 0);
  /* "View.MemoryView":346
 * 
 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
 * self.obj = obj             # <<<<<<<<<<<<<<
 * self.flags = flags
 * if type(self) is memoryview or obj is not None:
 */
  /* Incref the new value before decref'ing the old slot contents. */
  __Pyx_INCREF(__pyx_v_obj);
  __Pyx_GIVEREF(__pyx_v_obj);
  __Pyx_GOTREF(__pyx_v_self->obj);
  __Pyx_DECREF(__pyx_v_self->obj);
  __pyx_v_self->obj = __pyx_v_obj;
  /* "View.MemoryView":347
 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
 * self.obj = obj
 * self.flags = flags             # <<<<<<<<<<<<<<
 * if type(self) is memoryview or obj is not None:
 * __Pyx_GetBuffer(obj, &self.view, flags)
 */
  __pyx_v_self->flags = __pyx_v_flags;
  /* "View.MemoryView":348
 * self.obj = obj
 * self.flags = flags
 * if type(self) is memoryview or obj is not None:             # <<<<<<<<<<<<<<
 * __Pyx_GetBuffer(obj, &self.view, flags)
 * if <PyObject *> self.view.obj == NULL:
 */
  __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
  __pyx_t_3 = (__pyx_t_2 != 0);
  if (!__pyx_t_3) {
  } else {
    __pyx_t_1 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = (__pyx_v_obj != Py_None);
  __pyx_t_2 = (__pyx_t_3 != 0);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {
    /* "View.MemoryView":349
 * self.flags = flags
 * if type(self) is memoryview or obj is not None:
 * __Pyx_GetBuffer(obj, &self.view, flags)             # <<<<<<<<<<<<<<
 * if <PyObject *> self.view.obj == NULL:
 * (<__pyx_buffer *> &self.view).obj = Py_None
 */
    __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error)
    /* "View.MemoryView":350
 * if type(self) is memoryview or obj is not None:
 * __Pyx_GetBuffer(obj, &self.view, flags)
 * if <PyObject *> self.view.obj == NULL:             # <<<<<<<<<<<<<<
 * (<__pyx_buffer *> &self.view).obj = Py_None
 * Py_INCREF(Py_None)
 */
    __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
    if (__pyx_t_1) {
      /* "View.MemoryView":351
 * __Pyx_GetBuffer(obj, &self.view, flags)
 * if <PyObject *> self.view.obj == NULL:
 * (<__pyx_buffer *> &self.view).obj = Py_None             # <<<<<<<<<<<<<<
 * Py_INCREF(Py_None)
 * 
 */
      /* Normalize a NULL Py_buffer.obj to an owned Py_None so later
       * release logic can treat the field uniformly. */
      ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
      /* "View.MemoryView":352
 * if <PyObject *> self.view.obj == NULL:
 * (<__pyx_buffer *> &self.view).obj = Py_None
 * Py_INCREF(Py_None)             # <<<<<<<<<<<<<<
 * 
 * global __pyx_memoryview_thread_locks_used
 */
      Py_INCREF(Py_None);
      /* "View.MemoryView":350
 * if type(self) is memoryview or obj is not None:
 * __Pyx_GetBuffer(obj, &self.view, flags)
 * if <PyObject *> self.view.obj == NULL:             # <<<<<<<<<<<<<<
 * (<__pyx_buffer *> &self.view).obj = Py_None
 * Py_INCREF(Py_None)
 */
    }
    /* "View.MemoryView":348
 * self.obj = obj
 * self.flags = flags
 * if type(self) is memoryview or obj is not None:             # <<<<<<<<<<<<<<
 * __Pyx_GetBuffer(obj, &self.view, flags)
 * if <PyObject *> self.view.obj == NULL:
 */
  }
  /* "View.MemoryView":355
 * 
 * global __pyx_memoryview_thread_locks_used
 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:             # <<<<<<<<<<<<<<
 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
 * __pyx_memoryview_thread_locks_used += 1
 */
  /* 8 == THREAD_LOCKS_PREALLOCATED (inlined by the code generator). */
  __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":356
 * global __pyx_memoryview_thread_locks_used
 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]             # <<<<<<<<<<<<<<
 * __pyx_memoryview_thread_locks_used += 1
 * if self.lock is NULL:
 */
    __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
    /* "View.MemoryView":357
 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
 * __pyx_memoryview_thread_locks_used += 1             # <<<<<<<<<<<<<<
 * if self.lock is NULL:
 * self.lock = PyThread_allocate_lock()
 */
    __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
    /* "View.MemoryView":355
 * 
 * global __pyx_memoryview_thread_locks_used
 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:             # <<<<<<<<<<<<<<
 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
 * __pyx_memoryview_thread_locks_used += 1
 */
  }
  /* "View.MemoryView":358
 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
 * __pyx_memoryview_thread_locks_used += 1
 * if self.lock is NULL:             # <<<<<<<<<<<<<<
 * self.lock = PyThread_allocate_lock()
 * if self.lock is NULL:
 */
  /* Pool exhausted (or pool slot was NULL): fall back to allocating. */
  __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":359
 * __pyx_memoryview_thread_locks_used += 1
 * if self.lock is NULL:
 * self.lock = PyThread_allocate_lock()             # <<<<<<<<<<<<<<
 * if self.lock is NULL:
 * raise MemoryError
 */
    __pyx_v_self->lock = PyThread_allocate_lock();
    /* "View.MemoryView":360
 * if self.lock is NULL:
 * self.lock = PyThread_allocate_lock()
 * if self.lock is NULL:             # <<<<<<<<<<<<<<
 * raise MemoryError
 * 
 */
    __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
    if (unlikely(__pyx_t_1)) {
      /* "View.MemoryView":361
 * self.lock = PyThread_allocate_lock()
 * if self.lock is NULL:
 * raise MemoryError             # <<<<<<<<<<<<<<
 * 
 * if flags & PyBUF_FORMAT:
 */
      PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error)
      /* "View.MemoryView":360
 * if self.lock is NULL:
 * self.lock = PyThread_allocate_lock()
 * if self.lock is NULL:             # <<<<<<<<<<<<<<
 * raise MemoryError
 * 
 */
    }
    /* "View.MemoryView":358
 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
 * __pyx_memoryview_thread_locks_used += 1
 * if self.lock is NULL:             # <<<<<<<<<<<<<<
 * self.lock = PyThread_allocate_lock()
 * if self.lock is NULL:
 */
  }
  /* "View.MemoryView":363
 * raise MemoryError
 * 
 * if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
 * else:
 */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":364
 * 
 * if flags & PyBUF_FORMAT:
 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')             # <<<<<<<<<<<<<<
 * else:
 * self.dtype_is_object = dtype_is_object
 */
    /* True only for the exact format string "O" (object dtype). */
    __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
    if (__pyx_t_2) {
    } else {
      __pyx_t_1 = __pyx_t_2;
      goto __pyx_L11_bool_binop_done;
    }
    __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
    __pyx_t_1 = __pyx_t_2;
    __pyx_L11_bool_binop_done:;
    __pyx_v_self->dtype_is_object = __pyx_t_1;
    /* "View.MemoryView":363
 * raise MemoryError
 * 
 * if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
 * else:
 */
    goto __pyx_L10;
  }
  /* "View.MemoryView":366
 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
 * else:
 * self.dtype_is_object = dtype_is_object             # <<<<<<<<<<<<<<
 * 
 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
 */
  /*else*/ {
    __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
  }
  __pyx_L10:;
  /* "View.MemoryView":368
 * self.dtype_is_object = dtype_is_object
 * 
 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(             # <<<<<<<<<<<<<<
 * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
 * self.typeinfo = NULL
 */
  /* Align the acquisition counter so atomic ops on it are valid. */
  __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
  /* "View.MemoryView":370
 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
 * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
 * self.typeinfo = NULL             # <<<<<<<<<<<<<<
 * 
 * def __dealloc__(memoryview self):
 */
  __pyx_v_self->typeinfo = NULL;
  /* "View.MemoryView":345
 * cdef __Pyx_TypeInfo *typeinfo
 * 
 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):             # <<<<<<<<<<<<<<
 * self.obj = obj
 * self.flags = flags
 */
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
/* tp_dealloc-time trampoline: casts self and delegates to the typed
 * __dealloc__ implementation; must not raise. */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* Implementation of memoryview.__dealloc__.
 * Releases the acquired Py_buffer (if `obj` was set), then disposes of
 * the thread lock: if it came from the preallocated pool it is swapped
 * back into the pool's live region (swap-with-last + shrink count)
 * for reuse; otherwise it was heap-allocated and is freed with
 * PyThread_free_lock.  The for/else shape of the original Cython code
 * maps to the loop + /*else*\/ block below. */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
  int __pyx_v_i;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  int __pyx_t_5;
  PyThread_type_lock __pyx_t_6;
  PyThread_type_lock __pyx_t_7;
  __Pyx_RefNannySetupContext("__dealloc__", 0);
  /* "View.MemoryView":373
 * 
 * def __dealloc__(memoryview self):
 * if self.obj is not None:             # <<<<<<<<<<<<<<
 * __Pyx_ReleaseBuffer(&self.view)
 * 
 */
  __pyx_t_1 = (__pyx_v_self->obj != Py_None);
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":374
 * def __dealloc__(memoryview self):
 * if self.obj is not None:
 * __Pyx_ReleaseBuffer(&self.view)             # <<<<<<<<<<<<<<
 * 
 * cdef int i
 */
    __Pyx_ReleaseBuffer((&__pyx_v_self->view));
    /* "View.MemoryView":373
 * 
 * def __dealloc__(memoryview self):
 * if self.obj is not None:             # <<<<<<<<<<<<<<
 * __Pyx_ReleaseBuffer(&self.view)
 * 
 */
  }
  /* "View.MemoryView":378
 * cdef int i
 * global __pyx_memoryview_thread_locks_used
 * if self.lock != NULL:             # <<<<<<<<<<<<<<
 * for i in range(__pyx_memoryview_thread_locks_used):
 * if __pyx_memoryview_thread_locks[i] is self.lock:
 */
  __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":379
 * global __pyx_memoryview_thread_locks_used
 * if self.lock != NULL:
 * for i in range(__pyx_memoryview_thread_locks_used):             # <<<<<<<<<<<<<<
 * if __pyx_memoryview_thread_locks[i] is self.lock:
 * __pyx_memoryview_thread_locks_used -= 1
 */
    /* Scan only the in-use prefix of the preallocated lock pool. */
    __pyx_t_3 = __pyx_memoryview_thread_locks_used;
    __pyx_t_4 = __pyx_t_3;
    for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
      __pyx_v_i = __pyx_t_5;
      /* "View.MemoryView":380
 * if self.lock != NULL:
 * for i in range(__pyx_memoryview_thread_locks_used):
 * if __pyx_memoryview_thread_locks[i] is self.lock:             # <<<<<<<<<<<<<<
 * __pyx_memoryview_thread_locks_used -= 1
 * if i != __pyx_memoryview_thread_locks_used:
 */
      __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":381
 * for i in range(__pyx_memoryview_thread_locks_used):
 * if __pyx_memoryview_thread_locks[i] is self.lock:
 * __pyx_memoryview_thread_locks_used -= 1             # <<<<<<<<<<<<<<
 * if i != __pyx_memoryview_thread_locks_used:
 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 */
        __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
        /* "View.MemoryView":382
 * if __pyx_memoryview_thread_locks[i] is self.lock:
 * __pyx_memoryview_thread_locks_used -= 1
 * if i != __pyx_memoryview_thread_locks_used:             # <<<<<<<<<<<<<<
 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
 */
        __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
        if (__pyx_t_2) {
          /* "View.MemoryView":384
 * if i != __pyx_memoryview_thread_locks_used:
 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])             # <<<<<<<<<<<<<<
 * break
 * else:
 */
          /* Swap this slot with the last live slot so the in-use
           * prefix of the pool stays contiguous. */
          __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
          __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
          /* "View.MemoryView":383
 * __pyx_memoryview_thread_locks_used -= 1
 * if i != __pyx_memoryview_thread_locks_used:
 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (             # <<<<<<<<<<<<<<
 * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
 * break
 */
          (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6;
          (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7;
          /* "View.MemoryView":382
 * if __pyx_memoryview_thread_locks[i] is self.lock:
 * __pyx_memoryview_thread_locks_used -= 1
 * if i != __pyx_memoryview_thread_locks_used:             # <<<<<<<<<<<<<<
 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
 */
        }
        /* "View.MemoryView":385
 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
 * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
 * break             # <<<<<<<<<<<<<<
 * else:
 * PyThread_free_lock(self.lock)
 */
        goto __pyx_L6_break;
        /* "View.MemoryView":380
 * if self.lock != NULL:
 * for i in range(__pyx_memoryview_thread_locks_used):
 * if __pyx_memoryview_thread_locks[i] is self.lock:             # <<<<<<<<<<<<<<
 * __pyx_memoryview_thread_locks_used -= 1
 * if i != __pyx_memoryview_thread_locks_used:
 */
      }
    }
    /*else*/ {
      /* "View.MemoryView":387
 * break
 * else:
 * PyThread_free_lock(self.lock)             # <<<<<<<<<<<<<<
 * 
 * cdef char *get_item_pointer(memoryview self, object index) except NULL:
 */
      /* Not found in the pool: this lock was heap-allocated in
       * __cinit__ and must be freed here. */
      PyThread_free_lock(__pyx_v_self->lock);
    }
    __pyx_L6_break:;
    /* "View.MemoryView":378
 * cdef int i
 * global __pyx_memoryview_thread_locks_used
 * if self.lock != NULL:             # <<<<<<<<<<<<<<
 * for i in range(__pyx_memoryview_thread_locks_used):
 * if __pyx_memoryview_thread_locks[i] is self.lock:
 */
  }
  /* "View.MemoryView":372
 * self.typeinfo = NULL
 * 
 * def __dealloc__(memoryview self):             # <<<<<<<<<<<<<<
 * if self.obj is not None:
 * __Pyx_ReleaseBuffer(&self.view)
 */
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":389
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/*
 * Cython-generated body of `memoryview.get_item_pointer()` (View.MemoryView:389).
 *
 * Resolves a full index (one integer per dimension) to a raw element
 * pointer: starting from self.view.buf, it enumerates the `index`
 * sequence and applies __pyx_pybuffer_index() once per dimension,
 * advancing `itemp` each time.
 *
 * Return: pointer to the addressed item, or NULL with a Python
 * exception set ("except NULL" semantics of the cdef declaration).
 */
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
  Py_ssize_t __pyx_v_dim;
  char *__pyx_v_itemp;
  PyObject *__pyx_v_idx = NULL;
  char *__pyx_r;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  Py_ssize_t __pyx_t_3;
  PyObject *(*__pyx_t_4)(PyObject *);
  PyObject *__pyx_t_5 = NULL;
  Py_ssize_t __pyx_t_6;
  char *__pyx_t_7;
  __Pyx_RefNannySetupContext("get_item_pointer", 0);
  /* "View.MemoryView":391
 * cdef char *get_item_pointer(memoryview self, object index) except NULL:
 *     cdef Py_ssize_t dim
 *     cdef char *itemp = <char *> self.view.buf             # <<<<<<<<<<<<<<
 *
 *     for dim, idx in enumerate(index):
 */
  /* Start from the base buffer pointer of the underlying Py_buffer. */
  __pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
  /* "View.MemoryView":393
 *     cdef char *itemp = <char *> self.view.buf
 *
 *     for dim, idx in enumerate(index):             # <<<<<<<<<<<<<<
 *         itemp = pybuffer_index(&self.view, itemp, idx, dim)
 *
 */
  /* enumerate(index): __pyx_t_1 is the running `dim` counter; fast path
     indexes lists/tuples directly, otherwise a generic iterator is used. */
  __pyx_t_1 = 0;
  if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
    __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
    __pyx_t_4 = NULL;
  } else {
    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 393, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 393, __pyx_L1_error)
  }
  for (;;) {
    if (likely(!__pyx_t_4)) {
      /* Sequence fast path: bounds-checked positional fetch. */
      if (likely(PyList_CheckExact(__pyx_t_2))) {
        if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
        __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 393, __pyx_L1_error)
        #else
        __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 393, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_5);
        #endif
      } else {
        if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
        __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 393, __pyx_L1_error)
        #else
        __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 393, __pyx_L1_error)
        __Pyx_GOTREF(__pyx_t_5);
        #endif
      }
    } else {
      /* Generic iterator path: StopIteration ends the loop, any other
         exception propagates. */
      __pyx_t_5 = __pyx_t_4(__pyx_t_2);
      if (unlikely(!__pyx_t_5)) {
        PyObject* exc_type = PyErr_Occurred();
        if (exc_type) {
          if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
          else __PYX_ERR(1, 393, __pyx_L1_error)
        }
        break;
      }
      __Pyx_GOTREF(__pyx_t_5);
    }
    __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
    __pyx_t_5 = 0;
    __pyx_v_dim = __pyx_t_1;
    __pyx_t_1 = (__pyx_t_1 + 1);
    /* "View.MemoryView":394
 *
 *     for dim, idx in enumerate(index):
 *         itemp = pybuffer_index(&self.view, itemp, idx, dim)             # <<<<<<<<<<<<<<
 *
 *     return itemp
 */
    /* Coerce this dimension's index to Py_ssize_t, then advance the
       pointer; __pyx_pybuffer_index handles strides/bounds and returns
       NULL on error. */
    __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 394, __pyx_L1_error)
    __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 394, __pyx_L1_error)
    __pyx_v_itemp = __pyx_t_7;
    /* "View.MemoryView":393
 *     cdef char *itemp = <char *> self.view.buf
 *
 *     for dim, idx in enumerate(index):             # <<<<<<<<<<<<<<
 *         itemp = pybuffer_index(&self.view, itemp, idx, dim)
 *
 */
  }
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "View.MemoryView":396
 *         itemp = pybuffer_index(&self.view, itemp, idx, dim)
 *
 *     return itemp             # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_r = __pyx_v_itemp;
  goto __pyx_L0;
  /* "View.MemoryView":389
 * PyThread_free_lock(self.lock)
 *
 * cdef char *get_item_pointer(memoryview self, object index) except NULL:             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t dim
 *     cdef char *itemp = <char *> self.view.buf
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_idx);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":399
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* Python wrapper */
/*
 * Python-level wrapper for `memoryview.__getitem__` (the tp_subscript /
 * mp_subscript slot): casts `self` to the concrete memoryview struct
 * and delegates to the generated implementation function below.
 */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/*
 * Implementation of `memoryview.__getitem__` (View.MemoryView:399).
 *
 * Behavior (mirroring the Cython source quoted in the comments below):
 *   - `mv[...]` (Ellipsis) returns self unchanged;
 *   - otherwise `_unellipsify()` normalizes the index into
 *     (have_slices, indices);
 *   - if any slices are present, a sliced memoryview is produced via
 *     memview_slice();
 *   - else the index fully addresses a single element, which is fetched
 *     via get_item_pointer() and converted to a Python object.
 *
 * Returns a new reference, or NULL with an exception set.
 */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
  PyObject *__pyx_v_have_slices = NULL;
  PyObject *__pyx_v_indices = NULL;
  char *__pyx_v_itemp;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  char *__pyx_t_6;
  __Pyx_RefNannySetupContext("__getitem__", 0);
  /* "View.MemoryView":400
 *
 *     def __getitem__(memoryview self, object index):
 *         if index is Ellipsis:             # <<<<<<<<<<<<<<
 *             return self
 *
 */
  /* Identity check against the Ellipsis singleton. */
  __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":401
 *     def __getitem__(memoryview self, object index):
 *         if index is Ellipsis:
 *             return self             # <<<<<<<<<<<<<<
 *
 *         have_slices, indices = _unellipsify(index, self.view.ndim)
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(((PyObject *)__pyx_v_self));
    __pyx_r = ((PyObject *)__pyx_v_self);
    goto __pyx_L0;
    /* "View.MemoryView":400
 *
 *     def __getitem__(memoryview self, object index):
 *         if index is Ellipsis:             # <<<<<<<<<<<<<<
 *             return self
 *
 */
  }
  /* "View.MemoryView":403
 *             return self
 *
 *         have_slices, indices = _unellipsify(index, self.view.ndim)             # <<<<<<<<<<<<<<
 *
 *         cdef char *itemp
 */
  /* Unpack the 2-tuple returned by _unellipsify; size and None checks
     reproduce Python's tuple-unpacking error behavior. */
  __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 403, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  if (likely(__pyx_t_3 != Py_None)) {
    PyObject* sequence = __pyx_t_3;
    Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
    if (unlikely(size != 2)) {
      if (size > 2) __Pyx_RaiseTooManyValuesError(2);
      else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
      __PYX_ERR(1, 403, __pyx_L1_error)
    }
    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
    __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
    __Pyx_INCREF(__pyx_t_4);
    __Pyx_INCREF(__pyx_t_5);
    #else
    __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 403, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 403, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    #endif
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  } else {
    __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 403, __pyx_L1_error)
  }
  __pyx_v_have_slices = __pyx_t_4;
  __pyx_t_4 = 0;
  __pyx_v_indices = __pyx_t_5;
  __pyx_t_5 = 0;
  /* "View.MemoryView":406
 *
 *         cdef char *itemp
 *         if have_slices:             # <<<<<<<<<<<<<<
 *             return memview_slice(self, indices)
 *         else:
 */
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 406, __pyx_L1_error)
  if (__pyx_t_2) {
    /* "View.MemoryView":407
 *         cdef char *itemp
 *         if have_slices:
 *             return memview_slice(self, indices)             # <<<<<<<<<<<<<<
 *         else:
 *             itemp = self.get_item_pointer(indices)
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;
    /* "View.MemoryView":406
 *
 *         cdef char *itemp
 *         if have_slices:             # <<<<<<<<<<<<<<
 *             return memview_slice(self, indices)
 *         else:
 */
  }
  /* "View.MemoryView":409
 *             return memview_slice(self, indices)
 *         else:
 *             itemp = self.get_item_pointer(indices)             # <<<<<<<<<<<<<<
 *             return self.convert_item_to_object(itemp)
 *
 */
  /*else*/ {
    /* Scalar access: virtual calls go through the memoryview vtable. */
    __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 409, __pyx_L1_error)
    __pyx_v_itemp = __pyx_t_6;
    /* "View.MemoryView":410
 *         else:
 *             itemp = self.get_item_pointer(indices)
 *             return self.convert_item_to_object(itemp)             # <<<<<<<<<<<<<<
 *
 *     def __setitem__(memoryview self, object index, object value):
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 410, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;
  }
  /* "View.MemoryView":399
 *
 *
 *     def __getitem__(memoryview self, object index):             # <<<<<<<<<<<<<<
 *         if index is Ellipsis:
 *             return self
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_have_slices);
  __Pyx_XDECREF(__pyx_v_indices);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":412
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* Python wrapper */
/*
 * Python-level wrapper for `memoryview.__setitem__` (mp_ass_subscript
 * slot): casts `self` and forwards to the generated implementation.
 * Returns 0 on success, -1 on error (standard C-API convention).
 */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/*
 * Implementation of `memoryview.__setitem__` (View.MemoryView:412).
 *
 * Steps (see the quoted Cython source in the comments below):
 *   1. Reject assignment if the underlying buffer is read-only
 *      (raises TypeError);
 *   2. Normalize the index with _unellipsify(), rebinding `index`;
 *   3. If the index contains slices: coerce `value` with is_slice();
 *      a memoryview value is copied element-wise via
 *      setitem_slice_assignment(), otherwise the scalar is broadcast
 *      via setitem_slice_assign_scalar();
 *   4. A fully-indexed element is assigned via setitem_indexed().
 *
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
  PyObject *__pyx_v_have_slices = NULL;
  PyObject *__pyx_v_obj = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  __Pyx_RefNannySetupContext("__setitem__", 0);
  /* `index` is rebound below, so take an owned reference up front. */
  __Pyx_INCREF(__pyx_v_index);
  /* "View.MemoryView":413
 *
 *     def __setitem__(memoryview self, object index, object value):
 *         if self.view.readonly:             # <<<<<<<<<<<<<<
 *             raise TypeError("Cannot assign to read-only memoryview")
 *
 */
  __pyx_t_1 = (__pyx_v_self->view.readonly != 0);
  if (unlikely(__pyx_t_1)) {
    /* "View.MemoryView":414
 *     def __setitem__(memoryview self, object index, object value):
 *         if self.view.readonly:
 *             raise TypeError("Cannot assign to read-only memoryview")             # <<<<<<<<<<<<<<
 *
 *         have_slices, index = _unellipsify(index, self.view.ndim)
 */
    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 414, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(1, 414, __pyx_L1_error)
    /* "View.MemoryView":413
 *
 *     def __setitem__(memoryview self, object index, object value):
 *         if self.view.readonly:             # <<<<<<<<<<<<<<
 *             raise TypeError("Cannot assign to read-only memoryview")
 *
 */
  }
  /* "View.MemoryView":416
 *             raise TypeError("Cannot assign to read-only memoryview")
 *
 *         have_slices, index = _unellipsify(index, self.view.ndim)             # <<<<<<<<<<<<<<
 *
 *         if have_slices:
 */
  /* Unpack the (have_slices, index) 2-tuple from _unellipsify. */
  __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 416, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (likely(__pyx_t_2 != Py_None)) {
    PyObject* sequence = __pyx_t_2;
    Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
    if (unlikely(size != 2)) {
      if (size > 2) __Pyx_RaiseTooManyValuesError(2);
      else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
      __PYX_ERR(1, 416, __pyx_L1_error)
    }
    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
    __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
    __Pyx_INCREF(__pyx_t_3);
    __Pyx_INCREF(__pyx_t_4);
    #else
    __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 416, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 416, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    #endif
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  } else {
    __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 416, __pyx_L1_error)
  }
  __pyx_v_have_slices = __pyx_t_3;
  __pyx_t_3 = 0;
  /* Rebind the local `index` to the normalized index tuple. */
  __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4);
  __pyx_t_4 = 0;
  /* "View.MemoryView":418
 *         have_slices, index = _unellipsify(index, self.view.ndim)
 *
 *         if have_slices:             # <<<<<<<<<<<<<<
 *             obj = self.is_slice(value)
 *             if obj:
 */
  __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 418, __pyx_L1_error)
  if (__pyx_t_1) {
    /* "View.MemoryView":419
 *
 *         if have_slices:
 *             obj = self.is_slice(value)             # <<<<<<<<<<<<<<
 *             if obj:
 *                 self.setitem_slice_assignment(self[index], obj)
 */
    /* is_slice() returns a memoryview coercion of `value`, or None if
       `value` cannot be treated as a buffer (scalar broadcast case). */
    __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 419, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_v_obj = __pyx_t_2;
    __pyx_t_2 = 0;
    /* "View.MemoryView":420
 *         if have_slices:
 *             obj = self.is_slice(value)
 *             if obj:             # <<<<<<<<<<<<<<
 *                 self.setitem_slice_assignment(self[index], obj)
 *             else:
 */
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 420, __pyx_L1_error)
    if (__pyx_t_1) {
      /* "View.MemoryView":421
 *             obj = self.is_slice(value)
 *             if obj:
 *                 self.setitem_slice_assignment(self[index], obj)             # <<<<<<<<<<<<<<
 *             else:
 *                 self.setitem_slice_assign_scalar(self[index], value)
 */
      __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 421, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
      __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 421, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      /* "View.MemoryView":420
 *         if have_slices:
 *             obj = self.is_slice(value)
 *             if obj:             # <<<<<<<<<<<<<<
 *                 self.setitem_slice_assignment(self[index], obj)
 *             else:
 */
      goto __pyx_L5;
    }
    /* "View.MemoryView":423
 *                 self.setitem_slice_assignment(self[index], obj)
 *             else:
 *                 self.setitem_slice_assign_scalar(self[index], value)             # <<<<<<<<<<<<<<
 *         else:
 *             self.setitem_indexed(index, value)
 */
    /*else*/ {
      /* The destination sub-view must itself be a memoryview (or None);
         the TypeTest enforces that before the scalar broadcast. */
      __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 423, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 423, __pyx_L1_error)
      __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_2);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    }
    __pyx_L5:;
    /* "View.MemoryView":418
 *         have_slices, index = _unellipsify(index, self.view.ndim)
 *
 *         if have_slices:             # <<<<<<<<<<<<<<
 *             obj = self.is_slice(value)
 *             if obj:
 */
    goto __pyx_L4;
  }
  /* "View.MemoryView":425
 *                 self.setitem_slice_assign_scalar(self[index], value)
 *         else:
 *             self.setitem_indexed(index, value)             # <<<<<<<<<<<<<<
 *
 *     cdef is_slice(self, obj):
 */
  /*else*/ {
    __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  }
  __pyx_L4:;
  /* "View.MemoryView":412
 *             return self.convert_item_to_object(itemp)
 *
 *     def __setitem__(memoryview self, object index, object value):             # <<<<<<<<<<<<<<
 *         if self.view.readonly:
 *             raise TypeError("Cannot assign to read-only memoryview")
 */
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_have_slices);
  __Pyx_XDECREF(__pyx_v_obj);
  __Pyx_XDECREF(__pyx_v_index);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":427
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/*
 * Implementation of `memoryview.is_slice()` (View.MemoryView:427).
 *
 * Coerces `obj` into a memoryview: if it already is one it is returned
 * unchanged; otherwise the code attempts to construct a new memoryview
 * over it (dropping PyBUF_WRITABLE and requesting any-contiguous
 * layout). A TypeError from that construction is swallowed and None is
 * returned, signalling "not a buffer" to the __setitem__ caller.
 *
 * The t_3/t_4/t_5 save/reset pairs below are the generated machinery
 * for the Cython-level try/except block.
 */
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  int __pyx_t_9;
  __Pyx_RefNannySetupContext("is_slice", 0);
  /* `obj` is rebound inside the try block, so hold an owned reference. */
  __Pyx_INCREF(__pyx_v_obj);
  /* "View.MemoryView":428
 *
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):             # <<<<<<<<<<<<<<
 *             try:
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
  __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":429
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):
 *             try:             # <<<<<<<<<<<<<<
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 *                     self.dtype_is_object)
 */
    {
      __Pyx_PyThreadState_declare
      __Pyx_PyThreadState_assign
      __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
      __Pyx_XGOTREF(__pyx_t_3);
      __Pyx_XGOTREF(__pyx_t_4);
      __Pyx_XGOTREF(__pyx_t_5);
      /*try:*/ {
        /* "View.MemoryView":430
 *         if not isinstance(obj, memoryview):
 *             try:
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,             # <<<<<<<<<<<<<<
 *                     self.dtype_is_object)
 *             except TypeError:
 */
        /* Box the flags int and dtype_is_object bool for the call. */
        __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 430, __pyx_L4_error)
        __Pyx_GOTREF(__pyx_t_6);
        /* "View.MemoryView":431
 *             try:
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 *                     self.dtype_is_object)             # <<<<<<<<<<<<<<
 *             except TypeError:
 *                 return None
 */
        __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 431, __pyx_L4_error)
        __Pyx_GOTREF(__pyx_t_7);
        /* "View.MemoryView":430
 *         if not isinstance(obj, memoryview):
 *             try:
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,             # <<<<<<<<<<<<<<
 *                     self.dtype_is_object)
 *             except TypeError:
 */
        /* Build the (obj, flags, dtype_is_object) argument tuple and
           call the memoryview type. */
        __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 430, __pyx_L4_error)
        __Pyx_GOTREF(__pyx_t_8);
        __Pyx_INCREF(__pyx_v_obj);
        __Pyx_GIVEREF(__pyx_v_obj);
        PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
        __Pyx_GIVEREF(__pyx_t_6);
        PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
        __Pyx_GIVEREF(__pyx_t_7);
        PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
        __pyx_t_6 = 0;
        __pyx_t_7 = 0;
        __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 430, __pyx_L4_error)
        __Pyx_GOTREF(__pyx_t_7);
        __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
        __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
        __pyx_t_7 = 0;
        /* "View.MemoryView":429
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):
 *             try:             # <<<<<<<<<<<<<<
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 *                     self.dtype_is_object)
 */
      }
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      goto __pyx_L9_try_end;
      __pyx_L4_error:;
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
      __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
      /* "View.MemoryView":432
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 *                     self.dtype_is_object)
 *             except TypeError:             # <<<<<<<<<<<<<<
 *                 return None
 *
 */
      /* Only TypeError is handled; anything else re-raises. */
      __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
      if (__pyx_t_9) {
        __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
        if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 432, __pyx_L6_except_error)
        __Pyx_GOTREF(__pyx_t_7);
        __Pyx_GOTREF(__pyx_t_8);
        __Pyx_GOTREF(__pyx_t_6);
        /* "View.MemoryView":433
 *                     self.dtype_is_object)
 *             except TypeError:
 *                 return None             # <<<<<<<<<<<<<<
 *
 *         return obj
 */
        __Pyx_XDECREF(__pyx_r);
        __pyx_r = Py_None; __Pyx_INCREF(Py_None);
        __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
        __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
        __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
        goto __pyx_L7_except_return;
      }
      goto __pyx_L6_except_error;
      __pyx_L6_except_error:;
      /* "View.MemoryView":429
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):
 *             try:             # <<<<<<<<<<<<<<
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 *                     self.dtype_is_object)
 */
      /* Restore the previously-saved exception state before leaving. */
      __Pyx_XGIVEREF(__pyx_t_3);
      __Pyx_XGIVEREF(__pyx_t_4);
      __Pyx_XGIVEREF(__pyx_t_5);
      __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
      goto __pyx_L1_error;
      __pyx_L7_except_return:;
      __Pyx_XGIVEREF(__pyx_t_3);
      __Pyx_XGIVEREF(__pyx_t_4);
      __Pyx_XGIVEREF(__pyx_t_5);
      __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
      goto __pyx_L0;
      __pyx_L9_try_end:;
    }
    /* "View.MemoryView":428
 *
 *     cdef is_slice(self, obj):
 *         if not isinstance(obj, memoryview):             # <<<<<<<<<<<<<<
 *             try:
 *                 obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 */
  }
  /* "View.MemoryView":435
 *                 return None
 *
 *         return obj             # <<<<<<<<<<<<<<
 *
 *     cdef setitem_slice_assignment(self, dst, src):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_obj);
  __pyx_r = __pyx_v_obj;
  goto __pyx_L0;
  /* "View.MemoryView":427
 *             self.setitem_indexed(index, value)
 *
 *     cdef is_slice(self, obj):             # <<<<<<<<<<<<<<
 *         if not isinstance(obj, memoryview):
 *             try:
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":437
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/*
 * Implementation of `memoryview.setitem_slice_assignment()`
 * (View.MemoryView:437).
 *
 * Copies the contents of memoryview `src` into memoryview `dst`
 * element-wise by extracting a __Pyx_memviewslice from each and
 * delegating to memoryview_copy_contents() with both ndims and the
 * dtype_is_object flag (which controls per-element refcounting).
 *
 * Returns None on success, 0 (NULL) with an exception set on failure.
 */
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
  __Pyx_memviewslice __pyx_v_dst_slice;
  __Pyx_memviewslice __pyx_v_src_slice;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  __Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
  /* "View.MemoryView":441
 *         cdef __Pyx_memviewslice src_slice
 *
 *         memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],             # <<<<<<<<<<<<<<
 *                                  get_slice_from_memview(dst, &dst_slice)[0],
 *                                  src.ndim, dst.ndim, self.dtype_is_object)
 */
  /* Both arguments must be memoryview instances (or None) before the
     unchecked struct casts in the copy call below. */
  if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 441, __pyx_L1_error)
  /* "View.MemoryView":442
 *
 *         memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
 *                                  get_slice_from_memview(dst, &dst_slice)[0],             # <<<<<<<<<<<<<<
 *                                  src.ndim, dst.ndim, self.dtype_is_object)
 *
 */
  if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 442, __pyx_L1_error)
  /* "View.MemoryView":443
 *         memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
 *                                  get_slice_from_memview(dst, &dst_slice)[0],
 *                                  src.ndim, dst.ndim, self.dtype_is_object)             # <<<<<<<<<<<<<<
 *
 *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 */
  /* Fetch src.ndim / dst.ndim via attribute lookup (they are Python
     attributes on untyped objects here) and coerce to C int. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 443, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 443, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 443, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* "View.MemoryView":441
 *         cdef __Pyx_memviewslice src_slice
 *
 *         memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],             # <<<<<<<<<<<<<<
 *                                  get_slice_from_memview(dst, &dst_slice)[0],
 *                                  src.ndim, dst.ndim, self.dtype_is_object)
 */
  __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 441, __pyx_L1_error)
  /* "View.MemoryView":437
 *         return obj
 *
 *     cdef setitem_slice_assignment(self, dst, src):             # <<<<<<<<<<<<<<
 *         cdef __Pyx_memviewslice dst_slice
 *         cdef __Pyx_memviewslice src_slice
 */
  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":445
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/*
 * Implementation of `memoryview.setitem_slice_assign_scalar()`
 * (View.MemoryView:445).
 *
 * Broadcasts a single scalar `value` to every element of the
 * destination slice `dst`:
 *   - the scalar is first serialized into a one-item staging buffer:
 *     a 512-byte stack array (int array[128]) if the itemsize fits,
 *     otherwise a PyMem_Malloc'ed heap buffer;
 *   - object dtypes store the PyObject* directly, other dtypes go
 *     through assign_item_from_object();
 *   - slice_assign_scalar() then replicates the staged item across
 *     all elements of dst;
 *   - the generated try/finally machinery guarantees PyMem_Free(tmp)
 *     runs on both the normal and the exception path (freeing NULL is
 *     a no-op when the stack buffer was used).
 *
 * Returns None on success, 0 (NULL) with an exception set on failure.
 */
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
  int __pyx_v_array[0x80];
  void *__pyx_v_tmp;
  void *__pyx_v_item;
  __Pyx_memviewslice *__pyx_v_dst_slice;
  __Pyx_memviewslice __pyx_v_tmp_slice;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  int __pyx_t_4;
  char const *__pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
  /* "View.MemoryView":447
 *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 *         cdef int array[128]
 *         cdef void *tmp = NULL             # <<<<<<<<<<<<<<
 *         cdef void *item
 *
 */
  __pyx_v_tmp = NULL;
  /* "View.MemoryView":452
 *         cdef __Pyx_memviewslice *dst_slice
 *         cdef __Pyx_memviewslice tmp_slice
 *         dst_slice = get_slice_from_memview(dst, &tmp_slice)             # <<<<<<<<<<<<<<
 *
 *         if <size_t>self.view.itemsize > sizeof(array):
 */
  __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice));
  /* "View.MemoryView":454
 *         dst_slice = get_slice_from_memview(dst, &tmp_slice)
 *
 *         if <size_t>self.view.itemsize > sizeof(array):             # <<<<<<<<<<<<<<
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:
 */
  /* Pick the staging buffer: heap only when the item doesn't fit the
     stack array. */
  __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":455
 *
 *         if <size_t>self.view.itemsize > sizeof(array):
 *             tmp = PyMem_Malloc(self.view.itemsize)             # <<<<<<<<<<<<<<
 *             if tmp == NULL:
 *                 raise MemoryError
 */
    __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
    /* "View.MemoryView":456
 *         if <size_t>self.view.itemsize > sizeof(array):
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError
 *             item = tmp
 */
    __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0);
    if (unlikely(__pyx_t_1)) {
      /* "View.MemoryView":457
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:
 *                 raise MemoryError             # <<<<<<<<<<<<<<
 *             item = tmp
 *         else:
 */
      PyErr_NoMemory(); __PYX_ERR(1, 457, __pyx_L1_error)
      /* "View.MemoryView":456
 *         if <size_t>self.view.itemsize > sizeof(array):
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:             # <<<<<<<<<<<<<<
 *                 raise MemoryError
 *             item = tmp
 */
    }
    /* "View.MemoryView":458
 *             if tmp == NULL:
 *                 raise MemoryError
 *             item = tmp             # <<<<<<<<<<<<<<
 *         else:
 *             item = <void *> array
 */
    __pyx_v_item = __pyx_v_tmp;
    /* "View.MemoryView":454
 *         dst_slice = get_slice_from_memview(dst, &tmp_slice)
 *
 *         if <size_t>self.view.itemsize > sizeof(array):             # <<<<<<<<<<<<<<
 *             tmp = PyMem_Malloc(self.view.itemsize)
 *             if tmp == NULL:
 */
    goto __pyx_L3;
  }
  /* "View.MemoryView":460
 *             item = tmp
 *         else:
 *             item = <void *> array             # <<<<<<<<<<<<<<
 *
 *         try:
 */
  /*else*/ {
    __pyx_v_item = ((void *)__pyx_v_array);
  }
  __pyx_L3:;
  /* "View.MemoryView":462
 *             item = <void *> array
 *
 *         try:             # <<<<<<<<<<<<<<
 *             if self.dtype_is_object:
 *                 (<PyObject **> item)[0] = <PyObject *> value
 */
  /*try:*/ {
    /* "View.MemoryView":463
 *
 *         try:
 *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
 *                 (<PyObject **> item)[0] = <PyObject *> value
 *             else:
 */
    __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
    if (__pyx_t_1) {
      /* "View.MemoryView":464
 *         try:
 *             if self.dtype_is_object:
 *                 (<PyObject **> item)[0] = <PyObject *> value             # <<<<<<<<<<<<<<
 *             else:
 *                 self.assign_item_from_object(<char *> item, value)
 */
      /* Object dtype: the staged "item" is the borrowed PyObject*
         itself; refcounting is handled later by slice_assign_scalar. */
      (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
      /* "View.MemoryView":463
 *
 *         try:
 *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
 *                 (<PyObject **> item)[0] = <PyObject *> value
 *             else:
 */
      goto __pyx_L8;
    }
    /* "View.MemoryView":466
 *                 (<PyObject **> item)[0] = <PyObject *> value
 *             else:
 *                 self.assign_item_from_object(<char *> item, value)             # <<<<<<<<<<<<<<
 *
 *
 */
    /*else*/ {
      __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 466, __pyx_L6_error)
      __Pyx_GOTREF(__pyx_t_2);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    }
    __pyx_L8:;
    /* "View.MemoryView":470
 *
 *
 *         if self.view.suboffsets != NULL:             # <<<<<<<<<<<<<<
 *             assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 */
    /* Indirect (PIL-style) buffers are not supported for broadcast;
       assert_direct_dimensions raises if any suboffset is in use. */
    __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
    if (__pyx_t_1) {
      /* "View.MemoryView":471
 *
 *         if self.view.suboffsets != NULL:
 *             assert_direct_dimensions(self.view.suboffsets, self.view.ndim)             # <<<<<<<<<<<<<<
 *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 *                 item, self.dtype_is_object)
 */
      __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 471, __pyx_L6_error)
      __Pyx_GOTREF(__pyx_t_2);
      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
      /* "View.MemoryView":470
 *
 *
 *         if self.view.suboffsets != NULL:             # <<<<<<<<<<<<<<
 *             assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 */
    }
    /* "View.MemoryView":472
 *         if self.view.suboffsets != NULL:
 *             assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 *             slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,             # <<<<<<<<<<<<<<
 *                 item, self.dtype_is_object)
 *         finally:
 */
    __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
  }
  /* "View.MemoryView":475
 *                 item, self.dtype_is_object)
 *         finally:
 *             PyMem_Free(tmp)             # <<<<<<<<<<<<<<
 *
 *     cdef setitem_indexed(self, index, value):
 */
  /*finally:*/ {
    /*normal exit:*/{
      PyMem_Free(__pyx_v_tmp);
      goto __pyx_L7;
    }
    __pyx_L6_error:;
    /*exception exit:*/{
      /* finally-on-error: stash the live exception (and line info),
         free the buffer, then restore the exception and propagate. */
      __Pyx_PyThreadState_declare
      __Pyx_PyThreadState_assign
      __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11);
      if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8);
      __Pyx_XGOTREF(__pyx_t_6);
      __Pyx_XGOTREF(__pyx_t_7);
      __Pyx_XGOTREF(__pyx_t_8);
      __Pyx_XGOTREF(__pyx_t_9);
      __Pyx_XGOTREF(__pyx_t_10);
      __Pyx_XGOTREF(__pyx_t_11);
      __pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename;
      {
        PyMem_Free(__pyx_v_tmp);
      }
      if (PY_MAJOR_VERSION >= 3) {
        __Pyx_XGIVEREF(__pyx_t_9);
        __Pyx_XGIVEREF(__pyx_t_10);
        __Pyx_XGIVEREF(__pyx_t_11);
        __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11);
      }
      __Pyx_XGIVEREF(__pyx_t_6);
      __Pyx_XGIVEREF(__pyx_t_7);
      __Pyx_XGIVEREF(__pyx_t_8);
      __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8);
      __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
      __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5;
      goto __pyx_L1_error;
    }
    __pyx_L7:;
  }
  /* "View.MemoryView":445
 *                                  src.ndim, dst.ndim, self.dtype_is_object)
 *
 *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):             # <<<<<<<<<<<<<<
 *         cdef int array[128]
 *         cdef void *tmp = NULL
 */
  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":477
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* Cython-generated: memoryview.setitem_indexed(self, index, value).
 * Resolves `index` to a raw element pointer through the vtable's
 * get_item_pointer, then delegates the actual store to the (possibly
 * overridden) assign_item_from_object vtable slot.
 * Returns None on success; NULL with a Python exception set on failure. */
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":478
 *
 * cdef setitem_indexed(self, index, value):
 * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
 * self.assign_item_from_object(itemp, value)
 *
 */
/* A NULL result means get_item_pointer already set a Python exception. */
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 478, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":479
 * cdef setitem_indexed(self, index, value):
 * cdef char *itemp = self.get_item_pointer(index)
 * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
 *
 * cdef convert_item_to_object(self, char *itemp):
 */
/* The return value (None) is only checked for error, then dropped. */
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 479, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":477
 * PyMem_Free(tmp)
 *
 * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
 * cdef char *itemp = self.get_item_pointer(index)
 * self.assign_item_from_object(itemp, value)
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":481
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* Cython-generated: memoryview.convert_item_to_object(self, itemp).
 * Fallback element -> Python-object conversion, used only when the
 * memoryview was created manually or Cython cannot convert the dtype
 * directly.  Copies view.itemsize bytes at `itemp` into a bytes object
 * and unpacks it with struct.unpack(self.view.format, ...).
 * struct.error is translated into
 * ValueError("Unable to convert item to object"); for single-character
 * format strings the lone unpacked value is returned unwrapped instead
 * of a 1-tuple.  Returns NULL with an exception set on failure. */
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":484
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 * import struct # <<<<<<<<<<<<<<
 * cdef bytes bytesitem
 *
 */
/* The struct module is imported lazily here because this slow path is
 * rarely taken. */
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 484, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":487
 * cdef bytes bytesitem
 *
 * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
 * try:
 * result = struct.unpack(self.view.format, bytesitem)
 */
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 487, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":488
 *
 * bytesitem = itemp[:self.view.itemsize]
 * try: # <<<<<<<<<<<<<<
 * result = struct.unpack(self.view.format, bytesitem)
 * except struct.error:
 */
/* Save the current exception state (t_2..t_4) so it can be restored on
 * both the except and the return paths of the generated try/except. */
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":489
 * bytesitem = itemp[:self.view.itemsize]
 * try:
 * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
 * except struct.error:
 * raise ValueError("Unable to convert item to object")
 */
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 489, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 489, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
/* Generic Cython call-dispatch: unwrap bound methods, then try the
 * FASTCALL protocols before falling back to a tuple-based call. */
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 489, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":488
 *
 * bytesitem = itemp[:self.view.itemsize]
 * try: # <<<<<<<<<<<<<<
 * result = struct.unpack(self.view.format, bytesitem)
 * except struct.error:
 */
}
/* "View.MemoryView":493
 * raise ValueError("Unable to convert item to object")
 * else:
 * if len(self.view.format) == 1: # <<<<<<<<<<<<<<
 * return result[0]
 * return result
 */
/*else:*/ {
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":494
 * else:
 * if len(self.view.format) == 1:
 * return result[0] # <<<<<<<<<<<<<<
 * return result
 *
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
/* "View.MemoryView":493
 * raise ValueError("Unable to convert item to object")
 * else:
 * if len(self.view.format) == 1: # <<<<<<<<<<<<<<
 * return result[0]
 * return result
 */
}
/* "View.MemoryView":495
 * if len(self.view.format) == 1:
 * return result[0]
 * return result # <<<<<<<<<<<<<<
 *
 * cdef assign_item_from_object(self, char *itemp, object value):
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":490
 * try:
 * result = struct.unpack(self.view.format, bytesitem)
 * except struct.error: # <<<<<<<<<<<<<<
 * raise ValueError("Unable to convert item to object")
 * else:
 */
/* Match the live exception against struct.error without normalizing it
 * (ErrFetch/ErrRestore keep the raw state while we test the type). */
__Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 490, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9);
__pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0;
if (__pyx_t_8) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 490, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_1);
/* "View.MemoryView":491
 * result = struct.unpack(self.view.format, bytesitem)
 * except struct.error:
 * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
 * else:
 * if len(self.view.format) == 1:
 */
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 491, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 491, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "View.MemoryView":488
 *
 * bytesitem = itemp[:self.view.itemsize]
 * try: # <<<<<<<<<<<<<<
 * result = struct.unpack(self.view.format, bytesitem)
 * except struct.error:
 */
/* Restore the exception state saved on entry before propagating. */
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":481
 * self.assign_item_from_object(itemp, value)
 *
 * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":497
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* Cython-generated: memoryview.assign_item_from_object(self, itemp, value).
 * Fallback Python-object -> element conversion (inverse of
 * convert_item_to_object): packs `value` with
 * struct.pack(self.view.format, ...) — splatting it when it is a tuple,
 * passing it as a single argument otherwise — then copies the resulting
 * bytes into the element storage at `itemp` one byte at a time.
 * Returns None on success; NULL with an exception set on failure. */
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
char *__pyx_t_14;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":500
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 * import struct # <<<<<<<<<<<<<<
 * cdef char c
 * cdef bytes bytesvalue
 */
/* Lazy import: this conversion path is the rarely-used fallback. */
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 500, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":505
 * cdef Py_ssize_t i
 *
 * if isinstance(value, tuple): # <<<<<<<<<<<<<<
 * bytesvalue = struct.pack(self.view.format, *value)
 * else:
 */
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":506
 *
 * if isinstance(value, tuple):
 * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
 * else:
 * bytesvalue = struct.pack(self.view.format, value)
 */
/* Star-unpacking path: build (format,) + tuple(value) and call pack. */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 506, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 506, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 506, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 506, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 506, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 506, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 506, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":505
 * cdef Py_ssize_t i
 *
 * if isinstance(value, tuple): # <<<<<<<<<<<<<<
 * bytesvalue = struct.pack(self.view.format, *value)
 * else:
 */
goto __pyx_L3;
}
/* "View.MemoryView":508
 * bytesvalue = struct.pack(self.view.format, *value)
 * else:
 * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
 *
 * for i, c in enumerate(bytesvalue):
 */
/*else*/ {
/* Scalar path: pack(format, value) via the generic call dispatch
 * (bound-method unwrap, then FASTCALL, then tuple call). */
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 508, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 508, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 508, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 508, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 508, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":510
 * bytesvalue = struct.pack(self.view.format, value)
 *
 * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
 * itemp[i] = c
 *
 */
/* enumerate() over the bytes object is compiled down to a raw pointer
 * walk over its internal buffer, with t_9 tracking the index. */
__pyx_t_9 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
__PYX_ERR(1, 510, __pyx_L1_error)
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_10 = __pyx_v_bytesvalue;
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
__pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
__pyx_t_11 = __pyx_t_14;
__pyx_v_c = (__pyx_t_11[0]);
/* "View.MemoryView":511
 *
 * for i, c in enumerate(bytesvalue):
 * itemp[i] = c # <<<<<<<<<<<<<<
 *
 * @cname('getbuffer')
 */
__pyx_v_i = __pyx_t_9;
/* "View.MemoryView":510
 * bytesvalue = struct.pack(self.view.format, value)
 *
 * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
 * itemp[i] = c
 *
 */
__pyx_t_9 = (__pyx_t_9 + 1);
/* "View.MemoryView":511
 *
 * for i, c in enumerate(bytesvalue):
 * itemp[i] = c # <<<<<<<<<<<<<<
 *
 * @cname('getbuffer')
 */
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "View.MemoryView":497
 * return result
 *
 * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":514
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* Python wrapper */
/* Cython-generated Python wrapper for memoryview.__getbuffer__ (the
 * bf_getbuffer slot, exported under @cname('getbuffer')).  Casts the
 * generic PyObject* self and forwards to the typed implementation. */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated: memoryview.__getbuffer__(self, Py_buffer *info, flags).
 * Implements the PEP 3118 buffer protocol by re-exporting this
 * memoryview's own Py_buffer (self->view) into `info`.  Rejects
 * PyBUF_WRITABLE requests on read-only views; each optional field
 * (shape, strides, suboffsets, format) is exported only when the
 * corresponding flag bit is set, otherwise NULLed out per the protocol.
 * Returns 0 on success, -1 with an exception set on failure; on failure
 * info->obj is cleared so the caller never releases a half-filled view. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
char *__pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
/* Pre-set info->obj to None so the error path can safely clear it. */
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":515
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
 * raise ValueError("Cannot create writable memory view from read-only memoryview")
 *
 */
__pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = (__pyx_v_self->view.readonly != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":516
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * if flags & PyBUF_WRITABLE and self.view.readonly:
 * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
 *
 * if flags & PyBUF_ND:
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 516, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 516, __pyx_L1_error)
/* "View.MemoryView":515
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
 * raise ValueError("Cannot create writable memory view from read-only memoryview")
 *
 */
}
/* "View.MemoryView":518
 * raise ValueError("Cannot create writable memory view from read-only memoryview")
 *
 * if flags & PyBUF_ND: # <<<<<<<<<<<<<<
 * info.shape = self.view.shape
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":519
 *
 * if flags & PyBUF_ND:
 * info.shape = self.view.shape # <<<<<<<<<<<<<<
 * else:
 * info.shape = NULL
 */
__pyx_t_4 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_4;
/* "View.MemoryView":518
 * raise ValueError("Cannot create writable memory view from read-only memoryview")
 *
 * if flags & PyBUF_ND: # <<<<<<<<<<<<<<
 * info.shape = self.view.shape
 * else:
 */
goto __pyx_L6;
}
/* "View.MemoryView":521
 * info.shape = self.view.shape
 * else:
 * info.shape = NULL # <<<<<<<<<<<<<<
 *
 * if flags & PyBUF_STRIDES:
 */
/*else*/ {
__pyx_v_info->shape = NULL;
}
__pyx_L6:;
/* "View.MemoryView":523
 * info.shape = NULL
 *
 * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
 * info.strides = self.view.strides
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":524
 *
 * if flags & PyBUF_STRIDES:
 * info.strides = self.view.strides # <<<<<<<<<<<<<<
 * else:
 * info.strides = NULL
 */
__pyx_t_4 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_4;
/* "View.MemoryView":523
 * info.shape = NULL
 *
 * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
 * info.strides = self.view.strides
 * else:
 */
goto __pyx_L7;
}
/* "View.MemoryView":526
 * info.strides = self.view.strides
 * else:
 * info.strides = NULL # <<<<<<<<<<<<<<
 *
 * if flags & PyBUF_INDIRECT:
 */
/*else*/ {
__pyx_v_info->strides = NULL;
}
__pyx_L7:;
/* "View.MemoryView":528
 * info.strides = NULL
 *
 * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
 * info.suboffsets = self.view.suboffsets
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":529
 *
 * if flags & PyBUF_INDIRECT:
 * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
 * else:
 * info.suboffsets = NULL
 */
__pyx_t_4 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_4;
/* "View.MemoryView":528
 * info.strides = NULL
 *
 * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
 * info.suboffsets = self.view.suboffsets
 * else:
 */
goto __pyx_L8;
}
/* "View.MemoryView":531
 * info.suboffsets = self.view.suboffsets
 * else:
 * info.suboffsets = NULL # <<<<<<<<<<<<<<
 *
 * if flags & PyBUF_FORMAT:
 */
/*else*/ {
__pyx_v_info->suboffsets = NULL;
}
__pyx_L8:;
/* "View.MemoryView":533
 * info.suboffsets = NULL
 *
 * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
 * info.format = self.view.format
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":534
 *
 * if flags & PyBUF_FORMAT:
 * info.format = self.view.format # <<<<<<<<<<<<<<
 * else:
 * info.format = NULL
 */
__pyx_t_5 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_5;
/* "View.MemoryView":533
 * info.suboffsets = NULL
 *
 * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
 * info.format = self.view.format
 * else:
 */
goto __pyx_L9;
}
/* "View.MemoryView":536
 * info.format = self.view.format
 * else:
 * info.format = NULL # <<<<<<<<<<<<<<
 *
 * info.buf = self.view.buf
 */
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L9:;
/* "View.MemoryView":538
 * info.format = NULL
 *
 * info.buf = self.view.buf # <<<<<<<<<<<<<<
 * info.ndim = self.view.ndim
 * info.itemsize = self.view.itemsize
 */
/* Mandatory fields are copied unconditionally below. */
__pyx_t_6 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_6;
/* "View.MemoryView":539
 *
 * info.buf = self.view.buf
 * info.ndim = self.view.ndim # <<<<<<<<<<<<<<
 * info.itemsize = self.view.itemsize
 * info.len = self.view.len
 */
__pyx_t_7 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_7;
/* "View.MemoryView":540
 * info.buf = self.view.buf
 * info.ndim = self.view.ndim
 * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
 * info.len = self.view.len
 * info.readonly = self.view.readonly
 */
__pyx_t_8 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_8;
/* "View.MemoryView":541
 * info.ndim = self.view.ndim
 * info.itemsize = self.view.itemsize
 * info.len = self.view.len # <<<<<<<<<<<<<<
 * info.readonly = self.view.readonly
 * info.obj = self
 */
__pyx_t_8 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_8;
/* "View.MemoryView":542
 * info.itemsize = self.view.itemsize
 * info.len = self.view.len
 * info.readonly = self.view.readonly # <<<<<<<<<<<<<<
 * info.obj = self
 *
 */
__pyx_t_1 = __pyx_v_self->view.readonly;
__pyx_v_info->readonly = __pyx_t_1;
/* "View.MemoryView":543
 * info.len = self.view.len
 * info.readonly = self.view.readonly
 * info.obj = self # <<<<<<<<<<<<<<
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
 */
/* Swap the placeholder None for an owned reference to self so the
 * consumer's PyBuffer_Release keeps this memoryview alive. */
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":514
 *
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
 * if flags & PyBUF_WRITABLE and self.view.readonly:
 * raise ValueError("Cannot create writable memory view from read-only memoryview")
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":549
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
/* Cython-generated Python wrapper for the memoryview.T property getter:
 * casts self and forwards to the typed __get__ implementation. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated: memoryview.T property getter.
 * Makes a copy of this memoryview (memoryview_copy), transposes the
 * copy's slice descriptor in place (__pyx_memslice_transpose), and
 * returns the transposed copy.  Returns NULL with an exception set on
 * failure. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":550
 * @property
 * def T(self):
 * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
 * transpose_memslice(&result.from_slice)
 * return result
 */
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 550, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 550, __pyx_L1_error)
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":551
 * def T(self):
 * cdef _memoryviewslice result = memoryview_copy(self)
 * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
 * return result
 *
 */
/* transpose_memslice returns 0 on error (exception already set). */
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 551, __pyx_L1_error)
/* "View.MemoryView":552
 * cdef _memoryviewslice result = memoryview_copy(self)
 * transpose_memslice(&result.from_slice)
 * return result # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":549
 *
 * @property
 * def T(self): # <<<<<<<<<<<<<<
 * cdef _memoryviewslice result = memoryview_copy(self)
 * transpose_memslice(&result.from_slice)
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":555
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
/* Cython-generated Python wrapper for the memoryview.base property
 * getter: casts self and forwards to the typed __get__ implementation. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated: memoryview.base property getter.
 * Returns a new reference to the object this memoryview was created
 * from (self->obj).  Cannot fail. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":556
 * @property
 * def base(self):
 * return self.obj # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":555
 *
 * @property
 * def base(self): # <<<<<<<<<<<<<<
 * return self.obj
 *
 */
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":559
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* Python wrapper */
/* Cython-generated Python wrapper for the memoryview.shape property
 * getter: casts self and forwards to the typed __get__ implementation. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":560
* @property
* def shape(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 560, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 560, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 560, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 560, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":559
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":563
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
/* Cython-generated getter pair for `memoryview.strides`. AUTO-GENERATED
 * CODE. Raises ValueError when the underlying Py_buffer does not expose
 * strides; otherwise returns a tuple of the first `ndim` stride values. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":564
 * @property
 * def strides(self):
 * if self.view.strides == NULL: # <<<<<<<<<<<<<<
 *
 * raise ValueError("Buffer view does not expose strides")
 */
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":566
 * if self.view.strides == NULL:
 *
 * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
 *
 * return tuple([stride for stride in self.view.strides[:self.view.ndim]])
 */
/* __pyx_tuple__11 holds the pre-built ValueError argument tuple. */
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 566, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 566, __pyx_L1_error)
/* "View.MemoryView":564
 * @property
 * def strides(self):
 * if self.view.strides == NULL: # <<<<<<<<<<<<<<
 *
 * raise ValueError("Buffer view does not expose strides")
 */
}
/* "View.MemoryView":568
 * raise ValueError("Buffer view does not expose strides")
 *
 * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 568, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* Pointer loop over view.strides[0 .. ndim-1], boxing each value. */
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 568, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 568, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 568, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":563
 *
 * @property
 * def strides(self): # <<<<<<<<<<<<<<
 * if self.view.strides == NULL:
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":571
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* Python wrapper */
/* Cython-generated getter pair for `memoryview.suboffsets`. AUTO-GENERATED
 * CODE. When the Py_buffer has no suboffsets array it returns
 * `(-1,) * ndim` (the buffer-protocol convention for "no suboffset");
 * otherwise a tuple of the first `ndim` suboffset values. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":572
 * @property
 * def suboffsets(self):
 * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
 * return (-1,) * self.view.ndim
 *
 */
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":573
 * def suboffsets(self):
 * if self.view.suboffsets == NULL:
 * return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
 *
 * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
 */
__Pyx_XDECREF(__pyx_r);
/* __pyx_tuple__12 is the pre-built constant tuple (-1,); tuple-repeat
 * via PyNumber_Multiply yields (-1,) * ndim. */
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 573, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__12, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 573, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":572
 * @property
 * def suboffsets(self):
 * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
 * return (-1,) * self.view.ndim
 *
 */
}
/* "View.MemoryView":575
 * return (-1,) * self.view.ndim
 *
 * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 575, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* Pointer loop over view.suboffsets[0 .. ndim-1], boxing each value. */
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 575, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 575, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 575, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":571
 *
 * @property
 * def suboffsets(self): # <<<<<<<<<<<<<<
 * if self.view.suboffsets == NULL:
 * return (-1,) * self.view.ndim
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":578
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
/* Cython-generated getter pair for `memoryview.ndim`. AUTO-GENERATED CODE.
 * Boxes the Py_buffer's `ndim` (a C int) into a Python integer. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":579
 * @property
 * def ndim(self):
 * return self.view.ndim # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":578
 *
 * @property
 * def ndim(self): # <<<<<<<<<<<<<<
 * return self.view.ndim
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":582
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
/* Cython-generated getter pair for `memoryview.itemsize`. AUTO-GENERATED
 * CODE. Boxes the Py_buffer's `itemsize` (Py_ssize_t) into a Python int. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":583
 * @property
 * def itemsize(self):
 * return self.view.itemsize # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":582
 *
 * @property
 * def itemsize(self): # <<<<<<<<<<<<<<
 * return self.view.itemsize
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":586
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
/* Cython-generated getter pair for `memoryview.nbytes`. AUTO-GENERATED
 * CODE. Computes `self.size * self.view.itemsize` as Python objects —
 * note it goes through the Python `size` attribute lookup (which may
 * itself compute and cache the element count) rather than a C field. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":587
 * @property
 * def nbytes(self):
 * return self.size * self.view.itemsize # <<<<<<<<<<<<<<
 *
 * @property
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":586
 *
 * @property
 * def nbytes(self): # <<<<<<<<<<<<<<
 * return self.size * self.view.itemsize
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":590
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
/* Cython-generated getter pair for `memoryview.size`. AUTO-GENERATED CODE.
 * Lazily computes the total element count as the product of
 * view.shape[0..ndim-1], caches it in self->_size (initially Py_None),
 * and returns the cached Python integer on subsequent calls. */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":591
 * @property
 * def size(self):
 * if self._size is None: # <<<<<<<<<<<<<<
 * result = 1
 *
 */
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":592
 * def size(self):
 * if self._size is None:
 * result = 1 # <<<<<<<<<<<<<<
 *
 * for length in self.view.shape[:self.view.ndim]:
 */
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":594
 * result = 1
 *
 * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
 * result *= length
 *
 */
/* Multiply all extents together as Python ints (avoids C overflow). */
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 594, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":595
 *
 * for length in self.view.shape[:self.view.ndim]:
 * result *= length # <<<<<<<<<<<<<<
 *
 * self._size = result
 */
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 595, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":597
 * result *= length
 *
 * self._size = result # <<<<<<<<<<<<<<
 *
 * return self._size
 */
/* Store the computed product into the cache slot, releasing the old
 * value (Py_None on first call). */
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
/* "View.MemoryView":591
 * @property
 * def size(self):
 * if self._size is None: # <<<<<<<<<<<<<<
 * result = 1
 *
 */
}
/* "View.MemoryView":599
 * self._size = result
 *
 * return self._size # <<<<<<<<<<<<<<
 *
 * def __len__(self):
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":590
 *
 * @property
 * def size(self): # <<<<<<<<<<<<<<
 * if self._size is None:
 * result = 1
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":601
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
/* Cython-generated `memoryview.__len__`. AUTO-GENERATED CODE. Returns the
 * length of the first dimension (view.shape[0]) when ndim >= 1, else 0.
 * Cannot raise, so there is no error label in this function. */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":602
 *
 * def __len__(self):
 * if self.view.ndim >= 1: # <<<<<<<<<<<<<<
 * return self.view.shape[0]
 *
 */
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":603
 * def __len__(self):
 * if self.view.ndim >= 1:
 * return self.view.shape[0] # <<<<<<<<<<<<<<
 *
 * return 0
 */
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
/* "View.MemoryView":602
 *
 * def __len__(self):
 * if self.view.ndim >= 1: # <<<<<<<<<<<<<<
 * return self.view.shape[0]
 *
 */
}
/* "View.MemoryView":605
 * return self.view.shape[0]
 *
 * return 0 # <<<<<<<<<<<<<<
 *
 * def __repr__(self):
 */
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":601
 * return self._size
 *
 * def __len__(self): # <<<<<<<<<<<<<<
 * if self.view.ndim >= 1:
 * return self.view.shape[0]
 */
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":607
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
/* Cython-generated `memoryview.__repr__`. AUTO-GENERATED CODE. Formats
 * "<MemoryView of %r at 0x%x>" from self.base.__class__.__name__ and
 * id(self), via a 2-tuple fed to %-style string formatting. */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":608
 *
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
 * id(self))
 *
 */
__Pyx_XDECREF(__pyx_r);
/* Chained attribute lookups: self.base -> .__class__ -> .__name__ */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 608, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 608, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 608, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":609
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 * id(self)) # <<<<<<<<<<<<<<
 *
 * def __str__(self):
 */
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 609, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* "View.MemoryView":608
 *
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
 * id(self))
 *
 */
/* Pack (name, id) into a tuple; ownership of t_1/t_2 moves into it. */
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 608, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 608, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":607
 * return 0
 *
 * def __repr__(self): # <<<<<<<<<<<<<<
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 * id(self))
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":611
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
/* Cython-generated `memoryview.__str__`. AUTO-GENERATED CODE. Formats
 * "<MemoryView of %r object>" from self.base.__class__.__name__ (1-tuple
 * fed to %-style formatting). */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":612
 *
 * def __str__(self):
 * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
/* Chained attribute lookups: self.base -> .__class__ -> .__name__ */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":611
 * id(self))
 *
 * def __str__(self): # <<<<<<<<<<<<<<
 * return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":615
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
/* Cython-generated `memoryview.is_c_contig()`. AUTO-GENERATED CODE.
 * Obtains a __Pyx_memviewslice for self and asks the runtime whether it
 * is C-contiguous ('C' order) over all ndim dimensions; returns a bool. */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":618
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
 * return slice_is_contig(mslice[0], 'C', self.view.ndim)
 *
 */
/* tmp is stack storage the helper may fill; mslice points at the slice. */
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":619
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp)
 * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
 *
 * def is_f_contig(self):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 619, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":615
 *
 *
 * def is_c_contig(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":621
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
/* Cython-generated `memoryview.is_f_contig()`. AUTO-GENERATED CODE.
 * Same as is_c_contig above but checks Fortran ('F') order. */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":624
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
 * return slice_is_contig(mslice[0], 'F', self.view.ndim)
 *
 */
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":625
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp)
 * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
 *
 * def copy(self):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 625, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":621
 * return slice_is_contig(mslice[0], 'C', self.view.ndim)
 *
 * def is_f_contig(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":627
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
/* Cython-generated `memoryview.copy()`. AUTO-GENERATED CODE. Produces a
 * new C-contiguous copy: clears the F-contiguous flag, snapshots the
 * slice, copies it into freshly allocated C-order storage, and wraps the
 * result back into a Python memoryview object. */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":629
 * def copy(self):
 * cdef __Pyx_memviewslice mslice
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
 *
 * slice_copy(self, &mslice)
 */
/* Drop the F-contiguous request bit; C-contiguous is added below. */
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":631
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 *
 * slice_copy(self, &mslice) # <<<<<<<<<<<<<<
 * mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
 * self.view.itemsize,
 */
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":632
 *
 * slice_copy(self, &mslice)
 * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
 * self.view.itemsize,
 * flags|PyBUF_C_CONTIGUOUS,
 */
/* copy_new_contig signals failure via the exception state, hence the
 * PyErr_Occurred() check instead of a sentinel return value. */
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 632, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":637
 * self.dtype_is_object)
 *
 * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
 *
 * def copy_fortran(self):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 637, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":627
 * return slice_is_contig(mslice[0], 'F', self.view.ndim)
 *
 * def copy(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice mslice
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":639
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
/* Python-callable wrapper for memoryview.copy_fortran().
 * Casts the generic PyObject* self to the concrete memoryview struct and
 * delegates to the generated implementation; `unused` is the (always empty)
 * argument of a METH_NOARGS-style entry point. Returns a new reference, or
 * NULL on error (propagated from the implementation). */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
/* Delegate to the implementation with the concrete object layout. */
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.copy_fortran(): produce a Fortran-contiguous
 * copy of this memoryview. Mirrors the Cython source shown in the
 * interleaved comments: strip PyBUF_C_CONTIGUOUS from the buffer flags,
 * snapshot the view into a local slice, make a new contiguous copy with
 * PyBUF_F_CONTIGUOUS set, and wrap the result back into a Python object.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":641
* def copy_fortran(self):
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &src)
*/
/* Drop the C-contiguity request; F-contiguity is forced below instead. */
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":643
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*
* slice_copy(self, &src) # <<<<<<<<<<<<<<
* dst = slice_copy_contig(&src, "fortran", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":644
*
* slice_copy(self, &src)
* dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_F_CONTIGUOUS,
*/
/* Allocates a new Fortran-ordered buffer and copies the data into it;
* errors are detected via PyErr_Occurred() since the helper returns a
* plain struct, not a PyObject*. */
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 644, __pyx_L1_error)
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":649
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 649, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":639
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
/* Python-callable wrapper for memoryview.__reduce_cython__() (pickling
 * protocol hook). Casts self to the concrete struct and delegates; the
 * implementation always raises TypeError, since memoryview objects have a
 * non-trivial __cinit__ and cannot be pickled with the default protocol. */
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __reduce_cython__ implementation: unconditionally raises
 * TypeError("no default __reduce__ due to non-trivial __cinit__").
 * Always returns NULL with the exception set; there is no success path. */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* __pyx_tuple__13 holds the pre-built message tuple for the TypeError call. */
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
/* Python-callable wrapper for memoryview.__setstate_cython__(state)
 * (unpickling protocol hook). Casts self and forwards the state object to
 * the implementation, which always raises TypeError. */
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __setstate_cython__ implementation: unconditionally raises
 * TypeError("no default __reduce__ due to non-trivial __cinit__").
 * Both self and the state argument are ignored; always returns NULL. */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
/* __pyx_tuple__14 holds the pre-built message tuple for the TypeError call. */
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":653
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* C-level factory (cname for memoryview_cwrapper): constructs a
 * View.MemoryView memoryview object over `o` by calling the memoryview
 * type with (o, flags, dtype_is_object), then stores `typeinfo` directly
 * into the new object's typeinfo field. Returns a new reference, or 0
 * with an exception set on failure. */
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":654
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
* result.typeinfo = typeinfo
* return result
*/
/* Box the two C arguments so they can be passed through the Python-level
* constructor call. */
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 654, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 654, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* Build the 3-tuple argument (o, flags, dtype_is_object); ownership of
* the boxed values transfers into the tuple via PyTuple_SET_ITEM. */
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 654, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 654, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":655
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo # <<<<<<<<<<<<<<
* return result
*
*/
/* Raw pointer store; typeinfo describes the element type for this view. */
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":656
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_check')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":653
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":659
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* memoryview_check(o): C-level isinstance(o, memoryview) test against the
 * module's own View.MemoryView type (not the builtin memoryview).
 * Returns nonzero if o is an instance; never fails. */
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":660
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o):
* return isinstance(o, memoryview) # <<<<<<<<<<<<<<
*
* cdef tuple _unellipsify(object index, int ndim):
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":659
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":662
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* _unellipsify(index, ndim): normalize an indexing expression for an
 * ndim-dimensional memoryview. Wraps a non-tuple index into a 1-tuple,
 * replaces the first Ellipsis with enough full slices (slice(None)) to pad
 * the index out to ndim entries, turns any further Ellipsis into a single
 * full slice, rejects items that are neither slices nor integer indices
 * (TypeError), and right-pads with full slices to reach ndim entries.
 * Returns the 2-tuple (have_slices or nslices, tuple(result)) as in the
 * echoed Cython source, or 0 with an exception set on error. */
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":667
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":668
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
/* Single index: normalize to a 1-tuple so the loop below is uniform. */
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 668, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":667
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":670
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
/*else*/ {
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":672
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":673
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":674
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":675
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
/* enumerate(tup): __pyx_t_3 carries the running Python-int counter, and
* the list/tuple fast path is used when possible, falling back to the
* generic iterator protocol otherwise. */
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 675, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 675, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
/* `0 < 0` is an always-false guard emitted by the code generator. */
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 675, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 675, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
/* StopIteration ends the loop cleanly; anything else propagates. */
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 675, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
/* Advance the enumerate counter (idx is bound but unused downstream). */
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":676
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":677
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":678
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
/* First Ellipsis expands into (ndim - len(tup) + 1) full slices;
* __pyx_slice__15 is the interned slice(None) constant. */
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 678, __pyx_L1_error)
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 678, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__15);
__Pyx_GIVEREF(__pyx_slice__15);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__15);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 678, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":679
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
/* "View.MemoryView":677
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
goto __pyx_L7;
}
/* "View.MemoryView":681
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
/* Subsequent Ellipsis items each contribute just one full slice. */
/*else*/ {
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__15); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 681, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":682
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
/* "View.MemoryView":676
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
goto __pyx_L6;
}
/* "View.MemoryView":684
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
/*else*/ {
/* Short-circuit `and`: only check PyIndex_Check if not a slice. */
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":685
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 685, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 685, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_Raise(__pyx_t_11, 0, 0, 0);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__PYX_ERR(1, 685, __pyx_L1_error)
/* "View.MemoryView":684
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
}
/* "View.MemoryView":687
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
/* Short-circuit `or`: keep have_slices set, else test for a slice. */
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":688
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 688, __pyx_L1_error)
}
__pyx_L6:;
/* "View.MemoryView":675
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":690
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 690, __pyx_L1_error)
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":691
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":692
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
/* Right-pad with full slices so the result covers all ndim dimensions. */
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 692, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__15);
__Pyx_GIVEREF(__pyx_slice__15);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__15);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":691
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
}
/* "View.MemoryView":694
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
__Pyx_XDECREF(__pyx_r);
/* `have_slices or nslices`: boxed as a bool if have_slices is true,
* otherwise as the (possibly zero) nslices integer. */
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 694, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 694, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_11);
__pyx_t_11 = 0;
goto __pyx_L0;
/* "View.MemoryView":662
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":696
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* assert_direct_dimensions(suboffsets, ndim): scan the first ndim entries
 * of the buffer's suboffsets array and raise ValueError("Indirect
 * dimensions not supported") if any is >= 0 (i.e. the dimension uses
 * PEP 3118 indirect/pointer addressing). Returns None on success, 0 with
 * an exception set on failure. */
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":697
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported")
*/
/* Pointer-walk over suboffsets[0..ndim). */
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
__pyx_t_1 = __pyx_t_3;
__pyx_v_suboffset = (__pyx_t_1[0]);
/* "View.MemoryView":698
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
__pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":699
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
/* __pyx_tuple__16 holds the pre-built message tuple for ValueError. */
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 699, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 699, __pyx_L1_error)
/* "View.MemoryView":698
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
}
}
/* "View.MemoryView":696
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":706
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":707
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":714
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
(void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));
/* "View.MemoryView":718
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(1, 718, __pyx_L1_error)
}
}
#endif
/* "View.MemoryView":720
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":721
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 721, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":722
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
/* "View.MemoryView":720
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
goto __pyx_L3;
}
/* "View.MemoryView":724
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":725
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":731
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":732
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":737
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":738
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":742
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 742, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 742, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 742, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 742, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 742, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 742, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 742, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":743
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":747
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 747, __pyx_L1_error)
/* "View.MemoryView":744
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 744, __pyx_L1_error)
/* "View.MemoryView":743
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
goto __pyx_L6;
}
/* "View.MemoryView":750
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":751
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":752
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":753
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
/* "View.MemoryView":754
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
/* "View.MemoryView":750
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
goto __pyx_L6;
}
/* "View.MemoryView":756
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 756, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 756, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 756, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":757
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 757, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 757, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 757, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":758
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 758, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 758, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 758, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":760
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":761
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":762
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":764
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 764, __pyx_L1_error)
/* "View.MemoryView":770
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":742
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":772
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":773
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":774
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 774, __pyx_L1_error) }
/* "View.MemoryView":775
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 775, __pyx_L1_error) }
/* "View.MemoryView":773
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 773, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 773, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":772
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
}
/* "View.MemoryView":778
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
/*else*/ {
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":779
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 778, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":778
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 778, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":706
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":803
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/*
 * Cython-generated helper for View.MemoryView.slice_memviewslice (source
 * line 803 above).  Applies ONE dimension of an indexing operation to the
 * memoryview slice *dst:
 *   - is_slice == 0: `start` is a plain index; it is normalized for
 *     negative values and bounds-checked (IndexError via _err_dim).
 *   - is_slice != 0: start/stop/step (with have_* flags marking which
 *     were given) are clamped Python-slice style, and the resulting
 *     shape/stride/suboffset for this dimension are written into
 *     dst->shape/strides/suboffsets[new_ndim].
 * In both cases the base pointer is advanced by start*stride — either
 * directly on dst->data, or on dst->suboffsets[*suboffset_dim] when an
 * earlier dimension carried a suboffset.
 * Returns 0 on success, -1 with a Python exception set on error.
 * NOTE(review): machine-generated code — regenerate from the .pyx source
 * instead of editing by hand.
 */
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":823
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* --- single-index path: normalize a negative index, then bounds-check --- */
/* "View.MemoryView":825
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":826
*
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":825
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
}
/* "View.MemoryView":827
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":828
* start += shape
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
* else:
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 828, __pyx_L1_error)
/* "View.MemoryView":827
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
}
/* "View.MemoryView":823
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
goto __pyx_L3;
}
/* --- slice path: clamp start/stop, default step, compute new extent --- */
/* "View.MemoryView":831
* else:
*
* negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
*
* if have_step and step == 0:
*/
/*else*/ {
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":833
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":834
*
* if have_step and step == 0:
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 834, __pyx_L1_error)
/* "View.MemoryView":833
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
}
/* "View.MemoryView":837
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":838
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":839
* if have_start:
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if start < 0:
* start = 0
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":840
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":841
* start += shape
* if start < 0:
* start = 0 # <<<<<<<<<<<<<<
* elif start >= shape:
* if negative_step:
*/
__pyx_v_start = 0;
/* "View.MemoryView":840
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
}
/* "View.MemoryView":838
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
goto __pyx_L12;
}
/* "View.MemoryView":842
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":843
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":844
* elif start >= shape:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = shape
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":843
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L14;
}
/* "View.MemoryView":846
* start = shape - 1
* else:
* start = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
/*else*/ {
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
/* "View.MemoryView":842
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
}
__pyx_L12:;
/* "View.MemoryView":837
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
goto __pyx_L11;
}
/* default start when omitted: end of axis for negative step, else 0 */
/* "View.MemoryView":848
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":849
* else:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = 0
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":848
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L15;
}
/* "View.MemoryView":851
* start = shape - 1
* else:
* start = 0 # <<<<<<<<<<<<<<
*
* if have_stop:
*/
/*else*/ {
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
/* "View.MemoryView":853
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":854
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":855
* if have_stop:
* if stop < 0:
* stop += shape # <<<<<<<<<<<<<<
* if stop < 0:
* stop = 0
*/
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":856
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":857
* stop += shape
* if stop < 0:
* stop = 0 # <<<<<<<<<<<<<<
* elif stop > shape:
* stop = shape
*/
__pyx_v_stop = 0;
/* "View.MemoryView":856
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
}
/* "View.MemoryView":854
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
goto __pyx_L17;
}
/* "View.MemoryView":858
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":859
* stop = 0
* elif stop > shape:
* stop = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
__pyx_v_stop = __pyx_v_shape;
/* "View.MemoryView":858
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
}
__pyx_L17:;
/* "View.MemoryView":853
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
goto __pyx_L16;
}
/* default stop when omitted: -1 (exclusive, before index 0) for a
   negative step, else the axis length */
/* "View.MemoryView":861
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":862
* else:
* if negative_step:
* stop = -1 # <<<<<<<<<<<<<<
* else:
* stop = shape
*/
__pyx_v_stop = -1L;
/* "View.MemoryView":861
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
goto __pyx_L19;
}
/* "View.MemoryView":864
* stop = -1
* else:
* stop = shape # <<<<<<<<<<<<<<
*
* if not have_step:
*/
/*else*/ {
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
/* "View.MemoryView":866
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":867
*
* if not have_step:
* step = 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_step = 1;
/* "View.MemoryView":866
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
}
/* C truncating division is intentional here: the .pyx source wraps this
   in cython.cdivision(True); the remainder check below rounds the
   element count up when (stop - start) is not a multiple of step. */
/* "View.MemoryView":871
*
* with cython.cdivision(True):
* new_shape = (stop - start) // step # <<<<<<<<<<<<<<
*
* if (stop - start) - step * new_shape:
*/
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":873
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":874
*
* if (stop - start) - step * new_shape:
* new_shape += 1 # <<<<<<<<<<<<<<
*
* if new_shape < 0:
*/
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
/* "View.MemoryView":873
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
}
/* "View.MemoryView":876
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":877
*
* if new_shape < 0:
* new_shape = 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_new_shape = 0;
/* "View.MemoryView":876
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
}
/* publish this dimension's geometry into the destination slice */
/* "View.MemoryView":880
*
*
* dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset
*/
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":881
*
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
* dst.suboffsets[new_ndim] = suboffset
*
*/
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":882
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* advance the base pointer — or, if an earlier dimension had a
   suboffset, defer the advance into that dimension's suboffset */
/* "View.MemoryView":885
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":886
*
* if suboffset_dim[0] < 0:
* dst.data += start * stride # <<<<<<<<<<<<<<
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride
*/
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
/* "View.MemoryView":885
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
goto __pyx_L23;
}
/* "View.MemoryView":888
* dst.data += start * stride
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
*
* if suboffset >= 0:
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
/* "View.MemoryView":890
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":891
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":892
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* indexing through a suboffset dimension: follow the pointer
   indirection, then add the suboffset */
/* "View.MemoryView":893
* if not is_slice:
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
*/
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":892
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
goto __pyx_L26;
}
/* "View.MemoryView":895
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
* "must be indexed and not sliced", dim)
* else:
*/
/*else*/ {
/* "View.MemoryView":896
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
* "must be indexed and not sliced", dim) # <<<<<<<<<<<<<<
* else:
* suboffset_dim[0] = new_ndim
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 895, __pyx_L1_error)
}
__pyx_L26:;
/* "View.MemoryView":891
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
goto __pyx_L25;
}
/* "View.MemoryView":898
* "must be indexed and not sliced", dim)
* else:
* suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
*
* return 0
*/
/*else*/ {
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
/* "View.MemoryView":890
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
}
/* "View.MemoryView":900
* suboffset_dim[0] = new_ndim
*
* return 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":803
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* function exit code */
__pyx_L1_error:;
{
/* recording the traceback requires the GIL; this helper may be reached
   without holding it, so (when built WITH_THREAD) acquire it first */
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":906
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/*
 * Cython-generated helper for View.MemoryView.pybuffer_index (source
 * line 906 above).  Computes the address of element `index` along
 * dimension `dim` of the buffer described by `view`, starting from base
 * pointer `bufp`:
 *   - 0-dim buffers: extent is view->len / itemsize, stride is itemsize
 *     (with explicit zero-division / overflow guards);
 *   - otherwise shape/stride (and suboffset, when view->suboffsets is
 *     non-NULL) are taken for dimension `dim`;
 *   - Python-style negative indices are normalized; out-of-range indices
 *     raise IndexError;
 *   - a non-negative suboffset means the computed slot holds a pointer
 *     that is dereferenced and then offset by the suboffset.
 * Returns the element pointer, or NULL with a Python exception set.
 * NOTE(review): machine-generated code — regenerate from the .pyx source
 * rather than editing by hand.
 */
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":908
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
* cdef Py_ssize_t itemsize = view.itemsize
* cdef char *resultp
*/
__pyx_v_suboffset = -1L;
/* "View.MemoryView":909
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
* cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
* cdef char *resultp
*
*/
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":912
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* guard len/itemsize against division by zero and PY_SSIZE_T_MIN / -1
   overflow before dividing */
/* "View.MemoryView":913
*
* if view.ndim == 0:
* shape = view.len / itemsize # <<<<<<<<<<<<<<
* stride = itemsize
* else:
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 913, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 913, __pyx_L1_error)
}
__pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
/* "View.MemoryView":914
* if view.ndim == 0:
* shape = view.len / itemsize
* stride = itemsize # <<<<<<<<<<<<<<
* else:
* shape = view.shape[dim]
*/
__pyx_v_stride = __pyx_v_itemsize;
/* "View.MemoryView":912
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
goto __pyx_L3;
}
/* "View.MemoryView":916
* stride = itemsize
* else:
* shape = view.shape[dim] # <<<<<<<<<<<<<<
* stride = view.strides[dim]
* if view.suboffsets != NULL:
*/
/*else*/ {
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":917
* else:
* shape = view.shape[dim]
* stride = view.strides[dim] # <<<<<<<<<<<<<<
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim]
*/
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":918
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":919
* stride = view.strides[dim]
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
*
* if index < 0:
*/
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
/* "View.MemoryView":918
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
}
}
__pyx_L3:;
/* normalize a Python-style negative index; still-negative means out of
   bounds */
/* "View.MemoryView":921
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":922
*
* if index < 0:
* index += view.shape[dim] # <<<<<<<<<<<<<<
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*/
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":923
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":924
* index += view.shape[dim]
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* if index >= shape:
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 924, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 924, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 924, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 924, __pyx_L1_error)
/* "View.MemoryView":923
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":921
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
}
/* "View.MemoryView":926
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":927
*
* if index >= shape:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* resultp = bufp + index * stride
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 927, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 927, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 927, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 927, __pyx_L1_error)
/* "View.MemoryView":926
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":929
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* resultp = bufp + index * stride # <<<<<<<<<<<<<<
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset
*/
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* a non-negative suboffset marks an indirect dimension: the slot holds
   a pointer that must be dereferenced, then offset */
/* "View.MemoryView":930
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":931
* resultp = bufp + index * stride
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
*
* return resultp
*/
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":930
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
}
/* "View.MemoryView":933
* resultp = (<char **> resultp)[0] + suboffset
*
* return resultp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":906
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":939
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* Cython-generated from View.MemoryView `transpose_memslice`.
 * Transposes a memoryview slice IN PLACE by reversing its `shape` and
 * `strides` arrays (swap entry i with entry ndim-1-i for the first ndim/2
 * indices).  Declared `nogil except 0` in the Cython source: returns 1 on
 * success and 0 on error; on error the GIL is re-acquired only long enough
 * to record the traceback.  Raises ValueError (via __pyx_memoryview_err)
 * if any dimension touched by the swap is indirect (suboffset >= 0),
 * since transposing indirect dimensions is not supported. */
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
/* "View.MemoryView":940
 * @cname('__pyx_memslice_transpose')
 * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
 * cdef int ndim = memslice.memview.view.ndim             # <<<<<<<<<<<<<<
 *
 * cdef Py_ssize_t *shape = memslice.shape
 */
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":942
 * cdef int ndim = memslice.memview.view.ndim
 *
 * cdef Py_ssize_t *shape = memslice.shape             # <<<<<<<<<<<<<<
 * cdef Py_ssize_t *strides = memslice.strides
 *
 */
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":943
 *
 * cdef Py_ssize_t *shape = memslice.shape
 * cdef Py_ssize_t *strides = memslice.strides             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":947
 *
 * cdef int i, j
 * for i in range(ndim / 2):             # <<<<<<<<<<<<<<
 * j = ndim - 1 - i
 * strides[i], strides[j] = strides[j], strides[i]
 */
/* __Pyx_div_long implements Python floor division semantics for the
 * `ndim / 2` loop bound (equivalent to C division here since ndim >= 0). */
__pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":948
 * cdef int i, j
 * for i in range(ndim / 2):
 * j = ndim - 1 - i             # <<<<<<<<<<<<<<
 * strides[i], strides[j] = strides[j], strides[i]
 * shape[i], shape[j] = shape[j], shape[i]
 */
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":949
 * for i in range(ndim / 2):
 * j = ndim - 1 - i
 * strides[i], strides[j] = strides[j], strides[i]             # <<<<<<<<<<<<<<
 * shape[i], shape[j] = shape[j], shape[i]
 *
 */
/* Tuple-swap lowered to two temporaries (t_5/t_6): read both, then write. */
__pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
/* "View.MemoryView":950
 * j = ndim - 1 - i
 * strides[i], strides[j] = strides[j], strides[i]
 * shape[i], shape[j] = shape[j], shape[i]             # <<<<<<<<<<<<<<
 *
 * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
 */
__pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
/* "View.MemoryView":952
 * shape[i], shape[j] = shape[j], shape[i]
 *
 * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:             # <<<<<<<<<<<<<<
 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
 *
 */
/* Short-circuit `or`: second suboffset checked only if the first is < 0. */
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_8) {
} else {
__pyx_t_7 = __pyx_t_8;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_7 = __pyx_t_8;
__pyx_L6_bool_binop_done:;
if (__pyx_t_7) {
/* "View.MemoryView":953
 *
 * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions")             # <<<<<<<<<<<<<<
 *
 * return 1
 */
__pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 953, __pyx_L1_error)
/* "View.MemoryView":952
 * shape[i], shape[j] = shape[j], shape[i]
 *
 * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:             # <<<<<<<<<<<<<<
 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
 *
 */
}
}
/* "View.MemoryView":955
 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
 *
 * return 1             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":939
 *
 * @cname('__pyx_memslice_transpose')
 * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:             # <<<<<<<<<<<<<<
 * cdef int ndim = memslice.memview.view.ndim
 *
 */
/* function exit code */
__pyx_L1_error:;
{
/* Function runs without the GIL; re-acquire it just to record the
 * traceback, then release again (only when built WITH_THREAD). */
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":972
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
/* tp_dealloc-level wrapper for _memoryviewslice.__dealloc__: casts the
 * generic PyObject* self to the concrete slice struct and delegates to
 * the generated implementation below. */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* Implementation of _memoryviewslice.__dealloc__: drops this object's
 * reference to the wrapped C-level slice (acquired in memoryview_fromslice
 * via __PYX_INC_MEMVIEW).  The XDEC variant tolerates a NULL memview. */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":973
 *
 * def __dealloc__(self):
 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1)             # <<<<<<<<<<<<<<
 *
 * cdef convert_item_to_object(self, char *itemp):
 */
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":972
 * cdef int (*to_dtype_func)(char *, object) except 0
 *
 * def __dealloc__(self):             # <<<<<<<<<<<<<<
 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
 *
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":975
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* Cython-generated from _memoryviewslice.convert_item_to_object: convert
 * one raw item (pointed to by itemp) into a Python object.  Uses the
 * dtype-specific callback `to_object_func` when one was registered at
 * construction time; otherwise falls back to the generic base-class
 * implementation __pyx_memoryview_convert_item_to_object.
 * Returns a new reference, or NULL (0) with an exception set. */
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":976
 *
 * cdef convert_item_to_object(self, char *itemp):
 * if self.to_object_func != NULL:             # <<<<<<<<<<<<<<
 * return self.to_object_func(itemp)
 * else:
 */
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":977
 * cdef convert_item_to_object(self, char *itemp):
 * if self.to_object_func != NULL:
 * return self.to_object_func(itemp)             # <<<<<<<<<<<<<<
 * else:
 * return memoryview.convert_item_to_object(self, itemp)
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 977, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":976
 *
 * cdef convert_item_to_object(self, char *itemp):
 * if self.to_object_func != NULL:             # <<<<<<<<<<<<<<
 * return self.to_object_func(itemp)
 * else:
 */
}
/* "View.MemoryView":979
 * return self.to_object_func(itemp)
 * else:
 * return memoryview.convert_item_to_object(self, itemp)             # <<<<<<<<<<<<<<
 *
 * cdef assign_item_from_object(self, char *itemp, object value):
 */
/*else*/ {
__Pyx_XDECREF(__pyx_r);
/* Unbound base-class call: self is downcast to the plain memoryview struct. */
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 979, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":975
 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
 *
 * cdef convert_item_to_object(self, char *itemp):             # <<<<<<<<<<<<<<
 * if self.to_object_func != NULL:
 * return self.to_object_func(itemp)
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":981
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* Cython-generated from _memoryviewslice.assign_item_from_object: store a
 * Python object `value` into the raw item slot at itemp.  Uses the
 * dtype-specific callback `to_dtype_func` when registered (its `except 0`
 * contract means a 0 return signals an error); otherwise delegates to the
 * generic base-class __pyx_memoryview_assign_item_from_object.
 * Returns Py_None on success, NULL (0) with an exception set on failure. */
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":982
 *
 * cdef assign_item_from_object(self, char *itemp, object value):
 * if self.to_dtype_func != NULL:             # <<<<<<<<<<<<<<
 * self.to_dtype_func(itemp, value)
 * else:
 */
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":983
 * cdef assign_item_from_object(self, char *itemp, object value):
 * if self.to_dtype_func != NULL:
 * self.to_dtype_func(itemp, value)             # <<<<<<<<<<<<<<
 * else:
 * memoryview.assign_item_from_object(self, itemp, value)
 */
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 983, __pyx_L1_error)
/* "View.MemoryView":982
 *
 * cdef assign_item_from_object(self, char *itemp, object value):
 * if self.to_dtype_func != NULL:             # <<<<<<<<<<<<<<
 * self.to_dtype_func(itemp, value)
 * else:
 */
goto __pyx_L3;
}
/* "View.MemoryView":985
 * self.to_dtype_func(itemp, value)
 * else:
 * memoryview.assign_item_from_object(self, itemp, value)             # <<<<<<<<<<<<<<
 *
 * @property
 */
/*else*/ {
/* Base-class call returns an object that is immediately discarded. */
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 985, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":981
 * return memoryview.convert_item_to_object(self, itemp)
 *
 * cdef assign_item_from_object(self, char *itemp, object value):             # <<<<<<<<<<<<<<
 * if self.to_dtype_func != NULL:
 * self.to_dtype_func(itemp, value)
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":988
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
/* Getter-slot wrapper for the _memoryviewslice.base property: casts self
 * and forwards to the generated __get__ implementation below. */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* _memoryviewslice.base property getter: returns a new reference to
 * self.from_object (the original object this slice was created from,
 * stored by memoryview_fromslice).  Cannot fail. */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":989
 * @property
 * def base(self):
 * return self.from_object             # <<<<<<<<<<<<<<
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":988
 *
 * @property
 * def base(self):             # <<<<<<<<<<<<<<
 * return self.from_object
 *
 */
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
/* Method-table wrapper for _memoryviewslice.__reduce_cython__: the `unused`
 * argument is the (ignored) empty args object. */
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* _memoryviewslice.__reduce_cython__: pickling is unsupported for this
 * type (its __cinit__ takes required C-level arguments), so this always
 * raises TypeError with a pre-built argument tuple (__pyx_tuple__17). */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
 * def __reduce_cython__(self):
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 */
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 * def __setstate_cython__(self, __pyx_state):
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
/* Method-table wrapper for _memoryviewslice.__setstate_cython__: forwards
 * self and the state object to the generated implementation below. */
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* _memoryviewslice.__setstate_cython__: unpickling is unsupported (see
 * __reduce_cython__ above) — always raises TypeError using the pre-built
 * argument tuple __pyx_tuple__18; the state argument is ignored. */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 * def __setstate_cython__(self, __pyx_state):
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
 */
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
 * def __reduce_cython__(self):
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":995
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* Cython-generated from View.MemoryView `memoryview_fromslice`.
 * Wraps a C-level __Pyx_memviewslice in a new Python-level
 * _memoryviewslice object.  Steps, in order:
 *   1. Return None if the slice's memview is None.
 *   2. Allocate a _memoryviewslice(None, 0, dtype_is_object).
 *   3. Copy the slice struct in and take a reference on it
 *      (__PYX_INC_MEMVIEW), record the originating object (.base) and
 *      typeinfo, and clone the source Py_buffer view.
 *   4. Point view.buf at the slice data, set ndim, and set the buffer
 *      owner to Py_None.
 *   5. Choose writable (PyBUF_RECORDS) vs read-only (PyBUF_RECORDS_RO)
 *      flags based on the source memoryview's flags.
 *   6. Re-point view.shape/strides at the embedded slice copy; expose
 *      view.suboffsets only if at least one dimension is indirect.
 *   7. Recompute view.len = itemsize * product(shape[:ndim]).
 *   8. Install the dtype conversion callbacks and return the new object.
 * Returns NULL with an exception set on failure. */
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":1003
 * cdef _memoryviewslice result
 *
 * if <PyObject *> memviewslice.memview == Py_None:             # <<<<<<<<<<<<<<
 * return None
 *
 */
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1004
 *
 * if <PyObject *> memviewslice.memview == Py_None:
 * return None             # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "View.MemoryView":1003
 * cdef _memoryviewslice result
 *
 * if <PyObject *> memviewslice.memview == Py_None:             # <<<<<<<<<<<<<<
 * return None
 *
 */
}
/* "View.MemoryView":1009
 *
 *
 * result = _memoryviewslice(None, 0, dtype_is_object)             # <<<<<<<<<<<<<<
 *
 * result.from_slice = memviewslice
 */
/* Build the 3-tuple (None, 0, bool(dtype_is_object)) and call the type. */
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1009, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1009, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1009, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1011
 * result = _memoryviewslice(None, 0, dtype_is_object)
 *
 * result.from_slice = memviewslice             # <<<<<<<<<<<<<<
 * __PYX_INC_MEMVIEW(&memviewslice, 1)
 *
 */
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":1012
 *
 * result.from_slice = memviewslice
 * __PYX_INC_MEMVIEW(&memviewslice, 1)             # <<<<<<<<<<<<<<
 *
 * result.from_object = (<memoryview> memviewslice.memview).base
 */
/* Take a slice-level reference; released in __dealloc__ above. */
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":1014
 * __PYX_INC_MEMVIEW(&memviewslice, 1)
 *
 * result.from_object = (<memoryview> memviewslice.memview).base             # <<<<<<<<<<<<<<
 * result.typeinfo = memviewslice.memview.typeinfo
 *
 */
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1014, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":1015
 *
 * result.from_object = (<memoryview> memviewslice.memview).base
 * result.typeinfo = memviewslice.memview.typeinfo             # <<<<<<<<<<<<<<
 *
 * result.view = memviewslice.memview.view
 */
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":1017
 * result.typeinfo = memviewslice.memview.typeinfo
 *
 * result.view = memviewslice.memview.view             # <<<<<<<<<<<<<<
 * result.view.buf = <void *> memviewslice.data
 * result.view.ndim = ndim
 */
/* Struct copy of the whole Py_buffer; selected fields are patched below. */
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":1018
 *
 * result.view = memviewslice.memview.view
 * result.view.buf = <void *> memviewslice.data             # <<<<<<<<<<<<<<
 * result.view.ndim = ndim
 * (<__pyx_buffer *> &result.view).obj = Py_None
 */
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":1019
 * result.view = memviewslice.memview.view
 * result.view.buf = <void *> memviewslice.data
 * result.view.ndim = ndim             # <<<<<<<<<<<<<<
 * (<__pyx_buffer *> &result.view).obj = Py_None
 * Py_INCREF(Py_None)
 */
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":1020
 * result.view.buf = <void *> memviewslice.data
 * result.view.ndim = ndim
 * (<__pyx_buffer *> &result.view).obj = Py_None             # <<<<<<<<<<<<<<
 * Py_INCREF(Py_None)
 *
 */
__pyx_v_result->__pyx_base.view.obj = Py_None;
/* NOTE: generated line above is written via a cast in the original:
 * ((Py_buffer *)(&...view))->obj = Py_None; kept verbatim below. */
/* "View.MemoryView":1021
 * result.view.ndim = ndim
 * (<__pyx_buffer *> &result.view).obj = Py_None
 * Py_INCREF(Py_None)             # <<<<<<<<<<<<<<
 *
 * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
 */
Py_INCREF(Py_None);
/* "View.MemoryView":1023
 * Py_INCREF(Py_None)
 *
 * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:             # <<<<<<<<<<<<<<
 * result.flags = PyBUF_RECORDS
 * else:
 */
__pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1024
 *
 * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
 * result.flags = PyBUF_RECORDS             # <<<<<<<<<<<<<<
 * else:
 * result.flags = PyBUF_RECORDS_RO
 */
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":1023
 * Py_INCREF(Py_None)
 *
 * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:             # <<<<<<<<<<<<<<
 * result.flags = PyBUF_RECORDS
 * else:
 */
goto __pyx_L4;
}
/* "View.MemoryView":1026
 * result.flags = PyBUF_RECORDS
 * else:
 * result.flags = PyBUF_RECORDS_RO             # <<<<<<<<<<<<<<
 *
 * result.view.shape = <Py_ssize_t *> result.from_slice.shape
 */
/*else*/ {
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
}
__pyx_L4:;
/* "View.MemoryView":1028
 * result.flags = PyBUF_RECORDS_RO
 *
 * result.view.shape = <Py_ssize_t *> result.from_slice.shape             # <<<<<<<<<<<<<<
 * result.view.strides = <Py_ssize_t *> result.from_slice.strides
 *
 */
/* Re-point shape/strides at the arrays embedded in result.from_slice so
 * the buffer stays valid independent of the source memoryview. */
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":1029
 *
 * result.view.shape = <Py_ssize_t *> result.from_slice.shape
 * result.view.strides = <Py_ssize_t *> result.from_slice.strides             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":1032
 *
 *
 * result.view.suboffsets = NULL             # <<<<<<<<<<<<<<
 * for suboffset in result.from_slice.suboffsets[:ndim]:
 * if suboffset >= 0:
 */
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":1033
 *
 * result.view.suboffsets = NULL
 * for suboffset in result.from_slice.suboffsets[:ndim]:             # <<<<<<<<<<<<<<
 * if suboffset >= 0:
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 */
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":1034
 * result.view.suboffsets = NULL
 * for suboffset in result.from_slice.suboffsets[:ndim]:
 * if suboffset >= 0:             # <<<<<<<<<<<<<<
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 * break
 */
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1035
 * for suboffset in result.from_slice.suboffsets[:ndim]:
 * if suboffset >= 0:
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets             # <<<<<<<<<<<<<<
 * break
 *
 */
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1036
 * if suboffset >= 0:
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 * break             # <<<<<<<<<<<<<<
 *
 * result.view.len = result.view.itemsize
 */
goto __pyx_L6_break;
/* "View.MemoryView":1034
 * result.view.suboffsets = NULL
 * for suboffset in result.from_slice.suboffsets[:ndim]:
 * if suboffset >= 0:             # <<<<<<<<<<<<<<
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 * break
 */
}
}
__pyx_L6_break:;
/* "View.MemoryView":1038
 * break
 *
 * result.view.len = result.view.itemsize             # <<<<<<<<<<<<<<
 * for length in result.view.shape[:ndim]:
 * result.view.len *= length
 */
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":1039
 *
 * result.view.len = result.view.itemsize
 * for length in result.view.shape[:ndim]:             # <<<<<<<<<<<<<<
 * result.view.len *= length
 *
 */
/* The product is accumulated through Python integer objects (length is a
 * Python-level loop variable in the Cython source), hence the
 * PyInt_FromSsize_t / PyNumber_InPlaceMultiply round-trips. */
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1039, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1040
 * result.view.len = result.view.itemsize
 * for length in result.view.shape[:ndim]:
 * result.view.len *= length             # <<<<<<<<<<<<<<
 *
 * result.to_object_func = to_object_func
 */
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1040, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1040, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1040, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
/* "View.MemoryView":1042
 * result.view.len *= length
 *
 * result.to_object_func = to_object_func             # <<<<<<<<<<<<<<
 * result.to_dtype_func = to_dtype_func
 *
 */
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1043
 *
 * result.to_object_func = to_object_func
 * result.to_dtype_func = to_dtype_func             # <<<<<<<<<<<<<<
 *
 * return result
 */
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1045
 * result.to_dtype_func = to_dtype_func
 *
 * return result             # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":995
 *
 * @cname('__pyx_memoryview_fromslice')
 * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice,             # <<<<<<<<<<<<<<
 * int ndim,
 * object (*to_object_func)(char *),
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1048
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
*/
/* Cython-generated from View.MemoryView `get_slice_from_memview`.
 * Obtains a pointer to a __Pyx_memviewslice describing `memview`:
 *   - if memview is already a _memoryviewslice, return a pointer to its
 *     embedded from_slice (no copying);
 *   - otherwise fill the caller-supplied `mslice` via slice_copy and
 *     return that.
 * NOTE(review): declared without `except` in the Cython source, so a
 * failed type check is reported via __Pyx_WriteUnraisable and NULL is
 * returned — callers are presumably expected never to hit that path. */
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1051
 * __Pyx_memviewslice *mslice):
 * cdef _memoryviewslice obj
 * if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 * obj = memview
 * return &obj.from_slice
 */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1052
 * cdef _memoryviewslice obj
 * if isinstance(memview, _memoryviewslice):
 * obj = memview             # <<<<<<<<<<<<<<
 * return &obj.from_slice
 * else:
 */
/* Redundant-looking second type test is the generated cast check for the
 * typed assignment `obj = memview` (allows None or exact subtype). */
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1052, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1053
 * if isinstance(memview, _memoryviewslice):
 * obj = memview
 * return &obj.from_slice             # <<<<<<<<<<<<<<
 * else:
 * slice_copy(memview, mslice)
 */
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
/* "View.MemoryView":1051
 * __Pyx_memviewslice *mslice):
 * cdef _memoryviewslice obj
 * if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 * obj = memview
 * return &obj.from_slice
 */
}
/* "View.MemoryView":1055
 * return &obj.from_slice
 * else:
 * slice_copy(memview, mslice)             # <<<<<<<<<<<<<<
 * return mslice
 *
 */
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1056
 * else:
 * slice_copy(memview, mslice)
 * return mslice             # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_slice_copy')
 */
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1048
 *
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
 * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview,             # <<<<<<<<<<<<<<
 * __Pyx_memviewslice *mslice):
 * cdef _memoryviewslice obj
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1059
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/* slice_copy: snapshot a memoryview's Py_buffer metadata into a
 * __Pyx_memviewslice.  Copies buf/shape/strides per dimension; a missing
 * suboffsets array (NULL) is encoded as -1 per dimension, Cython's
 * "no indirection" convention.  `dst->memview` is a borrowed pointer:
 * no reference is taken here. */
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
  int dim;
  Py_ssize_t *shape, *strides, *suboffsets;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("slice_copy", 0);

  shape      = __pyx_v_memview->view.shape;
  strides    = __pyx_v_memview->view.strides;
  suboffsets = __pyx_v_memview->view.suboffsets;

  /* Borrowed back-pointer to the owning memoryview plus raw data start. */
  __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
  __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);

  for (dim = 0; dim < __pyx_v_memview->view.ndim; ++dim) {
    __pyx_v_dst->shape[dim]      = shape[dim];
    __pyx_v_dst->strides[dim]    = strides[dim];
    /* suboffsets may legitimately be NULL for non-indirect buffers. */
    __pyx_v_dst->suboffsets[dim] = suboffsets ? suboffsets[dim] : -1;
  }

  __Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1076
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/* memoryview_copy: create a deep copy of `memview` as a new memoryview
 * object.  The buffer metadata is first snapshotted into a stack-local
 * __Pyx_memviewslice via slice_copy(); the actual allocation and element
 * copy happen in memoryview_copy_from_slice().
 * Returns: new reference on success, NULL with a Python exception set on
 * failure (traceback recorded under "View.MemoryView.memoryview_copy"). */
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1079
 * "Create a new memoryview object"
 * cdef __Pyx_memviewslice memviewslice
 * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
 * return memoryview_copy_from_slice(memview, &memviewslice)
 *
 */
/* Fill the stack slice with memview's shape/strides/suboffsets; cannot fail. */
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1080
 * cdef __Pyx_memviewslice memviewslice
 * slice_copy(memview, &memviewslice)
 * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_copy_object_from_slice')
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1080, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1076
 *
 * @cname('__pyx_memoryview_copy_object')
 * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
 * "Create a new memoryview object"
 * cdef __Pyx_memviewslice memviewslice
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1083
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/* memoryview_copy_from_slice: build a new memoryview object wrapping
 * `memviewslice`.  When the source `memview` is a _memoryviewslice, its
 * dtype conversion callbacks (to_object_func / to_dtype_func) are
 * propagated so object-typed elements round-trip identically; otherwise
 * NULL callbacks are passed.
 * Returns: new reference on success, NULL with a Python exception set on
 * failure. */
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1090
 * cdef int (*to_dtype_func)(char *, object) except 0
 *
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * to_object_func = (<_memoryviewslice> memview).to_object_func
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 */
/* Subclass check: only _memoryviewslice carries dtype conversion callbacks. */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1091
 *
 * if isinstance(memview, _memoryviewslice):
 * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 * else:
 */
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1092
 * if isinstance(memview, _memoryviewslice):
 * to_object_func = (<_memoryviewslice> memview).to_object_func
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
 * else:
 * to_object_func = NULL
 */
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
/* "View.MemoryView":1090
 * cdef int (*to_dtype_func)(char *, object) except 0
 *
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * to_object_func = (<_memoryviewslice> memview).to_object_func
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 */
goto __pyx_L3;
}
/* "View.MemoryView":1094
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 * else:
 * to_object_func = NULL # <<<<<<<<<<<<<<
 * to_dtype_func = NULL
 *
 */
/*else*/ {
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1095
 * else:
 * to_object_func = NULL
 * to_dtype_func = NULL # <<<<<<<<<<<<<<
 *
 * return memoryview_fromslice(memviewslice[0], memview.view.ndim,
 */
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1097
 * to_dtype_func = NULL
 *
 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
 * to_object_func, to_dtype_func,
 * memview.dtype_is_object)
 */
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1099
 * return memoryview_fromslice(memviewslice[0], memview.view.ndim,
 * to_object_func, to_dtype_func,
 * memview.dtype_is_object) # <<<<<<<<<<<<<<
 *
 *
 */
/* Slice struct is passed by value; the new object owns its own copy of it. */
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1097, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1083
 *
 * @cname('__pyx_memoryview_copy_object_from_slice')
 * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
 * """
 * Create a new memoryview object from a given memoryview object and slice.
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1105
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* abs_py_ssize_t: absolute value of a Py_ssize_t (nogil helper).
 * NOTE(review): PY_SSIZE_T_MIN has no positive counterpart; callers pass
 * buffer strides, which presumably never reach that value — confirm. */
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
  return (__pyx_v_arg < 0) ? (-__pyx_v_arg) : __pyx_v_arg;
}
/* "View.MemoryView":1112
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* get_best_order: choose the preferred memory order ('C' or 'F') for
 * copying a slice.  The stride of the last dimension with extent > 1 is
 * the "C step"; the stride of the first such dimension is the "F step".
 * Dimensions of extent <= 1 are skipped because their stride carries no
 * layout information.  Ties (including the all-extent-1 case, where both
 * steps stay 0) favour 'C'. */
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
  int dim;
  Py_ssize_t c_step = 0;
  Py_ssize_t f_step = 0;

  /* Scan backwards for the innermost significant stride. */
  for (dim = __pyx_v_ndim - 1; dim >= 0; --dim) {
    if (__pyx_v_mslice->shape[dim] > 1) {
      c_step = __pyx_v_mslice->strides[dim];
      break;
    }
  }

  /* Scan forwards for the outermost significant stride. */
  for (dim = 0; dim < __pyx_v_ndim; ++dim) {
    if (__pyx_v_mslice->shape[dim] > 1) {
      f_step = __pyx_v_mslice->strides[dim];
      break;
    }
  }

  /* Strides may be negative; compare magnitudes only. */
  return (abs_py_ssize_t(c_step) <= abs_py_ssize_t(f_step)) ? 'C' : 'F';
}
/* "View.MemoryView":1136
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* _copy_strided_to_strided: recursively copy an ndim-dimensional strided
 * array of itemsize-byte elements from src to dst.  Only dst_shape is
 * consulted for extents; the caller is responsible for having verified
 * that src and dst extents match.  The 1-D base case collapses to a
 * single bulk memcpy when both strides equal exactly one item. */
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  Py_ssize_t k;
  const Py_ssize_t extent  = __pyx_v_dst_shape[0];
  const Py_ssize_t sstride = __pyx_v_src_strides[0];
  const Py_ssize_t dstride = __pyx_v_dst_strides[0];

  if (__pyx_v_ndim == 1) {
    if (sstride > 0 && dstride > 0 &&
        ((size_t)sstride) == __pyx_v_itemsize &&
        ((size_t)dstride) == __pyx_v_itemsize) {
      /* Both sides are unit-strided: one contiguous block copy. */
      memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize * extent);
    } else {
      /* Element-by-element copy, honouring (possibly negative) strides. */
      for (k = 0; k < extent; ++k) {
        memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize);
        __pyx_v_src_data += sstride;
        __pyx_v_dst_data += dstride;
      }
    }
  } else {
    /* Recurse over the leading dimension, stripping it off for the callee. */
    for (k = 0; k < extent; ++k) {
      _copy_strided_to_strided(__pyx_v_src_data, __pyx_v_src_strides + 1,
                               __pyx_v_dst_data, __pyx_v_dst_strides + 1,
                               __pyx_v_src_shape + 1, __pyx_v_dst_shape + 1,
                               __pyx_v_ndim - 1, __pyx_v_itemsize);
      __pyx_v_src_data += sstride;
      __pyx_v_dst_data += dstride;
    }
  }
}
/* "View.MemoryView":1166
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* copy_strided_to_strided: public entry point — unpacks the data/strides/
 * shape fields of the two slices and forwards them to the recursive
 * per-dimension worker. */
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides,
                           __pyx_v_dst->data, __pyx_v_dst->strides,
                           __pyx_v_src->shape, __pyx_v_dst->shape,
                           __pyx_v_ndim, __pyx_v_itemsize);
}
/* "View.MemoryView":1173
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
*/
/* slice_get_size: number of bytes occupied by the slice, i.e.
 * itemsize * product(shape[0..ndim)).  No overflow checking is
 * performed (matches the Cython-generated original). */
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
  int dim;
  Py_ssize_t nbytes = __pyx_v_src->memview->view.itemsize;

  for (dim = 0; dim < __pyx_v_ndim; ++dim) {
    nbytes *= __pyx_v_src->shape[dim];
  }

  return nbytes;
}
/* "View.MemoryView":1184
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* fill_contig_strides_array: write contiguous strides for `shape` into
 * `strides`, seeding the accumulator with `stride` (normally the item
 * size).  Order 'F' walks dimensions forward (column-major, innermost
 * first); any other order walks backward (row-major).  Returns the final
 * accumulated stride, i.e. the total extent in bytes. */
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
  int dim;

  if (__pyx_v_order == 'F') {
    /* Fortran layout: stride grows from the first dimension outward. */
    for (dim = 0; dim < __pyx_v_ndim; ++dim) {
      __pyx_v_strides[dim] = __pyx_v_stride;
      __pyx_v_stride *= __pyx_v_shape[dim];
    }
  } else {
    /* C layout: stride grows from the last dimension inward. */
    for (dim = __pyx_v_ndim - 1; dim >= 0; --dim) {
      __pyx_v_strides[dim] = __pyx_v_stride;
      __pyx_v_stride *= __pyx_v_shape[dim];
    }
  }

  return __pyx_v_stride;
}
/* "View.MemoryView":1205
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* copy_data_to_temp: malloc() a buffer big enough for the whole slice
 * `src`, describe it in `tmpslice` as a contiguous slice in layout
 * `order` ('C' or 'F'), and copy src's elements into it.  The caller
 * owns the returned pointer and must free() it.  tmpslice->memview is a
 * borrowed pointer.  Returns NULL with a Python exception set on
 * failure; the Cython source declares this function nogil, so the error
 * path re-acquires the GIL just to record the traceback. */
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
int __pyx_v_i;
void *__pyx_v_result;
size_t __pyx_v_itemsize;
size_t __pyx_v_size;
void *__pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
struct __pyx_memoryview_obj *__pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
/* "View.MemoryView":1216
 * cdef void *result
 *
 * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
 * cdef size_t size = slice_get_size(src, ndim)
 *
 */
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1217
 *
 * cdef size_t itemsize = src.memview.view.itemsize
 * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<<
 *
 * result = malloc(size)
 */
/* Total byte size of the slice = itemsize * prod(shape). */
__pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
/* "View.MemoryView":1219
 * cdef size_t size = slice_get_size(src, ndim)
 *
 * result = malloc(size) # <<<<<<<<<<<<<<
 * if not result:
 * _err(MemoryError, NULL)
 */
__pyx_v_result = malloc(__pyx_v_size);
/* "View.MemoryView":1220
 *
 * result = malloc(size)
 * if not result: # <<<<<<<<<<<<<<
 * _err(MemoryError, NULL)
 *
 */
__pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1221
 * result = malloc(size)
 * if not result:
 * _err(MemoryError, NULL) # <<<<<<<<<<<<<<
 *
 *
 */
/* _err always returns -1 after raising, so this always jumps to the error exit. */
__pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1221, __pyx_L1_error)
/* "View.MemoryView":1220
 *
 * result = malloc(size)
 * if not result: # <<<<<<<<<<<<<<
 * _err(MemoryError, NULL)
 *
 */
}
/* "View.MemoryView":1224
 *
 *
 * tmpslice.data = <char *> result # <<<<<<<<<<<<<<
 * tmpslice.memview = src.memview
 * for i in range(ndim):
 */
__pyx_v_tmpslice->data = ((char *)__pyx_v_result);
/* "View.MemoryView":1225
 *
 * tmpslice.data = <char *> result
 * tmpslice.memview = src.memview # <<<<<<<<<<<<<<
 * for i in range(ndim):
 * tmpslice.shape[i] = src.shape[i]
 */
/* Borrowed pointer: tmpslice does not take a new reference to the memview. */
__pyx_t_4 = __pyx_v_src->memview;
__pyx_v_tmpslice->memview = __pyx_t_4;
/* "View.MemoryView":1226
 * tmpslice.data = <char *> result
 * tmpslice.memview = src.memview
 * for i in range(ndim): # <<<<<<<<<<<<<<
 * tmpslice.shape[i] = src.shape[i]
 * tmpslice.suboffsets[i] = -1
 */
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1227
 * tmpslice.memview = src.memview
 * for i in range(ndim):
 * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<<
 * tmpslice.suboffsets[i] = -1
 *
 */
(__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
/* "View.MemoryView":1228
 * for i in range(ndim):
 * tmpslice.shape[i] = src.shape[i]
 * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
 *
 * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
 */
/* -1 marks "no indirection": the temp buffer is always direct. */
(__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1230
 * tmpslice.suboffsets[i] = -1
 *
 * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<<
 * ndim, order)
 *
 */
/* Compute contiguous strides for the requested layout; byte count result unused. */
(void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
/* "View.MemoryView":1234
 *
 *
 * for i in range(ndim): # <<<<<<<<<<<<<<
 * if tmpslice.shape[i] == 1:
 * tmpslice.strides[i] = 0
 */
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1235
 *
 * for i in range(ndim):
 * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
 * tmpslice.strides[i] = 0
 *
 */
__pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1236
 * for i in range(ndim):
 * if tmpslice.shape[i] == 1:
 * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<<
 *
 * if slice_is_contig(src[0], order, ndim):
 */
/* Extent-1 dimensions get stride 0 (broadcast convention). */
(__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1235
 *
 * for i in range(ndim):
 * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
 * tmpslice.strides[i] = 0
 *
 */
}
}
/* "View.MemoryView":1238
 * tmpslice.strides[i] = 0
 *
 * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
 * memcpy(result, src.data, size)
 * else:
 */
/* Fast path: source already contiguous in the requested order -> bulk copy. */
__pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1239
 *
 * if slice_is_contig(src[0], order, ndim):
 * memcpy(result, src.data, size) # <<<<<<<<<<<<<<
 * else:
 * copy_strided_to_strided(src, tmpslice, ndim, itemsize)
 */
(void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
/* "View.MemoryView":1238
 * tmpslice.strides[i] = 0
 *
 * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
 * memcpy(result, src.data, size)
 * else:
 */
goto __pyx_L9;
}
/* "View.MemoryView":1241
 * memcpy(result, src.data, size)
 * else:
 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<<
 *
 * return result
 */
/*else*/ {
/* Slow path: element-wise strided copy into the contiguous temp. */
copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
}
__pyx_L9:;
/* "View.MemoryView":1243
 * copy_strided_to_strided(src, tmpslice, ndim, itemsize)
 *
 * return result # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":1205
 *
 * @cname('__pyx_memoryview_copy_data_to_temp')
 * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
 * __Pyx_memviewslice *tmpslice,
 * char order,
 */
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
/* Function runs without the GIL; take it briefly to record the traceback. */
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = NULL;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1248
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* Error helper for memoryview copies (Cython `_err_extents`, `with gil`):
 * raises ValueError("got differing extents in dimension %d (got %d and %d)")
 * for dimension `i` with the two mismatched extents, and always returns -1
 * so callers can propagate the exception.  The GIL is acquired for the
 * whole body because this is reached from nogil copy code. */
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_extents", 0);
/* "View.MemoryView":1251
 * Py_ssize_t extent2) except -1 with gil:
 * raise ValueError("got differing extents in dimension %d (got %d and %d)" %
 * (i, extent1, extent2)) # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_err_dim')
 */
/* Box the three C integers and pack them into the (i, extent1, extent2)
 * tuple used as the right-hand side of the % format below. */
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":1250
 * cdef int _err_extents(int i, Py_ssize_t extent1,
 * Py_ssize_t extent2) except -1 with gil:
 * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<<
 * (i, extent1, extent2))
 *
 */
/* Format the message, build the ValueError, and raise it; control always
 * reaches __pyx_L1_error (either via a failed step or via the raise). */
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1250, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1250, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 1250, __pyx_L1_error)
/* "View.MemoryView":1248
 *
 * @cname('__pyx_memoryview_err_extents')
 * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
 * Py_ssize_t extent2) except -1 with gil:
 * raise ValueError("got differing extents in dimension %d (got %d and %d)" %
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1254
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* Error helper (Cython `_err_dim`, `with gil`): raises
 * `error(msg.decode('ascii') % dim)` — i.e. formats the ASCII C string
 * `msg` with the dimension index and raises it as an instance of the
 * given exception class `error`.  Always returns -1 to signal the error
 * to the (nogil) caller.  The GIL is held for the whole body. */
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_dim", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1255
 * @cname('__pyx_memoryview_err_dim')
 * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
 * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_err')
 */
/* Decode the ASCII message and %-format it with the boxed dim. */
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1255, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1255, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1255, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* Call `error` with the formatted message (unpacking a bound method
 * into self + function when possible), then raise the result. */
__Pyx_INCREF(__pyx_v_error);
__pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1255, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 1255, __pyx_L1_error)
/* "View.MemoryView":1254
 *
 * @cname('__pyx_memoryview_err_dim')
 * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
 * raise error(msg.decode('ascii') % dim)
 *
 */
/* function exit code: this function always raises, so it always exits
 * through __pyx_L1_error with a -1 return. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1258
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* Error helper (Cython `_err`, `with gil`): if `msg` is non-NULL, raises
 * `error(msg.decode('ascii'))`; otherwise re-raises `error` itself (which
 * may then be a class or an exception instance).  Always returns -1. */
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1259
 * @cname('__pyx_memoryview_err')
 * cdef int _err(object error, char *msg) except -1 with gil:
 * if msg != NULL: # <<<<<<<<<<<<<<
 * raise error(msg.decode('ascii'))
 * else:
 */
__pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":1260
 * cdef int _err(object error, char *msg) except -1 with gil:
 * if msg != NULL:
 * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<<
 * else:
 * raise error
 */
/* Decode the message and call `error` with it (bound-method unpacking
 * as in _err_dim), then raise the resulting exception instance. */
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1260, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_error);
__pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
__pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1260, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 1260, __pyx_L1_error)
/* "View.MemoryView":1259
 * @cname('__pyx_memoryview_err')
 * cdef int _err(object error, char *msg) except -1 with gil:
 * if msg != NULL: # <<<<<<<<<<<<<<
 * raise error(msg.decode('ascii'))
 * else:
 */
}
/* "View.MemoryView":1262
 * raise error(msg.decode('ascii'))
 * else:
 * raise error # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_copy_contents')
 */
/*else*/ {
__Pyx_Raise(__pyx_v_error, 0, 0, 0);
__PYX_ERR(1, 1262, __pyx_L1_error)
}
/* "View.MemoryView":1258
 *
 * @cname('__pyx_memoryview_err')
 * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
 * if msg != NULL:
 * raise error(msg.decode('ascii'))
 */
/* function exit code: both branches raise, so exit is always via
 * __pyx_L1_error with a -1 return. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1265
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* Copy the contents of memoryview slice `src` into `dst` (Cython
 * `memoryview_copy_contents`).  Handles NumPy-style broadcasting of
 * leading/size-1 dimensions, rejects indirect (suboffset) dimensions,
 * routes overlapping copies through a temporary buffer, takes a memcpy
 * fast path when both slices are contiguous in the same order, and
 * brackets object-dtype copies with refcount fixups.  Returns 0 on
 * success, -1 on error (with a Python exception set). */
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
void *__pyx_t_7;
int __pyx_t_8;
/* "View.MemoryView":1273
 * Check for overlapping memory and verify the shapes.
 * """
 * cdef void *tmpdata = NULL # <<<<<<<<<<<<<<
 * cdef size_t itemsize = src.memview.view.itemsize
 * cdef int i
 */
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1274
 * """
 * cdef void *tmpdata = NULL
 * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
 * cdef int i
 * cdef char order = get_best_order(&src, src_ndim)
 */
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1276
 * cdef size_t itemsize = src.memview.view.itemsize
 * cdef int i
 * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<<
 * cdef bint broadcasting = False
 * cdef bint direct_copy = False
 */
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1277
 * cdef int i
 * cdef char order = get_best_order(&src, src_ndim)
 * cdef bint broadcasting = False # <<<<<<<<<<<<<<
 * cdef bint direct_copy = False
 * cdef __Pyx_memviewslice tmp
 */
__pyx_v_broadcasting = 0;
/* "View.MemoryView":1278
 * cdef char order = get_best_order(&src, src_ndim)
 * cdef bint broadcasting = False
 * cdef bint direct_copy = False # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice tmp
 *
 */
__pyx_v_direct_copy = 0;
/* Pad whichever slice has fewer dimensions with leading size-1 dims so
 * both are viewed with the same rank (NumPy leading-broadcast rule). */
/* "View.MemoryView":1281
 * cdef __Pyx_memviewslice tmp
 *
 * if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
 * broadcast_leading(&src, src_ndim, dst_ndim)
 * elif dst_ndim < src_ndim:
 */
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1282
 *
 * if src_ndim < dst_ndim:
 * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<<
 * elif dst_ndim < src_ndim:
 * broadcast_leading(&dst, dst_ndim, src_ndim)
 */
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
/* "View.MemoryView":1281
 * cdef __Pyx_memviewslice tmp
 *
 * if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
 * broadcast_leading(&src, src_ndim, dst_ndim)
 * elif dst_ndim < src_ndim:
 */
goto __pyx_L3;
}
/* "View.MemoryView":1283
 * if src_ndim < dst_ndim:
 * broadcast_leading(&src, src_ndim, dst_ndim)
 * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
 * broadcast_leading(&dst, dst_ndim, src_ndim)
 *
 */
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1284
 * broadcast_leading(&src, src_ndim, dst_ndim)
 * elif dst_ndim < src_ndim:
 * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<<
 *
 * cdef int ndim = max(src_ndim, dst_ndim)
 */
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
/* "View.MemoryView":1283
 * if src_ndim < dst_ndim:
 * broadcast_leading(&src, src_ndim, dst_ndim)
 * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
 * broadcast_leading(&dst, dst_ndim, src_ndim)
 *
 */
}
__pyx_L3:;
/* "View.MemoryView":1286
 * broadcast_leading(&dst, dst_ndim, src_ndim)
 *
 * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<<
 *
 * for i in range(ndim):
 */
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* Per-dimension validation: size-1 src dims broadcast (stride forced to
 * 0); any other mismatch raises via _err_extents; indirect dims
 * (suboffset >= 0) are rejected via _err_dim. */
/* "View.MemoryView":1288
 * cdef int ndim = max(src_ndim, dst_ndim)
 *
 * for i in range(ndim): # <<<<<<<<<<<<<<
 * if src.shape[i] != dst.shape[i]:
 * if src.shape[i] == 1:
 */
__pyx_t_5 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_5;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1289
 *
 * for i in range(ndim):
 * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
 * if src.shape[i] == 1:
 * broadcasting = True
 */
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1290
 * for i in range(ndim):
 * if src.shape[i] != dst.shape[i]:
 * if src.shape[i] == 1: # <<<<<<<<<<<<<<
 * broadcasting = True
 * src.strides[i] = 0
 */
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1291
 * if src.shape[i] != dst.shape[i]:
 * if src.shape[i] == 1:
 * broadcasting = True # <<<<<<<<<<<<<<
 * src.strides[i] = 0
 * else:
 */
__pyx_v_broadcasting = 1;
/* "View.MemoryView":1292
 * if src.shape[i] == 1:
 * broadcasting = True
 * src.strides[i] = 0 # <<<<<<<<<<<<<<
 * else:
 * _err_extents(i, dst.shape[i], src.shape[i])
 */
(__pyx_v_src.strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1290
 * for i in range(ndim):
 * if src.shape[i] != dst.shape[i]:
 * if src.shape[i] == 1: # <<<<<<<<<<<<<<
 * broadcasting = True
 * src.strides[i] = 0
 */
goto __pyx_L7;
}
/* "View.MemoryView":1294
 * src.strides[i] = 0
 * else:
 * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<<
 *
 * if src.suboffsets[i] >= 0:
 */
/*else*/ {
__pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1294, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":1289
 *
 * for i in range(ndim):
 * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
 * if src.shape[i] == 1:
 * broadcasting = True
 */
}
/* "View.MemoryView":1296
 * _err_extents(i, dst.shape[i], src.shape[i])
 *
 * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
 * _err_dim(ValueError, "Dimension %d is not direct", i)
 *
 */
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1297
 *
 * if src.suboffsets[i] >= 0:
 * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<<
 *
 * if slices_overlap(&src, &dst, ndim, itemsize):
 */
__pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error)
/* "View.MemoryView":1296
 * _err_extents(i, dst.shape[i], src.shape[i])
 *
 * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
 * _err_dim(ValueError, "Dimension %d is not direct", i)
 *
 */
}
}
/* If src and dst memory overlap, copy src into a freshly allocated
 * temporary first and copy from that instead; the temp pointer is kept
 * in tmpdata and freed on the success paths below. */
/* "View.MemoryView":1299
 * _err_dim(ValueError, "Dimension %d is not direct", i)
 *
 * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
 *
 * if not slice_is_contig(src, order, ndim):
 */
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1301
 * if slices_overlap(&src, &dst, ndim, itemsize):
 *
 * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
 * order = get_best_order(&dst, ndim)
 *
 */
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1302
 *
 * if not slice_is_contig(src, order, ndim):
 * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<<
 *
 * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
 */
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
/* "View.MemoryView":1301
 * if slices_overlap(&src, &dst, ndim, itemsize):
 *
 * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
 * order = get_best_order(&dst, ndim)
 *
 */
}
/* "View.MemoryView":1304
 * order = get_best_order(&dst, ndim)
 *
 * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<<
 * src = tmp
 *
 */
__pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1304, __pyx_L1_error)
__pyx_v_tmpdata = __pyx_t_7;
/* "View.MemoryView":1305
 *
 * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
 * src = tmp # <<<<<<<<<<<<<<
 *
 * if not broadcasting:
 */
__pyx_v_src = __pyx_v_tmp;
/* "View.MemoryView":1299
 * _err_dim(ValueError, "Dimension %d is not direct", i)
 *
 * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
 *
 * if not slice_is_contig(src, order, ndim):
 */
}
/* Fast path: when no broadcasting occurred and both slices are
 * contiguous in the same ('C' or 'F') order, a single memcpy suffices. */
/* "View.MemoryView":1307
 * src = tmp
 *
 * if not broadcasting: # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1310
 *
 *
 * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
 * direct_copy = slice_is_contig(dst, 'C', ndim)
 * elif slice_is_contig(src, 'F', ndim):
 */
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1311
 *
 * if slice_is_contig(src, 'C', ndim):
 * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<<
 * elif slice_is_contig(src, 'F', ndim):
 * direct_copy = slice_is_contig(dst, 'F', ndim)
 */
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
/* "View.MemoryView":1310
 *
 *
 * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
 * direct_copy = slice_is_contig(dst, 'C', ndim)
 * elif slice_is_contig(src, 'F', ndim):
 */
goto __pyx_L12;
}
/* "View.MemoryView":1312
 * if slice_is_contig(src, 'C', ndim):
 * direct_copy = slice_is_contig(dst, 'C', ndim)
 * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
 * direct_copy = slice_is_contig(dst, 'F', ndim)
 *
 */
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1313
 * direct_copy = slice_is_contig(dst, 'C', ndim)
 * elif slice_is_contig(src, 'F', ndim):
 * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<<
 *
 * if direct_copy:
 */
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
/* "View.MemoryView":1312
 * if slice_is_contig(src, 'C', ndim):
 * direct_copy = slice_is_contig(dst, 'C', ndim)
 * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
 * direct_copy = slice_is_contig(dst, 'F', ndim)
 *
 */
}
__pyx_L12:;
/* "View.MemoryView":1315
 * direct_copy = slice_is_contig(dst, 'F', ndim)
 *
 * if direct_copy: # <<<<<<<<<<<<<<
 *
 * refcount_copying(&dst, dtype_is_object, ndim, False)
 */
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
/* Release the references currently held by dst's elements, overwrite
 * the raw bytes, then acquire references for the new contents. */
/* "View.MemoryView":1317
 * if direct_copy:
 *
 * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
 * memcpy(dst.data, src.data, slice_get_size(&src, ndim))
 * refcount_copying(&dst, dtype_is_object, ndim, True)
 */
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1318
 *
 * refcount_copying(&dst, dtype_is_object, ndim, False)
 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<<
 * refcount_copying(&dst, dtype_is_object, ndim, True)
 * free(tmpdata)
 */
(void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));
/* "View.MemoryView":1319
 * refcount_copying(&dst, dtype_is_object, ndim, False)
 * memcpy(dst.data, src.data, slice_get_size(&src, ndim))
 * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
 * free(tmpdata)
 * return 0
 */
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1320
 * memcpy(dst.data, src.data, slice_get_size(&src, ndim))
 * refcount_copying(&dst, dtype_is_object, ndim, True)
 * free(tmpdata) # <<<<<<<<<<<<<<
 * return 0
 *
 */
free(__pyx_v_tmpdata);
/* "View.MemoryView":1321
 * refcount_copying(&dst, dtype_is_object, ndim, True)
 * free(tmpdata)
 * return 0 # <<<<<<<<<<<<<<
 *
 * if order == 'F' == get_best_order(&dst, ndim):
 */
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1315
 * direct_copy = slice_is_contig(dst, 'F', ndim)
 *
 * if direct_copy: # <<<<<<<<<<<<<<
 *
 * refcount_copying(&dst, dtype_is_object, ndim, False)
 */
}
/* "View.MemoryView":1307
 * src = tmp
 *
 * if not broadcasting: # <<<<<<<<<<<<<<
 *
 *
 */
}
/* If both slices favor Fortran order, transpose both in place so the
 * generic strided copy below walks them in C order. */
/* "View.MemoryView":1323
 * return 0
 *
 * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_8 = (__pyx_t_2 != 0);
if (__pyx_t_8) {
/* "View.MemoryView":1326
 *
 *
 * transpose_memslice(&src) # <<<<<<<<<<<<<<
 * transpose_memslice(&dst)
 *
 */
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1326, __pyx_L1_error)
/* "View.MemoryView":1327
 *
 * transpose_memslice(&src)
 * transpose_memslice(&dst) # <<<<<<<<<<<<<<
 *
 * refcount_copying(&dst, dtype_is_object, ndim, False)
 */
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1327, __pyx_L1_error)
/* "View.MemoryView":1323
 * return 0
 *
 * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
 *
 *
 */
}
/* General path: element-by-element strided copy, again bracketed by the
 * object-refcount release/acquire pair. */
/* "View.MemoryView":1329
 * transpose_memslice(&dst)
 *
 * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
 * copy_strided_to_strided(&src, &dst, ndim, itemsize)
 * refcount_copying(&dst, dtype_is_object, ndim, True)
 */
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1330
 *
 * refcount_copying(&dst, dtype_is_object, ndim, False)
 * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<<
 * refcount_copying(&dst, dtype_is_object, ndim, True)
 *
 */
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1331
 * refcount_copying(&dst, dtype_is_object, ndim, False)
 * copy_strided_to_strided(&src, &dst, ndim, itemsize)
 * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
 *
 * free(tmpdata)
 */
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1333
 * refcount_copying(&dst, dtype_is_object, ndim, True)
 *
 * free(tmpdata) # <<<<<<<<<<<<<<
 * return 0
 *
 */
free(__pyx_v_tmpdata);
/* "View.MemoryView":1334
 *
 * free(tmpdata)
 * return 0 # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_broadcast_leading')
 */
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1265
 *
 * @cname('__pyx_memoryview_copy_contents')
 * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
 * __Pyx_memviewslice dst,
 * int src_ndim, int dst_ndim,
 */
/* function exit code: the error path must take the GIL before recording
 * the traceback, since this function runs nogil.
 * NOTE(review): on this path tmpdata is not freed here — presumably
 * ownership/cleanup is handled by the caller or the allocation failed;
 * confirm against copy_data_to_temp's error behavior. */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1337
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* "View.MemoryView":1337 — Cython `broadcast_leading` (nogil).
 * Grow `mslice` from `ndim` to `ndim_other` dimensions in place by
 * shifting its existing shape/strides/suboffsets toward the tail and
 * installing size-1 leading dimensions (the NumPy leading-broadcast
 * rule).  New leading dims get shape 1, the stride of the (shifted)
 * first real dimension, and suboffset -1 (direct access). */
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
  int dim;
  const int shift = __pyx_v_ndim_other - __pyx_v_ndim;

  /* Walk the existing dims from last to first so each triple is moved
   * before its destination slot could be read again. */
  for (dim = __pyx_v_ndim - 1; dim >= 0; --dim) {
    __pyx_v_mslice->shape[dim + shift]      = __pyx_v_mslice->shape[dim];
    __pyx_v_mslice->strides[dim + shift]    = __pyx_v_mslice->strides[dim];
    __pyx_v_mslice->suboffsets[dim + shift] = __pyx_v_mslice->suboffsets[dim];
  }

  /* Fill the newly exposed leading dimensions. */
  for (dim = 0; dim < shift; ++dim) {
    __pyx_v_mslice->shape[dim]      = 1;
    __pyx_v_mslice->strides[dim]    = __pyx_v_mslice->strides[0];
    __pyx_v_mslice->suboffsets[dim] = -1L;
  }
}
/* "View.MemoryView":1359
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* "View.MemoryView":1359 — Cython `refcount_copying` (nogil).
 * Adjust the refcounts of every Python object referenced by `dst`
 * before/after its bytes are overwritten: inc=False releases the old
 * references, inc=True acquires references for the new contents.
 * A no-op for non-object dtypes, so no GIL is taken in that case. */
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
  /* Only object-dtype slices hold Python references. */
  if (!__pyx_v_dtype_is_object)
    return;
  /* Delegate to the GIL-acquiring recursive walker. */
  __pyx_memoryview_refcount_objects_in_slice_with_gil(
      __pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides,
      __pyx_v_ndim, __pyx_v_inc);
}
/* "View.MemoryView":1368
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* GIL-acquiring wrapper (Cython `refcount_objects_in_slice_with_gil`):
 * takes the GIL and forwards to refcount_objects_in_slice, which does
 * the actual recursive Py_INCREF/Py_DECREF walk over the slice. */
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
/* "View.MemoryView":1371
 * Py_ssize_t *strides, int ndim,
 * bint inc) with gil:
 * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_refcount_objects_in_slice')
 */
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1368
 *
 * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
 * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
 * Py_ssize_t *strides, int ndim,
 * bint inc) with gil:
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
/* "View.MemoryView":1374
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* Recursive refcount walk (Cython `refcount_objects_in_slice`): for every
 * element of the strided slice described by data/shape/strides/ndim,
 * Py_INCREF (inc=True) or Py_DECREF (inc=False) the PyObject* stored
 * there.  Innermost (ndim == 1) elements are treated as PyObject**;
 * outer dimensions recurse with shape+1/strides+1.  Caller must hold
 * the GIL (see the _with_gil wrapper). */
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
/* "View.MemoryView":1378
 * cdef Py_ssize_t i
 *
 * for i in range(shape[0]): # <<<<<<<<<<<<<<
 * if ndim == 1:
 * if inc:
 */
__pyx_t_1 = (__pyx_v_shape[0]);
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1379
 *
 * for i in range(shape[0]):
 * if ndim == 1: # <<<<<<<<<<<<<<
 * if inc:
 * Py_INCREF((<PyObject **> data)[0])
 */
__pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_4) {
/* Base case: `data` points directly at a PyObject* element. */
/* "View.MemoryView":1380
 * for i in range(shape[0]):
 * if ndim == 1:
 * if inc: # <<<<<<<<<<<<<<
 * Py_INCREF((<PyObject **> data)[0])
 * else:
 */
__pyx_t_4 = (__pyx_v_inc != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1381
 * if ndim == 1:
 * if inc:
 * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
 * else:
 * Py_DECREF((<PyObject **> data)[0])
 */
Py_INCREF((((PyObject **)__pyx_v_data)[0]));
/* "View.MemoryView":1380
 * for i in range(shape[0]):
 * if ndim == 1:
 * if inc: # <<<<<<<<<<<<<<
 * Py_INCREF((<PyObject **> data)[0])
 * else:
 */
goto __pyx_L6;
}
/* "View.MemoryView":1383
 * Py_INCREF((<PyObject **> data)[0])
 * else:
 * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
 * else:
 * refcount_objects_in_slice(data, shape + 1, strides + 1,
 */
/*else*/ {
Py_DECREF((((PyObject **)__pyx_v_data)[0]));
}
__pyx_L6:;
/* "View.MemoryView":1379
 *
 * for i in range(shape[0]):
 * if ndim == 1: # <<<<<<<<<<<<<<
 * if inc:
 * Py_INCREF((<PyObject **> data)[0])
 */
goto __pyx_L5;
}
/* Recursive case: drop the leading dimension and recurse into the
 * (ndim - 1)-dimensional sub-slice at this offset. */
/* "View.MemoryView":1385
 * Py_DECREF((<PyObject **> data)[0])
 * else:
 * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
 * ndim - 1, inc)
 *
 */
/*else*/ {
/* "View.MemoryView":1386
 * else:
 * refcount_objects_in_slice(data, shape + 1, strides + 1,
 * ndim - 1, inc) # <<<<<<<<<<<<<<
 *
 * data += strides[0]
 */
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
}
__pyx_L5:;
/* Advance to the next element along the leading dimension. */
/* "View.MemoryView":1388
 * ndim - 1, inc)
 *
 * data += strides[0] # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
}
/* "View.MemoryView":1374
 *
 * @cname('__pyx_memoryview_refcount_objects_in_slice')
 * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
 * Py_ssize_t *strides, int ndim, bint inc):
 * cdef Py_ssize_t i
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1394
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* slice_assign_scalar: broadcast one scalar `item` (itemsize bytes) into every
 * element of the ndim-dimensional memoryview slice `dst`.  The bracketing
 * refcount_copying() calls (last argument 0 before the copy, 1 after) manage
 * PyObject* refcounts when dtype_is_object is true -- NOTE(review): inferred
 * from the callee name and the inc flag; confirm against
 * __pyx_memoryview_refcount_copying. */
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
/* "View.MemoryView":1397
 * size_t itemsize, void *item,
 * bint dtype_is_object) nogil:
 * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
 * itemsize, item)
 */
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1398
 * bint dtype_is_object) nogil:
 * refcount_copying(dst, dtype_is_object, ndim, False)
 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<<
 * itemsize, item)
 * refcount_copying(dst, dtype_is_object, ndim, True)
 */
/* recursive worker does the actual element-wise memcpy walk */
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1400
 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
 * itemsize, item)
 * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1394
 *
 * @cname('__pyx_memoryview_slice_assign_scalar')
 * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
 * size_t itemsize, void *item,
 * bint dtype_is_object) nogil:
 */
/* function exit code */
}
/* "View.MemoryView":1404
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* _slice_assign_scalar: recursive worker for slice_assign_scalar.  Base case
 * (ndim == 1): memcpy `item` into each of the `extent` elements, advancing
 * `data` by `stride` bytes per element.  Otherwise recurse into each
 * (ndim-1)-dimensional sub-slice with shape/strides shifted by one level. */
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_extent;
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
/* "View.MemoryView":1408
 * size_t itemsize, void *item) nogil:
 * cdef Py_ssize_t i
 * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t extent = shape[0]
 *
 */
__pyx_v_stride = (__pyx_v_strides[0]);
/* "View.MemoryView":1409
 * cdef Py_ssize_t i
 * cdef Py_ssize_t stride = strides[0]
 * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<<
 *
 * if ndim == 1:
 */
__pyx_v_extent = (__pyx_v_shape[0]);
/* "View.MemoryView":1411
 * cdef Py_ssize_t extent = shape[0]
 *
 * if ndim == 1: # <<<<<<<<<<<<<<
 * for i in range(extent):
 * memcpy(data, item, itemsize)
 */
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1412
 *
 * if ndim == 1:
 * for i in range(extent): # <<<<<<<<<<<<<<
 * memcpy(data, item, itemsize)
 * data += stride
 */
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1413
 * if ndim == 1:
 * for i in range(extent):
 * memcpy(data, item, itemsize) # <<<<<<<<<<<<<<
 * data += stride
 * else:
 */
(void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize));
/* "View.MemoryView":1414
 * for i in range(extent):
 * memcpy(data, item, itemsize)
 * data += stride # <<<<<<<<<<<<<<
 * else:
 * for i in range(extent):
 */
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
/* "View.MemoryView":1411
 * cdef Py_ssize_t extent = shape[0]
 *
 * if ndim == 1: # <<<<<<<<<<<<<<
 * for i in range(extent):
 * memcpy(data, item, itemsize)
 */
goto __pyx_L3;
}
/* "View.MemoryView":1416
 * data += stride
 * else:
 * for i in range(extent): # <<<<<<<<<<<<<<
 * _slice_assign_scalar(data, shape + 1, strides + 1,
 * ndim - 1, itemsize, item)
 */
/*else*/ {
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1417
 * else:
 * for i in range(extent):
 * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
 * ndim - 1, itemsize, item)
 * data += stride
 */
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1419
 * _slice_assign_scalar(data, shape + 1, strides + 1,
 * ndim - 1, itemsize, item)
 * data += stride # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1404
 *
 * @cname('__pyx_memoryview__slice_assign_scalar')
 * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
 * Py_ssize_t *strides, int ndim,
 * size_t itemsize, void *item) nogil:
 */
/* function exit code */
}
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
/* CPython-callable wrapper for __pyx_unpickle_Enum(type, checksum, state).
 * Unpacks the three positional/keyword arguments (all required), converts
 * `checksum` to a C long, then delegates to the __pyx_pf_ implementation.
 * Raises TypeError via __Pyx_RaiseArgtupleInvalid on bad arity. */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* first collect positionals; fall-through fills values[] back to front */
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* then fill any remaining slots from keyword arguments */
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
/* any keyword left over is unexpected -> TypeError */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of __pyx_unpickle_Enum: reconstructs a MemviewEnum during
 * unpickling.  Verifies the pickled layout checksum (0xb068931), raises
 * pickle.PickleError on mismatch, otherwise creates the instance via
 * Enum.__new__(type) and, when a state tuple is given, applies it through
 * __pyx_unpickle_Enum__set_state.  Returns the new object or NULL on error. */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);
/* "(tree fragment)":4
 * cdef object __pyx_PickleError
 * cdef object __pyx_result
 * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
 * from pickle import PickleError as __pyx_PickleError
 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 */
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
 * cdef object __pyx_result
 * if __pyx_checksum != 0xb068931:
 * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 * __pyx_result = Enum.__new__(__pyx_type)
 */
/* lazily import pickle.PickleError only on the error path */
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
 * if __pyx_checksum != 0xb068931:
 * from pickle import PickleError as __pyx_PickleError
 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<<
 * __pyx_result = Enum.__new__(__pyx_type)
 * if __pyx_state is not None:
 */
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
/* unpack a possible bound method so we can call its underlying function */
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
 * cdef object __pyx_PickleError
 * cdef object __pyx_result
 * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
 * from pickle import PickleError as __pyx_PickleError
 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 */
}
/* "(tree fragment)":7
 * from pickle import PickleError as __pyx_PickleError
 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<<
 * if __pyx_state is not None:
 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 */
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 * __pyx_result = Enum.__new__(__pyx_type)
 * if __pyx_state is not None: # <<<<<<<<<<<<<<
 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 * return __pyx_result
 */
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
 * __pyx_result = Enum.__new__(__pyx_type)
 * if __pyx_state is not None:
 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
 * return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 */
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 * __pyx_result = Enum.__new__(__pyx_type)
 * if __pyx_state is not None: # <<<<<<<<<<<<<<
 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 * return __pyx_result
 */
}
/* "(tree fragment)":10
 * if __pyx_state is not None:
 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 * return __pyx_result # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 * __pyx_result.name = __pyx_state[0]
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
 * cdef object __pyx_PickleError
 * cdef object __pyx_result
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/* __pyx_unpickle_Enum__set_state: restore a MemviewEnum from its pickled
 * state tuple.  state[0] becomes the instance's `name`; when the tuple has a
 * second element and the instance has a __dict__, state[1] is merged into it
 * via dict.update().  Returns None on success, NULL with an exception set
 * (e.g. TypeError when state is None) on failure. */
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);
/* "(tree fragment)":12
 * return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<<
 * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
 * __pyx_result.__dict__.update(__pyx_state[1])
 */
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
/* swap the new name in, releasing the reference held by the old one */
__Pyx_GOTREF(__pyx_v___pyx_result->name);
__Pyx_DECREF(__pyx_v___pyx_result->name);
__pyx_v___pyx_result->name = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 * __pyx_result.name = __pyx_state[0]
 * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
 * __pyx_result.__dict__.update(__pyx_state[1])
 */
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_3 > 1) != 0);
/* short-circuit `and`: only check hasattr when len > 1 */
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
/* "(tree fragment)":14
 * __pyx_result.name = __pyx_state[0]
 * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
 * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<<
 */
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 * __pyx_result.name = __pyx_state[0]
 * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
 * __pyx_result.__dict__.update(__pyx_state[1])
 */
}
/* "(tree fragment)":11
 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 * return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
 * __pyx_result.name = __pyx_state[0]
 * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static struct __pyx_vtabstruct_array __pyx_vtable_array;
/* tp_new for the `array` extension type: allocate the instance (via
 * tp_alloc, or PyBaseObject's tp_new for abstract subtypes), install the
 * vtable, initialize the owned PyObject* fields to None, then run
 * __cinit__.  On __cinit__ failure the half-built object is released. */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_array_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_array_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_array;
p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
/* tp_dealloc for `array`: give tp_finalize a chance to resurrect the object,
 * run the user __dealloc__ with the refcount temporarily bumped (so Python
 * code it triggers cannot re-enter dealloc) while preserving any pending
 * exception, then clear the owned fields and free the memory.
 * NOTE(review): `++Py_REFCNT(o)` stops compiling on CPython >= 3.10 where
 * Py_REFCNT is no longer an lvalue -- newer Cython emits Py_SET_REFCNT. */
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_array___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
/* sq_item shim for `array`: box the C index into a Python int and route
 * the lookup through the type's mp_subscript slot, so sequence-style
 * indexing shares one implementation with mapping-style subscription. */
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
  PyObject *index = PyInt_FromSsize_t(i);
  PyObject *result = NULL;
  if (index) {
    result = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, index);
    Py_DECREF(index);
  }
  return result;
}
/* mp_ass_subscript for `array`: a NULL value means `del o[i]`, which this
 * type does not support; otherwise delegate to the class __setitem__. */
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
    return -1;
  }
  return __pyx_array___setitem__(o, i, v);
}
/* tp_getattro for `array`: try generic attribute lookup first; only on
 * AttributeError clear the error and fall back to the class __getattr__. */
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
v = __pyx_array___getattr__(o, n);
}
return v;
}
/* getter thunk for the read-only `memview` property of `array` */
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}
/* method table for `array` (pickling helpers + explicit __getattr__) */
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
/* property table for `array` */
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
/* sequence protocol: only len() and integer indexing are provided */
static PySequenceMethods __pyx_tp_as_sequence_array = {
__pyx_array___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
/* mapping protocol: len(), o[k] and o[k] = v */
static PyMappingMethods __pyx_tp_as_mapping_array = {
__pyx_array___len__, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
/* buffer protocol: export only (no releasebuffer needed) */
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
/* PyTypeObject for the Cython `array` type: wires up dealloc, sequence /
 * mapping / buffer protocols, the __getattr__ fallback, the `memview`
 * property and tp_new defined above. */
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"quantas.utils.physics.statistical_mechanics.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
};
/* tp_new for the MemviewEnum sentinel type: allocate the instance and
 * initialize its single `name` field to None.  No __cinit__ is run here. */
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_MemviewEnum_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_MemviewEnum_obj *)o);
p->name = Py_None; Py_INCREF(Py_None);
return o;
}
/* tp_dealloc for MemviewEnum: optional finalization, untrack from the GC
 * (the type has Py_TPFLAGS_HAVE_GC), release `name`, free the memory. */
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
/* GC traverse for MemviewEnum: visit the single owned reference, `name`. */
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
if (p->name) {
e = (*v)(p->name, a); if (e) return e;
}
return 0;
}
/* GC clear for MemviewEnum: replace `name` with None (safe placeholder)
 * before dropping the old reference, so the field is never left dangling. */
static int __pyx_tp_clear_Enum(PyObject *o) {
PyObject* tmp;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
tmp = ((PyObject*)p->name);
p->name = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
/* method table for MemviewEnum (pickling helpers only) */
static PyMethodDef __pyx_methods_Enum[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
/* PyTypeObject for MemviewEnum: a GC-tracked sentinel type with __repr__,
 * __init__, GC traverse/clear and the pickling methods registered above. */
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
PyVarObject_HEAD_INIT(0, 0)
"quantas.utils.physics.statistical_mechanics.Enum", /*tp_name*/
sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_Enum, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_MemviewEnum___repr__, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_Enum, /*tp_traverse*/
__pyx_tp_clear_Enum, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_Enum, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_MemviewEnum___init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_Enum, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
};
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
/* tp_new for `memoryview`: allocate, install the vtable, set the owned
 * PyObject* fields to None and view.obj to NULL, then run __cinit__.
 * On __cinit__ failure the half-built object is released. */
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryview_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryview_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_memoryview;
p->obj = Py_None; Py_INCREF(Py_None);
p->_size = Py_None; Py_INCREF(Py_None);
p->_array_interface = Py_None; Py_INCREF(Py_None);
p->view.obj = NULL;
if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
/* tp_dealloc for `memoryview`: optional finalization, GC untrack, then run
 * the user __dealloc__ with the refcount temporarily bumped and any pending
 * exception saved/restored, clear the owned fields, free the memory.
 * NOTE(review): `++Py_REFCNT(o)` is not an lvalue on CPython >= 3.10;
 * regenerating with a newer Cython emits Py_SET_REFCNT instead. */
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_memoryview___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->obj);
Py_CLEAR(p->_size);
Py_CLEAR(p->_array_interface);
(*Py_TYPE(o)->tp_free)(o);
}
/* GC traverse for `memoryview`: visit every owned reference, including the
 * buffer-view owner view.obj. */
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
if (p->obj) {
e = (*v)(p->obj, a); if (e) return e;
}
if (p->_size) {
e = (*v)(p->_size, a); if (e) return e;
}
if (p->_array_interface) {
e = (*v)(p->_array_interface, a); if (e) return e;
}
if (p->view.obj) {
e = (*v)(p->view.obj, a); if (e) return e;
}
return 0;
}
/* GC clear for `memoryview`: swap each owned field to None before dropping
 * the old reference; view.obj is simply cleared (it may be NULL). */
static int __pyx_tp_clear_memoryview(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
tmp = ((PyObject*)p->obj);
p->obj = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_size);
p->_size = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_array_interface);
p->_array_interface = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
Py_CLEAR(p->view.obj);
return 0;
}
/* sq_item shim for `memoryview`: convert the C index to a Python int and
 * route the lookup through the mapping protocol's mp_subscript slot. */
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
  PyObject *boxed = PyInt_FromSsize_t(i);
  PyObject *item = NULL;
  if (boxed) {
    item = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, boxed);
    Py_DECREF(boxed);
  }
  return item;
}
/* mp_ass_subscript for `memoryview`: NULL value means `del o[i]`, which is
 * rejected; otherwise delegate to the class __setitem__. */
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
    return -1;
  }
  return __pyx_memoryview___setitem__(o, i, v);
}
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}
/* getset getter bound to "nbytes"; delegates to the generated __get__ wrapper. */
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}
/* getset getter bound to "size"; delegates to the generated __get__ wrapper. */
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}
/* tp_methods table for the memoryview type: contiguity queries, copy helpers,
   and pickling stubs. Terminated by the all-zero sentinel entry. */
static PyMethodDef __pyx_methods_memoryview[] = {
  {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
  {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
  {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
  {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
  {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0},
  {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0},
  {0, 0, 0, 0}
};
/* tp_getset table for the memoryview type: read-only properties (no setters),
   each mapped to its __pyx_getprop_* wrapper above. Sentinel-terminated. */
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
  {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
  {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
  {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
  {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
  {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
  {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
  {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
  {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
  {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
  {0, 0, 0, 0, 0}
};
/* Sequence protocol slots for memoryview: only len() and single-index
   item access are provided; sq_item forwards to the mapping protocol. */
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
  __pyx_memoryview___len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_memoryview, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};
/* Mapping protocol slots for memoryview: len(), o[key], and o[key] = value
   (deletion is rejected inside the ass_subscript wrapper). */
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
  __pyx_memoryview___len__, /*mp_length*/
  __pyx_memoryview___getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
/* Buffer protocol slots for memoryview: only the new-style getbuffer is
   implemented; the four legacy Python 2 slots are compiled in (as NULL)
   only when building for Python 2. */
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getreadbuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getwritebuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getsegcount*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getcharbuffer*/
  #endif
  __pyx_memoryview_getbuffer, /*bf_getbuffer*/
  0, /*bf_releasebuffer*/
};
/* PyTypeObject for the Cython memoryview class. GC-enabled (HAVE_GC with
   the traverse/clear pair), subclassable (BASETYPE), and exposing the
   sequence/mapping/buffer slot tables defined above. Each field is labeled
   inline with its slot name. */
static PyTypeObject __pyx_type___pyx_memoryview = {
  PyVarObject_HEAD_INIT(0, 0)
  "quantas.utils.physics.statistical_mechanics.memoryview", /*tp_name*/
  sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_memoryview, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  __pyx_memoryview___repr__, /*tp_repr*/
  0, /*tp_as_number*/
  &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  __pyx_memoryview___str__, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_memoryview, /*tp_traverse*/
  __pyx_tp_clear_memoryview, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_memoryview, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_memoryview, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_memoryview, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b1
  0, /*tp_vectorcall*/
  #endif
};
/* Static storage for the _memoryviewslice vtable (populated elsewhere in
   this generated file; only referenced through __pyx_vtabptr__memoryviewslice
   in tp_new below). */
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
/* tp_new for _memoryviewslice: allocate through the base memoryview tp_new,
   then initialize the subclass-specific fields (vtable pointer, from_object
   defaulted to None, and an empty slice). Returns NULL on allocation failure. */
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_memoryviewslice_obj *p;
  PyObject *o = __pyx_tp_new_memoryview(t, a, k);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_memoryviewslice_obj *)o);
  /* Swap in the subclass vtable over the base-class one. */
  p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
  p->from_object = Py_None; Py_INCREF(Py_None);
  p->from_slice.memview = NULL;
  return o;
}
/* tp_dealloc for _memoryviewslice. Runs tp_finalize first where supported
   (bailing out if the finalizer resurrected the object), untracks from GC,
   invokes the Cython __dealloc__ with the pending-exception state saved and
   the refcount temporarily bumped so the object looks alive while it runs,
   drops from_object, then chains to the base memoryview dealloc. */
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Preserve any in-flight exception across __dealloc__. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    ++Py_REFCNT(o);
    __pyx_memoryviewslice___dealloc__(o);
    --Py_REFCNT(o);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->from_object);
  /* Re-track before delegating: the base dealloc untracks again itself. */
  PyObject_GC_Track(o);
  __pyx_tp_dealloc_memoryview(o);
}
/* tp_traverse for _memoryviewslice: visit the base memoryview's references
   first, then the subclass's from_object reference. Returns the first
   non-zero visitor result, else 0. */
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
  if (p->from_object) {
    e = (*v)(p->from_object, a); if (e) return e;
  }
  return 0;
}
/* tp_clear for _memoryviewslice: clear the base memoryview's references,
   reset from_object to None (decref'ing the old value only after the field
   is safely replaced), and release the held memoryview slice. */
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
  PyObject* tmp;
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  __pyx_tp_clear_memoryview(o);
  tmp = ((PyObject*)p->from_object);
  p->from_object = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  __PYX_XDEC_MEMVIEW(&p->from_slice, 1);
  return 0;
}
/* getset getter bound to "base" on _memoryviewslice; delegates to the
   generated __get__ wrapper. */
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}
/* tp_methods table for _memoryviewslice: pickling stubs only.
   Sentinel-terminated. */
static PyMethodDef __pyx_methods__memoryviewslice[] = {
  {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0},
  {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0},
  {0, 0, 0, 0}
};
/* tp_getset table for _memoryviewslice: a single read-only "base" property.
   Sentinel-terminated. */
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
  {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
  {0, 0, 0, 0, 0}
};
/* PyTypeObject for the internal _memoryviewslice class (see tp_doc below).
   It inherits most behavior from memoryview; repr/str are filled in
   explicitly only on PyPy, where slot inheritance differs. The tp_base
   field is 0 here and — presumably — wired to the memoryview type during
   module init elsewhere in this generated file (not visible in this chunk). */
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
  PyVarObject_HEAD_INIT(0, 0)
  "quantas.utils.physics.statistical_mechanics._memoryviewslice", /*tp_name*/
  sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY
  __pyx_memoryview___repr__, /*tp_repr*/
  #else
  0, /*tp_repr*/
  #endif
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  #if CYTHON_COMPILING_IN_PYPY
  __pyx_memoryview___str__, /*tp_str*/
  #else
  0, /*tp_str*/
  #endif
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  "Internal class for passing memoryview slices to Python", /*tp_doc*/
  __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
  __pyx_tp_clear__memoryviewslice, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods__memoryviewslice, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets__memoryviewslice, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new__memoryviewslice, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b1
  0, /*tp_vectorcall*/
  #endif
};
/* Module-level method table: the public thermodynamics functions exposed by
   the statistical_mechanics module. All accept positional and keyword
   arguments; the last two carry no docstring. Sentinel-terminated. */
static PyMethodDef __pyx_methods[] = {
  {"zero_point_energy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_1zero_point_energy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7quantas_5utils_7physics_21statistical_mechanics_zero_point_energy},
  {"thermal_energy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_3thermal_energy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7quantas_5utils_7physics_21statistical_mechanics_2thermal_energy},
  {"internal_energy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_5internal_energy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7quantas_5utils_7physics_21statistical_mechanics_4internal_energy},
  {"entropy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_7entropy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7quantas_5utils_7physics_21statistical_mechanics_6entropy},
  {"vibrational_free_energy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_9vibrational_free_energy, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7quantas_5utils_7physics_21statistical_mechanics_8vibrational_free_energy},
  {"free_energy", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_11free_energy, METH_VARARGS|METH_KEYWORDS, 0},
  {"isochoric_heat_capacity", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7quantas_5utils_7physics_21statistical_mechanics_13isochoric_heat_capacity, METH_VARARGS|METH_KEYWORDS, 0},
  {0, 0, 0, 0}
};
/* Python 3 module definition. Under PEP 489 multi-phase init the module is
   built via create/exec slots and m_size is 0; otherwise single-phase init
   is used with m_size = -1 (module keeps state in globals, not reloadable). */
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_statistical_mechanics(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
  {Py_mod_create, (void*)__pyx_pymod_create},
  {Py_mod_exec, (void*)__pyx_pymod_exec_statistical_mechanics},
  {0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
  PyModuleDef_HEAD_INIT,
  "statistical_mechanics",
  0, /* m_doc */
  #if CYTHON_PEP489_MULTI_PHASE_INIT
  0, /* m_size */
  #else
  -1, /* m_size */
  #endif
  __pyx_methods /* m_methods */,
  #if CYTHON_PEP489_MULTI_PHASE_INIT
  __pyx_moduledef_slots, /* m_slots */
  #else
  NULL, /* m_reload */
  #endif
  NULL, /* m_traverse */
  NULL, /* m_clear */
  NULL /* m_free */
};
#endif
/* CYTHON_SMALL_CODE: expands to __attribute__((cold)) on GCC >= 4.3 so that
   one-shot init functions are optimized for size and kept out of hot code;
   empty elsewhere (clang defines the attribute differently, so it is skipped). */
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
/* Interned-string table: every literal name and message the module uses,
   materialized once by __Pyx_InitStrings into the __pyx_n_s_* / __pyx_kp_s_*
   globals referenced by the entries. The trailing flags of each entry encode
   encoding/intern properties consumed by __Pyx_InitStrings.
   Sentinel-terminated. */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
  {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
  {&__pyx_n_s_Avogadro, __pyx_k_Avogadro, sizeof(__pyx_k_Avogadro), 0, 0, 1, 1},
  {&__pyx_n_s_Boltzmann, __pyx_k_Boltzmann, sizeof(__pyx_k_Boltzmann), 0, 0, 1, 1},
  {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
  {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
  {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0},
  {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0},
  {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
  {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
  {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
  {&__pyx_n_s_Fvib, __pyx_k_Fvib, sizeof(__pyx_k_Fvib), 0, 0, 1, 1},
  {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0},
  {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
  {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
  {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
  {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
  {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
  {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
  {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
  {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
  {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
  {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
  {&__pyx_n_s_Planck, __pyx_k_Planck, sizeof(__pyx_k_Planck), 0, 0, 1, 1},
  {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
  {&__pyx_n_s_U0, __pyx_k_U0, sizeof(__pyx_k_U0), 0, 0, 1, 1},
  {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
  {&__pyx_n_s_Uth, __pyx_k_Uth, sizeof(__pyx_k_Uth), 0, 0, 1, 1},
  {&__pyx_n_s_Uzp, __pyx_k_Uzp, sizeof(__pyx_k_Uzp), 0, 0, 1, 1},
  {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
  {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1},
  {&__pyx_n_s__19, __pyx_k__19, sizeof(__pyx_k__19), 0, 0, 1, 1},
  {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
  {&__pyx_n_s_band, __pyx_k_band, sizeof(__pyx_k_band), 0, 0, 1, 1},
  {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
  {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
  {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
  {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
  {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
  {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
  {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
  {&__pyx_n_s_cs, __pyx_k_cs, sizeof(__pyx_k_cs), 0, 0, 1, 1},
  {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
  {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
  {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
  {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
  {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
  {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
  {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
  {&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1},
  {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
  {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
  {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
  {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
  {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
  {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
  {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
  {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
  {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
  {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
  {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
  {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
  {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
  {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
  {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
  {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
  {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0},
  {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
  {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
  {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
  {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
  {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
  {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
  {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
  {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
  {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
  {&__pyx_n_s_scipy_constants, __pyx_k_scipy_constants, sizeof(__pyx_k_scipy_constants), 0, 0, 1, 1},
  {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
  {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
  {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
  {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
  {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
  {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
  {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
  {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
  {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
  {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
  {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
  {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
  {&__pyx_n_s_temperature, __pyx_k_temperature, sizeof(__pyx_k_temperature), 0, 0, 1, 1},
  {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
  {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
  {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
  {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
  {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
  {&__pyx_n_s_weights, __pyx_k_weights, sizeof(__pyx_k_weights), 0, 0, 1, 1},
  {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
  {0, 0, 0, 0, 0, 0, 0}
};
/* Cache references to the Python builtins the module body uses (range,
   ValueError, ...). The (file, line) pairs in __PYX_ERR point at the
   original .pyx / View.MemoryView source locations. Returns 0 on success,
   -1 with an exception set on failure. */
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 83, __pyx_L1_error)
  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error)
  __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error)
  __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error)
  __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error)
  __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 400, __pyx_L1_error)
  __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 609, __pyx_L1_error)
  __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 828, __pyx_L1_error)
  return 0;
  __pyx_L1_error:;
  return -1;
}
/* Build the module's cached constant objects: the argument tuples for every
   raised exception message, the cached (None, None, None) slice, the Enum
   name tuples, and the code object for __pyx_unpickle_Enum. Each block is
   preceded by the generated comment quoting the originating .pyx line.
   Returns 0 on success, -1 with an exception set on failure. */
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
  /* "View.MemoryView":133
 * 
 *         if not self.ndim:
 *             raise ValueError("Empty shape tuple for cython.array")             # <<<<<<<<<<<<<<
 * 
 *         if itemsize <= 0:
 */
  __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 133, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple_);
  __Pyx_GIVEREF(__pyx_tuple_);
  /* "View.MemoryView":136
 * 
 *         if itemsize <= 0:
 *             raise ValueError("itemsize <= 0 for cython.array")             # <<<<<<<<<<<<<<
 * 
 *         if not isinstance(format, bytes):
 */
  __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__2);
  __Pyx_GIVEREF(__pyx_tuple__2);
  /* "View.MemoryView":148
 * 
 *         if not self._shape:
 *             raise MemoryError("unable to allocate shape and strides.")             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 148, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__3);
  __Pyx_GIVEREF(__pyx_tuple__3);
  /* "View.MemoryView":176
 *             self.data = <char *>malloc(self.len)
 *             if not self.data:
 *                 raise MemoryError("unable to allocate array data.")             # <<<<<<<<<<<<<<
 * 
 *             if self.dtype_is_object:
 */
  __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 176, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__4);
  __Pyx_GIVEREF(__pyx_tuple__4);
  /* "View.MemoryView":192
 *             bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *         if not (flags & bufmode):
 *             raise ValueError("Can only create a buffer that is contiguous in memory.")             # <<<<<<<<<<<<<<
 *         info.buf = self.data
 *         info.len = self.len
 */
  __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 192, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__5);
  __Pyx_GIVEREF(__pyx_tuple__5);
  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 */
  __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 2, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__6);
  __Pyx_GIVEREF(__pyx_tuple__6);
  /* "(tree fragment)":4
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
 */
  __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__7);
  __Pyx_GIVEREF(__pyx_tuple__7);
  /* "View.MemoryView":414
 *     def __setitem__(memoryview self, object index, object value):
 *         if self.view.readonly:
 *             raise TypeError("Cannot assign to read-only memoryview")             # <<<<<<<<<<<<<<
 * 
 *         have_slices, index = _unellipsify(index, self.view.ndim)
 */
  __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 414, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__8);
  __Pyx_GIVEREF(__pyx_tuple__8);
  /* "View.MemoryView":491
 *             result = struct.unpack(self.view.format, bytesitem)
 *         except struct.error:
 *             raise ValueError("Unable to convert item to object")             # <<<<<<<<<<<<<<
 *         else:
 *             if len(self.view.format) == 1:
 */
  __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 491, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__9);
  __Pyx_GIVEREF(__pyx_tuple__9);
  /* "View.MemoryView":516
 *     def __getbuffer__(self, Py_buffer *info, int flags):
 *         if flags & PyBUF_WRITABLE and self.view.readonly:
 *             raise ValueError("Cannot create writable memory view from read-only memoryview")             # <<<<<<<<<<<<<<
 * 
 *         if flags & PyBUF_ND:
 */
  __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 516, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__10);
  __Pyx_GIVEREF(__pyx_tuple__10);
  /* "View.MemoryView":566
 *         if self.view.strides == NULL:
 * 
 *             raise ValueError("Buffer view does not expose strides")             # <<<<<<<<<<<<<<
 * 
 *         return tuple([stride for stride in self.view.strides[:self.view.ndim]])
 */
  __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 566, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__11);
  __Pyx_GIVEREF(__pyx_tuple__11);
  /* "View.MemoryView":573
 *     def suboffsets(self):
 *         if self.view.suboffsets == NULL:
 *             return (-1,) * self.view.ndim             # <<<<<<<<<<<<<<
 * 
 *         return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
 */
  __pyx_tuple__12 = PyTuple_New(1); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 573, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__12);
  __Pyx_INCREF(__pyx_int_neg_1);
  __Pyx_GIVEREF(__pyx_int_neg_1);
  PyTuple_SET_ITEM(__pyx_tuple__12, 0, __pyx_int_neg_1);
  __Pyx_GIVEREF(__pyx_tuple__12);
  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 */
  __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 2, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__13);
  __Pyx_GIVEREF(__pyx_tuple__13);
  /* "(tree fragment)":4
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
 */
  __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__14);
  __Pyx_GIVEREF(__pyx_tuple__14);
  /* "View.MemoryView":678
 *         if item is Ellipsis:
 *             if not seen_ellipsis:
 *                 result.extend([slice(None)] * (ndim - len(tup) + 1))             # <<<<<<<<<<<<<<
 *                 seen_ellipsis = True
 *             else:
 */
  __pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) __PYX_ERR(1, 678, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_slice__15);
  __Pyx_GIVEREF(__pyx_slice__15);
  /* "View.MemoryView":699
 *     for suboffset in suboffsets[:ndim]:
 *         if suboffset >= 0:
 *             raise ValueError("Indirect dimensions not supported")             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__16);
  __Pyx_GIVEREF(__pyx_tuple__16);
  /* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 */
  __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 2, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__17);
  __Pyx_GIVEREF(__pyx_tuple__17);
  /* "(tree fragment)":4
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError("no default __reduce__ due to non-trivial __cinit__")             # <<<<<<<<<<<<<<
 */
  __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__18);
  __Pyx_GIVEREF(__pyx_tuple__18);
  /* "View.MemoryView":286
 *         return self.name
 * 
 * cdef generic = Enum("<strided and direct or indirect>")             # <<<<<<<<<<<<<<
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")
 */
  __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 286, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__20);
  __Pyx_GIVEREF(__pyx_tuple__20);
  /* "View.MemoryView":287
 * 
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default             # <<<<<<<<<<<<<<
 * cdef indirect = Enum("<strided and indirect>")
 * 
 */
  __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 287, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__21);
  __Pyx_GIVEREF(__pyx_tuple__21);
  /* "View.MemoryView":288
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 288, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__22);
  __Pyx_GIVEREF(__pyx_tuple__22);
  /* "View.MemoryView":291
 * 
 * 
 * cdef contiguous = Enum("<contiguous and direct>")             # <<<<<<<<<<<<<<
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")
 * 
 */
  __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 291, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__23);
  __Pyx_GIVEREF(__pyx_tuple__23);
  /* "View.MemoryView":292
 * 
 * cdef contiguous = Enum("<contiguous and direct>")
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 292, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__24);
  __Pyx_GIVEREF(__pyx_tuple__24);
  /* "(tree fragment)":1
 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_PickleError
 *     cdef object __pyx_result
 */
  __pyx_tuple__25 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__25);
  __Pyx_GIVEREF(__pyx_tuple__25);
  __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_RefNannyFinishContext();
  return 0;
  __pyx_L1_error:;
  __Pyx_RefNannyFinishContext();
  return -1;
}
/* One-time interpreter-level setup for this module: initialise threading
 * support (no-op on builds without WITH_THREAD), intern the module's string
 * constants, and create the small cached PyInt/PyLong objects referenced by
 * the generated code.  Returns 0 on success, -1 with a Python exception set
 * on failure. */
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/* Cached integer constants; 184977713 is presumably the pickle checksum
 * used by __pyx_unpickle_Enum — confirm against the unpickle helper. */
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
/* Initialise the module's C-level global PyObject* variables (the five
 * memoryview Enum singletons) to Py_None placeholders.  The real Enum
 * instances are created later in the module exec body via __Pyx_DECREF_SET,
 * which is why each slot must hold a valid (owned) reference here.
 * Always returns 0. */
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
__Pyx_RefNannyFinishContext();
return 0;
}
/* Scaffold kept by the code generator: this module exports no C-level
 * variables, so the body is empty apart from refnanny bookkeeping.
 * Always returns 0. */
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/* Scaffold kept by the code generator: this module exports no C-level
 * functions to other Cython modules, so the body is empty.  Always
 * returns 0. */
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/* Finalise the four extension types defined by the memoryview utility code
 * (array, Enum/MemviewEnum, memoryview, _memoryviewslice): wire up their C
 * vtables, call PyType_Ready, install __reduce__ support, and publish each
 * ready type through its module-global pointer.  The order matters:
 * _memoryviewslice copies and then overrides memoryview's vtable and uses it
 * as tp_base, so memoryview must be fully set up first.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__pyx_vtabptr_array = &__pyx_vtable_array;
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
/* tp_print was removed in CPython 3.8; zero it on older interpreters to
 * avoid the default (broken) slot inherited from static initialisation. */
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_array.tp_print = 0;
#endif
if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_MemviewEnum.tp_print = 0;
#endif
/* Fast-path attribute lookup when the type has no instance dict and uses
 * the generic getattr slot. */
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryview.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
/* _memoryviewslice inherits memoryview's vtable, then overrides the two
 * item-conversion hooks with slice-aware versions. */
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryviewslice.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 961, __pyx_L1_error)
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
/* Scaffold kept by the code generator: this module imports no extension
 * types from other modules.  Always returns 0. */
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/* Scaffold kept by the code generator: no C-level variables are imported
 * from other Cython modules.  Always returns 0. */
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/* Scaffold kept by the code generator: no C-level functions are imported
 * from other Cython modules.  Always returns 0. */
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/* Module init entry point plumbing.  The exported symbol is
 * initstatistical_mechanics on Python 2 and PyInit_statistical_mechanics on
 * Python 3.  Under PEP 489 multi-phase init, PyInit_ merely returns the
 * module definition and the real work happens later in
 * __pyx_pymod_exec_statistical_mechanics(); on the other preprocessor paths
 * the brace opened below is closed by the shared body that follows. */
#if PY_MAJOR_VERSION < 3
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC void
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#else
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initstatistical_mechanics(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initstatistical_mechanics(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_statistical_mechanics(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_statistical_mechanics(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
/* Guard against loading this extension into more than one (sub)interpreter
 * in the same process: remember the first interpreter that loads the module
 * and fail with ImportError on any other.  Uses the stable interpreter ID on
 * CPython >= 3.7, and a raw PyInterpreterState pointer on older versions.
 * Returns 0 if OK, -1 (with ImportError set) otherwise. */
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
/* Copy one attribute of an import-system ModuleSpec into the module dict
 * under a new name (e.g. spec.origin -> __file__).  A missing attribute is
 * not an error; a None value is skipped unless allow_none is set.
 * Returns 0 on success (including the skipped cases), -1 on failure. */
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
int status = 0;
PyObject *attr = PyObject_GetAttrString(spec, from_name);
if (unlikely(!attr)) {
/* AttributeError simply means the spec has no such field. */
if (!PyErr_ExceptionMatches(PyExc_AttributeError))
return -1;
PyErr_Clear();
return 0;
}
if (allow_none || attr != Py_None)
status = PyDict_SetItemString(moddict, to_name, attr);
Py_DECREF(attr);
return status;
}
/* PEP 489 Py_mod_create slot: build the module object from its spec.
 * Enforces the single-interpreter restriction, returns the existing module
 * if initialisation already ran, otherwise creates a fresh module named
 * after spec.name and seeds its dict with __loader__/__file__/__package__
 * (and __path__, None not allowed) taken from the spec.
 * Returns a new reference, or NULL with an exception set. */
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
/* moddict is a borrowed reference. */
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
/* Module execution body, shared (via the preprocessor) by the Py2
 * init entry point, the plain Py3 PyInit_ path, and the PEP 489
 * Py_mod_exec slot.  It creates/adopts the module object, runs all the
 * generated init phases, then executes the translated module-level
 * Python statements: import scipy.constants / numpy, capture the
 * Planck/Boltzmann/Avogadro constants into C doubles, and set up the
 * memoryview utility globals. */
static CYTHON_SMALL_CODE int __pyx_pymod_exec_statistical_mechanics(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
double __pyx_t_3;
static PyThread_type_lock __pyx_t_4[8];
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'statistical_mechanics' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
/* Optional refnanny reference-count debugging support. */
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_statistical_mechanics(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("statistical_mechanics", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_quantas__utils__physics__statistical_mechanics) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
/* Register the module under its fully qualified name so that relative
 * imports inside the package resolve to this instance. */
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "quantas.utils.physics.statistical_mechanics")) {
if (unlikely(PyDict_SetItemString(modules, "quantas.utils.physics.statistical_mechanics", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) goto __pyx_L1_error;
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) goto __pyx_L1_error;
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() != 0)) goto __pyx_L1_error;
(void)__Pyx_modinit_type_import_code();
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* From here on: the translated module-level Python statements. */
/* "quantas/utils/physics/statistical_mechanics.pyx":14
 * from cython.parallel import prange
 *
 * import scipy.constants as cs             # <<<<<<<<<<<<<<
 * import numpy as np
 *
 */
__pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_n_s__19);
__Pyx_GIVEREF(__pyx_n_s__19);
PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s__19);
__pyx_t_2 = __Pyx_Import(__pyx_n_s_scipy_constants, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_d, __pyx_n_s_cs, __pyx_t_2) < 0) __PYX_ERR(0, 14, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "quantas/utils/physics/statistical_mechanics.pyx":15
 *
 * import scipy.constants as cs
 * import numpy as np             # <<<<<<<<<<<<<<
 *
 * cdef double H = cs.Planck
 */
__pyx_t_2 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_2) < 0) __PYX_ERR(0, 15, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "quantas/utils/physics/statistical_mechanics.pyx":17
 * import numpy as np
 *
 * cdef double H = cs.Planck             # <<<<<<<<<<<<<<
 * cdef double KB = cs.Boltzmann
 * cdef double NA = cs.Avogadro
 */
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_cs); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_Planck); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 17, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_H = __pyx_t_3;
/* "quantas/utils/physics/statistical_mechanics.pyx":18
 *
 * cdef double H = cs.Planck
 * cdef double KB = cs.Boltzmann             # <<<<<<<<<<<<<<
 * cdef double NA = cs.Avogadro
 *
 */
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_cs); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_Boltzmann); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 18, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_KB = __pyx_t_3;
/* "quantas/utils/physics/statistical_mechanics.pyx":19
 * cdef double H = cs.Planck
 * cdef double KB = cs.Boltzmann
 * cdef double NA = cs.Avogadro             # <<<<<<<<<<<<<<
 *
 * cdef extern from "math.h" nogil:
 */
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_cs); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_Avogadro); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_7quantas_5utils_7physics_21statistical_mechanics_NA = __pyx_t_3;
/* "quantas/utils/physics/statistical_mechanics.pyx":1
 * # -*- coding: utf-8 -*-             # <<<<<<<<<<<<<<
 * ##############################################################################
 * # Copyright (c), Gianfranco Ulian and Giovanni Valdre'.                      #
 */
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":209
 *         info.obj = self
 *
 *     __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")             # <<<<<<<<<<<<<<
 *
 *     def __dealloc__(array self):
 */
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":286
 *         return self.name
 *
 * cdef generic = Enum("<strided and direct or indirect>")             # <<<<<<<<<<<<<<
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":287
 *
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default             # <<<<<<<<<<<<<<
 * cdef indirect = Enum("<strided and indirect>")
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":288
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":291
 *
 *
 * cdef contiguous = Enum("<contiguous and direct>")             # <<<<<<<<<<<<<<
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":292
 *
 * cdef contiguous = Enum("<contiguous and direct>")
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":316
 *
 * DEF THREAD_LOCKS_PREALLOCATED = 8
 * cdef int __pyx_memoryview_thread_locks_used = 0             # <<<<<<<<<<<<<<
 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
 *     PyThread_allocate_lock(),
 */
__pyx_memoryview_thread_locks_used = 0;
/* "View.MemoryView":317
 * DEF THREAD_LOCKS_PREALLOCATED = 8
 * cdef int __pyx_memoryview_thread_locks_used = 0
 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [             # <<<<<<<<<<<<<<
 *     PyThread_allocate_lock(),
 *     PyThread_allocate_lock(),
 */
/* Pre-allocate the 8 thread locks used to guard memoryview acquisition
 * counts (see __pyx_add/sub_acquisition_count_locked). */
__pyx_t_4[0] = PyThread_allocate_lock();
__pyx_t_4[1] = PyThread_allocate_lock();
__pyx_t_4[2] = PyThread_allocate_lock();
__pyx_t_4[3] = PyThread_allocate_lock();
__pyx_t_4[4] = PyThread_allocate_lock();
__pyx_t_4[5] = PyThread_allocate_lock();
__pyx_t_4[6] = PyThread_allocate_lock();
__pyx_t_4[7] = PyThread_allocate_lock();
memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_4, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
/* "View.MemoryView":545
 *         info.obj = self
 *
 *     __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 545, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 545, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":991
 *         return self.from_object
 *
 *     __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 991, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 991, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "(tree fragment)":1
 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_PickleError
 *     cdef object __pyx_result
 */
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":11
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_result.name = __pyx_state[0]
 *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
 */
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init quantas.utils.physics.statistical_mechanics", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init quantas.utils.physics.statistical_mechanics");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
/* Return convention differs by entry point: int status for PEP 489 exec,
 * the module object for plain Py3 PyInit_, nothing on Py2. */
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
/* Fetch the RefNanny debugging API: import `modname`, read its
 * "RefNannyAPI" attribute and decode it as a raw pointer.  Returns NULL on
 * any failure (with the triggering Python exception left set). */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
void *api = NULL;
PyObject *mod = PyImport_ImportModule(modname);
if (mod) {
PyObject *cap = PyObject_GetAttrString(mod, "RefNannyAPI");
if (cap) {
api = PyLong_AsVoidPtr(cap);
Py_DECREF(cap);
}
Py_DECREF(mod);
}
return (__Pyx_RefNannyAPIStruct *)api;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
/* Attribute lookup that goes straight through the type's tp_getattro (or
 * legacy tp_getattr on Py2) slot, skipping the generic PyObject_GetAttr
 * dispatch when possible.  Returns a new reference or NULL with an
 * exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
/* Fallback for exotic types with neither slot filled in. */
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
/* Look up `name` on the builtins module (__pyx_b); on failure replace the
 * lookup error with a NameError matching CPython's message.  Returns a new
 * reference or NULL with NameError set. */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
/* Dict version tags (CPython's ma_version_tag) let generated code cache a
 * module/instance attribute and skip the dict lookup while the dict is
 * provably unchanged.  A version of 0 means "no dict / cannot cache". */
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
/* Version tag of the instance __dict__ (located via tp_dictoffset);
 * negative offsets require _PyObject_GetDictPtr on CPython. */
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
/* True iff both the type dict and the instance dict still carry the cached
 * version tags, i.e. the cached attribute value is still valid. */
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* GetModuleGlobalName */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
/* Resolve a module-level name: look in the module dict (__pyx_d) first,
 * falling back to builtins.  With CYTHON_USE_DICT_VERSIONS the caller's
 * cache slot is refreshed via __PYX_UPDATE_DICT_CACHE.  Returns a new
 * reference or NULL with an exception set. */
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
/* Fast path: reuse the pre-computed hash of the interned name. */
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
/* Direct tp_call invocation with CPython's recursion guard, mirroring
 * PyObject_Call but skipping its extra checks.  Converts a NULL result
 * without an exception into a SystemError so callers can rely on the
 * "NULL => exception set" invariant.  Returns a new reference or NULL. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* MemviewSliceInit */
/* Populate a __Pyx_memviewslice from the Py_buffer held by `memview`:
 * copy shape/strides/suboffsets for `ndim` dimensions (synthesising
 * C-contiguous strides when the buffer provides none, and -1 suboffsets
 * meaning "no indirection"), then record the data pointer and take an
 * acquisition-count reference on the memoryview unless it is brand new.
 * Returns 0 on success, -1 with ValueError set if the slice was already
 * initialised. */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (memviewslice->memview || memviewslice->data) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
/* No strides supplied: derive C-contiguous strides from the shape,
 * innermost dimension first. */
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
/* First slice over an existing memoryview pins it with an extra
 * reference; a freshly created memoryview already carries one. */
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
/* printf-style wrapper around Py_FatalError (which aborts the process and
 * does not return).  The message is truncated to 200 bytes. */
static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
vsnprintf(msg, 200, fmt, vargs);
va_end(vargs);
Py_FatalError(msg);
}
/* Lock-protected post-increment of a memoryview acquisition counter;
 * returns the value *before* the increment (0 => first acquisition).
 * Fallback used when true atomics are unavailable. */
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)++;
PyThread_release_lock(lock);
return result;
}
/* Lock-protected post-decrement of a memoryview acquisition counter;
 * returns the value *before* the decrement (1 => last release).
 * Fallback used when true atomics are unavailable. */
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)--;
PyThread_release_lock(lock);
return result;
}
/* Acquire a memoryview slice: bump the acquisition count and, on the first
 * acquisition, take a real Python reference on the owning memoryview —
 * grabbing the GIL for the INCREF if the caller does not hold it.  No-op
 * for empty/None slices.  A negative count indicates corruption and is
 * fatal. */
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
int first_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (!memview || (PyObject *) memview == Py_None)
return;
if (__pyx_get_slice_count(memview) < 0)
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (first_time) {
if (have_gil) {
Py_INCREF((PyObject *) memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_INCREF((PyObject *) memview);
PyGILState_Release(_gilstate);
}
}
}
/* Release a reference to the memoryview backing `memslice` and clear the
 * slice. Counterpart of __Pyx_INC_MEMVIEW: only the LAST release drops the
 * Python reference (Py_CLEAR), taking the GIL if the caller lacks it.
 * A non-positive count before the decrement aborts via __pyx_fatalerror. */
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
                                             int have_gil, int lineno) {
    int last_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview ) {
        return;
    } else if ((PyObject *) memview == Py_None) {
        /* None sentinel: nothing to release, just detach the slice */
        memslice->memview = NULL;
        return;
    }
    if (__pyx_get_slice_count(memview) <= 0)
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    /* previous count 1 => we are dropping the last acquisition */
    last_time = __pyx_sub_acquisition_count(memview) == 1;
    memslice->data = NULL;
    if (last_time) {
        if (have_gil) {
            Py_CLEAR(memslice->memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_CLEAR(memslice->memview);
            PyGILState_Release(_gilstate);
        }
    } else {
        memslice->memview = NULL;
    }
}
/* RaiseArgTupleInvalid */
/* Raise a TypeError describing a wrong positional-argument count for
 * `func_name`: "takes at least/at most/exactly N positional argument(s)
 * (M given)". `exact` forces the "exactly" wording regardless of range. */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    Py_ssize_t num_expected;
    const char *more_or_less;
    if (num_found < num_min) {
        num_expected = num_min;
        more_or_less = "at least";
    } else {
        num_expected = num_max;
        more_or_less = "at most";
    }
    if (exact) {
        more_or_less = "exactly";
    }
    PyErr_Format(PyExc_TypeError,
                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
                 func_name, more_or_less, num_expected,
                 (num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
/* Raise a TypeError for a keyword argument that was also passed
 * positionally (or given twice). Uses %U for unicode names on Py3 and a
 * PyString conversion on Py2. */
static void __Pyx_RaiseDoubleKeywordsError(
    const char* func_name,
    PyObject* kw_name)
{
    PyErr_Format(PyExc_TypeError,
        #if PY_MAJOR_VERSION >= 3
        "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
        #else
        "%s() got multiple values for keyword argument '%s'", func_name,
        PyString_AsString(kw_name));
        #endif
}
/* ParseKeywords */
/* Match the entries of the `kwds` dict against the NULL-terminated
 * `argnames` array, storing each matched value (borrowed reference) into
 * `values` at the corresponding index. Names before `num_pos_args` were
 * already filled positionally, so a keyword hit on one of those is a
 * "multiple values" error. Unknown keywords go into `kwds2` if it is
 * non-NULL (i.e. the function takes **kwargs), otherwise they raise
 * TypeError. Returns 0 on success, -1 with an exception set on failure.
 *
 * The first scan compares by pointer identity only — argument-name
 * strings generated by Cython are interned, so this is the common fast
 * path. The fallback scans compare by size and content. */
static int __Pyx_ParseOptionalKeywords(
    PyObject *kwds,
    PyObject **argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    const char* function_name)
{
    PyObject *key = 0, *value = 0;
    Py_ssize_t pos = 0;
    PyObject*** name;
    PyObject*** first_kw_arg = argnames + num_pos_args;
    while (PyDict_Next(kwds, &pos, &key, &value)) {
        name = first_kw_arg;
        /* fast path: interned-pointer comparison against keyword-only slots */
        while (*name && (**name != key)) name++;
        if (*name) {
            values[name-argnames] = value;
            continue;
        }
        name = first_kw_arg;
        #if PY_MAJOR_VERSION < 3
        if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
            /* Py2 bytes keyword: compare by length + content */
            while (*name) {
                if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
                        && _PyString_Eq(**name, key)) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                /* not a keyword slot: check whether it duplicates a
                 * positionally-filled argument */
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    if ((**argname == key) || (
                            (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
                             && _PyString_Eq(**argname, key))) {
                        goto arg_passed_twice;
                    }
                    argname++;
                }
            }
        } else
        #endif
        if (likely(PyUnicode_Check(key))) {
            while (*name) {
                int cmp = (**name == key) ? 0 :
                #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                    /* cheap length check before full unicode compare */
                    (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
                #endif
                    PyUnicode_Compare(**name, key);
                /* PyUnicode_Compare can fail with an exception set */
                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                if (cmp == 0) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                /* duplicate-positional check, unicode flavour */
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    int cmp = (**argname == key) ? 0 :
                    #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                        (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
                    #endif
                        PyUnicode_Compare(**argname, key);
                    if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                    if (cmp == 0) goto arg_passed_twice;
                    argname++;
                }
            }
        } else
            goto invalid_keyword_type;
        if (kwds2) {
            /* function accepts **kwargs: pass the unknown key through */
            if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
        } else {
            goto invalid_keyword;
        }
    }
    return 0;
arg_passed_twice:
    __Pyx_RaiseDoubleKeywordsError(function_name, key);
    goto bad;
invalid_keyword_type:
    PyErr_Format(PyExc_TypeError,
        "%.200s() keywords must be strings", function_name);
    goto bad;
invalid_keyword:
    PyErr_Format(PyExc_TypeError,
    #if PY_MAJOR_VERSION < 3
        "%.200s() got an unexpected keyword argument '%.200s'",
        function_name, PyString_AsString(key));
    #else
        "%s() got an unexpected keyword argument '%U'",
        function_name, key);
    #endif
bad:
    return -1;
}
/* None */
/* Raise UnboundLocalError for reading local `varname` before assignment. */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
    PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
/* Direct-threadstate versions of PyErr_Restore/PyErr_Fetch that avoid the
 * C-API call overhead by touching tstate->curexc_* fields directly. */
/* Install (type, value, tb) as the current exception, STEALING the three
 * references, and drop the references previously held by the state. */
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    /* decref the old exception only after the new one is installed */
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
/* Move the current exception out of the thread state into *type/*value/*tb
 * (ownership transfers to the caller; the fields may be NULL) and clear it. */
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    *type = tstate->curexc_type;
    *value = tstate->curexc_value;
    *tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
}
#endif
/* WriteUnraisableException */
/* Report an exception that cannot propagate (e.g. raised in a destructor or
 * a nogil context) via PyErr_WriteUnraisable, labelled with `name`.
 * Acquires the GIL first when called from nogil code. If `full_traceback`
 * is set, the traceback is also printed to stderr before being reported. */
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
                                  CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
                                  int full_traceback, CYTHON_UNUSED int nogil) {
    PyObject *old_exc, *old_val, *old_tb;
    PyObject *ctx;
    __Pyx_PyThreadState_declare
#ifdef WITH_THREAD
    PyGILState_STATE state;
    if (nogil)
        state = PyGILState_Ensure();
#ifdef _MSC_VER
    /* silence MSVC's possibly-uninitialized warning on `state` */
    else state = (PyGILState_STATE)-1;
#endif
#endif
    __Pyx_PyThreadState_assign
    __Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
    if (full_traceback) {
        /* PyErr_PrintEx consumes the exception, so duplicate the refs and
         * restore it afterwards for the WriteUnraisable report below */
        Py_XINCREF(old_exc);
        Py_XINCREF(old_val);
        Py_XINCREF(old_tb);
        __Pyx_ErrRestore(old_exc, old_val, old_tb);
        PyErr_PrintEx(1);
    }
#if PY_MAJOR_VERSION < 3
    ctx = PyString_FromString(name);
#else
    ctx = PyUnicode_FromString(name);
#endif
    __Pyx_ErrRestore(old_exc, old_val, old_tb);
    if (!ctx) {
        /* could not even build the context string: report against None */
        PyErr_WriteUnraisable(Py_None);
    } else {
        PyErr_WriteUnraisable(ctx);
        Py_DECREF(ctx);
    }
#ifdef WITH_THREAD
    if (nogil)
        PyGILState_Release(state);
#endif
}
/* ArgTypeTest */
/* Check that argument `obj` is an instance of `type` (exact match when
 * `exact` is set, subclass allowed otherwise). Returns 1 on success, 0 with
 * a TypeError (or SystemError for a NULL type) set on failure. The exact
 * check is assumed to have been done by a macro fast path before this slow
 * path is reached — TODO confirm against the generated call sites. */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    else if (exact) {
        #if PY_MAJOR_VERSION == 2
        /* Py2: accept both str and unicode when basestring is expected */
        if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
        #endif
    }
    else {
        if (likely(__Pyx_TypeCheck(obj, type))) return 1;
    }
    PyErr_Format(PyExc_TypeError,
        "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
        name, type->tp_name, Py_TYPE(obj)->tp_name);
    return 0;
}
/* RaiseException */
/* Implement the semantics of the `raise` statement: normalize the
 * (type, value, tb[, cause]) triple and install it as the current
 * exception. Two variants: the Py2 version below, then the Py3 version
 * (with `raise ... from cause` support) after the #else. All arguments are
 * borrowed; the function sets the error indicator and returns. */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                        CYTHON_UNUSED PyObject *cause) {
    __Pyx_PyThreadState_declare
    /* own local refs; __Pyx_ErrRestore at the end steals them */
    Py_XINCREF(type);
    if (!value || value == Py_None)
        value = NULL;
    else
        Py_INCREF(value);
    if (!tb || tb == Py_None)
        tb = NULL;
    else {
        Py_INCREF(tb);
        if (!PyTraceBack_Check(tb)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: arg 3 must be a traceback or None");
            goto raise_error;
        }
    }
    if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
        /* PyPy's NormalizeException requires a non-NULL value */
        if (!value) {
            Py_INCREF(Py_None);
            value = Py_None;
        }
#endif
        PyErr_NormalizeException(&type, &value, &tb);
    } else {
        /* raising an instance: no separate value allowed; type becomes
         * the instance's class */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto raise_error;
        }
        value = type;
        type = (PyObject*) Py_TYPE(type);
        Py_INCREF(type);
        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: exception class must be a subclass of BaseException");
            goto raise_error;
        }
    }
    __Pyx_PyThreadState_assign
    __Pyx_ErrRestore(type, value, tb);
    return;
raise_error:
    Py_XDECREF(value);
    Py_XDECREF(type);
    Py_XDECREF(tb);
    return;
}
#else
/* Py3 variant: also wires up `raise X from cause`. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
    PyObject* owned_instance = NULL;
    if (tb == Py_None) {
        tb = 0;
    } else if (tb && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: arg 3 must be a traceback or None");
        goto bad;
    }
    if (value == Py_None)
        value = 0;
    if (PyExceptionInstance_Check(type)) {
        /* raising an instance directly */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto bad;
        }
        value = type;
        type = (PyObject*) Py_TYPE(value);
    } else if (PyExceptionClass_Check(type)) {
        PyObject *instance_class = NULL;
        if (value && PyExceptionInstance_Check(value)) {
            /* `raise Class, instance`: prefer the instance's own class if
             * it is a subclass of the given one */
            instance_class = (PyObject*) Py_TYPE(value);
            if (instance_class != type) {
                int is_subclass = PyObject_IsSubclass(instance_class, type);
                if (!is_subclass) {
                    instance_class = NULL;
                } else if (unlikely(is_subclass == -1)) {
                    goto bad;
                } else {
                    type = instance_class;
                }
            }
        }
        if (!instance_class) {
            /* instantiate the exception class with `value` as argument(s) */
            PyObject *args;
            if (!value)
                args = PyTuple_New(0);
            else if (PyTuple_Check(value)) {
                Py_INCREF(value);
                args = value;
            } else
                args = PyTuple_Pack(1, value);
            if (!args)
                goto bad;
            owned_instance = PyObject_Call(type, args, NULL);
            Py_DECREF(args);
            if (!owned_instance)
                goto bad;
            value = owned_instance;
            if (!PyExceptionInstance_Check(value)) {
                PyErr_Format(PyExc_TypeError,
                             "calling %R should have returned an instance of "
                             "BaseException, not %R",
                             type, Py_TYPE(value));
                goto bad;
            }
        }
    } else {
        PyErr_SetString(PyExc_TypeError,
            "raise: exception class must be a subclass of BaseException");
        goto bad;
    }
    if (cause) {
        /* `raise ... from cause`: normalize cause to an instance or NULL */
        PyObject *fixed_cause;
        if (cause == Py_None) {
            fixed_cause = NULL;
        } else if (PyExceptionClass_Check(cause)) {
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto bad;
        } else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
            Py_INCREF(fixed_cause);
        } else {
            PyErr_SetString(PyExc_TypeError,
                            "exception causes must derive from "
                            "BaseException");
            goto bad;
        }
        /* PyException_SetCause steals the reference to fixed_cause */
        PyException_SetCause(value, fixed_cause);
    }
    PyErr_SetObject(type, value);
    if (tb) {
        /* attach the explicit traceback to the freshly-set exception */
#if CYTHON_COMPILING_IN_PYPY
        PyObject *tmp_type, *tmp_value, *tmp_tb;
        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
        Py_INCREF(tb);
        PyErr_Restore(tmp_type, tmp_value, tb);
        Py_XDECREF(tmp_tb);
#else
        PyThreadState *tstate = __Pyx_PyThreadState_Current;
        PyObject* tmp_tb = tstate->curexc_traceback;
        if (tb != tmp_tb) {
            Py_INCREF(tb);
            tstate->curexc_traceback = tb;
            Py_XDECREF(tmp_tb);
        }
#endif
    }
bad:
    Py_XDECREF(owned_instance);
    return;
}
#endif
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
/* Call a built-in (C) function using the METH_FASTCALL vectorcall-style
 * convention: `args` is a C array of `nargs` positional arguments, no
 * tuple is built. Returns a new reference or NULL on error. */
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
    PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
    PyCFunction meth = PyCFunction_GET_FUNCTION(func);
    PyObject *self = PyCFunction_GET_SELF(func);
    int flags = PyCFunction_GET_FLAGS(func);
    assert(PyCFunction_Check(func));
    assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
    assert(nargs >= 0);
    assert(nargs == 0 || args != NULL);
    /* _PyCFunction_FastCallDict() must not be called with an exception set,
       because it may clear it (directly or indirectly) and so the
       caller loses its exception */
    assert(!PyErr_Occurred());
    /* Before 3.7, METH_FASTCALL functions always took a kwnames argument;
     * pass NULL kwnames through the with-keywords signature there. */
    if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
        return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
    } else {
        return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
    }
}
#endif
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
/* Call the code object `co` by building a frame directly and copying the
 * `na` arguments into its fastlocals — bypasses argument-tuple creation.
 * Only valid for simple code objects (no defaults/kwargs/closures); the
 * caller checks that. Returns a new reference or NULL. */
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
                                               PyObject *globals) {
    PyFrameObject *f;
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    PyObject **fastlocals;
    Py_ssize_t i;
    PyObject *result;
    assert(globals != NULL);
    /* XXX Perhaps we should create a specialized
       PyFrame_New() that doesn't take locals, but does
       take builtins without sanity checking them.
       */
    assert(tstate != NULL);
    f = PyFrame_New(tstate, co, globals, NULL);
    if (f == NULL) {
        return NULL;
    }
    fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
    for (i = 0; i < na; i++) {
        Py_INCREF(*args);
        fastlocals[i] = *args++;
    }
    result = PyEval_EvalFrameEx(f,0);
    /* Frame deallocation decrements recursion_depth; bump it around the
     * DECREF so the counter stays balanced (mirrors CPython's ceval). */
    ++tstate->recursion_depth;
    Py_DECREF(f);
    --tstate->recursion_depth;
    return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
/* Fast-call a pure-Python function with a C argument array and optional
 * kwargs dict. Takes the no-keyword frame fast path when possible, else
 * flattens kwargs into a key/value tuple and falls back to
 * PyEval_EvalCodeEx. Returns a new reference or NULL. */
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
    PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
    PyObject *globals = PyFunction_GET_GLOBALS(func);
    PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
    PyObject *closure;
#if PY_MAJOR_VERSION >= 3
    PyObject *kwdefs;
#endif
    PyObject *kwtuple, **k;
    PyObject **d;
    Py_ssize_t nd;
    Py_ssize_t nk;
    PyObject *result;
    assert(kwargs == NULL || PyDict_Check(kwargs));
    nk = kwargs ? PyDict_Size(kwargs) : 0;
    if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
        return NULL;
    }
    if (
#if PY_MAJOR_VERSION >= 3
            co->co_kwonlyargcount == 0 &&
#endif
            likely(kwargs == NULL || nk == 0) &&
            co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
        /* simple function, no keywords: frame fast path */
        if (argdefs == NULL && co->co_argcount == nargs) {
            result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
            goto done;
        }
        else if (nargs == 0 && argdefs != NULL
                 && co->co_argcount == Py_SIZE(argdefs)) {
            /* function called with no arguments, but all parameters have
               a default value: use default values as arguments .*/
            args = &PyTuple_GET_ITEM(argdefs, 0);
            result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
            goto done;
        }
    }
    if (kwargs != NULL) {
        /* flatten the kwargs dict into [key0, val0, key1, val1, ...] */
        Py_ssize_t pos, i;
        kwtuple = PyTuple_New(2 * nk);
        if (kwtuple == NULL) {
            result = NULL;
            goto done;
        }
        k = &PyTuple_GET_ITEM(kwtuple, 0);
        pos = i = 0;
        while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
            Py_INCREF(k[i]);
            Py_INCREF(k[i+1]);
            i += 2;
        }
        nk = i / 2;
    }
    else {
        kwtuple = NULL;
        k = NULL;
    }
    closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
    kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
    if (argdefs != NULL) {
        d = &PyTuple_GET_ITEM(argdefs, 0);
        nd = Py_SIZE(argdefs);
    }
    else {
        d = NULL;
        nd = 0;
    }
#if PY_MAJOR_VERSION >= 3
    result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
                               args, (int)nargs,
                               k, (int)nk,
                               d, (int)nd, kwdefs, closure);
#else
    result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
                               args, (int)nargs,
                               k, (int)nk,
                               d, (int)nd, closure);
#endif
    Py_XDECREF(kwtuple);
done:
    Py_LeaveRecursiveCall();
    return result;
}
#endif
#endif
/* PyObjectCall2Args */
/* Call `function` with exactly two positional arguments. Uses the
 * Python-function and C-function fast-call paths when available, otherwise
 * packs an argument tuple. Returns a new reference or NULL on error. */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
    PyObject *args, *result = NULL;
    #if CYTHON_FAST_PYCALL
    if (PyFunction_Check(function)) {
        PyObject *args[2] = {arg1, arg2};
        return __Pyx_PyFunction_FastCall(function, args, 2);
    }
    #endif
    #if CYTHON_FAST_PYCCALL
    if (__Pyx_PyFastCFunction_Check(function)) {
        PyObject *args[2] = {arg1, arg2};
        return __Pyx_PyCFunction_FastCall(function, args, 2);
    }
    #endif
    /* generic path: build a 2-tuple (SET_ITEM steals the incref'd refs) */
    args = PyTuple_New(2);
    if (unlikely(!args)) goto done;
    Py_INCREF(arg1);
    PyTuple_SET_ITEM(args, 0, arg1);
    Py_INCREF(arg2);
    PyTuple_SET_ITEM(args, 1, arg2);
    Py_INCREF(function);
    result = __Pyx_PyObject_Call(function, args, NULL);
    Py_DECREF(args);
    Py_DECREF(function);
done:
    return result;
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
/* Invoke a METH_O built-in directly: one argument, no tuple packing.
 * Guards against recursion overflow and normalizes the "NULL result
 * without exception" protocol violation into a SystemError. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
    PyObject *self, *result;
    PyCFunction cfunc;
    cfunc = PyCFunction_GET_FUNCTION(func);
    self = PyCFunction_GET_SELF(func);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = cfunc(self, arg);
    Py_LeaveRecursiveCall();
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
/* Slow path: call `func` with a freshly built 1-tuple. */
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_New(1);
    if (unlikely(!args)) return NULL;
    Py_INCREF(arg);
    PyTuple_SET_ITEM(args, 0, arg);  /* steals the incref'd reference */
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
/* Call `func` with a single positional argument, dispatching to the
 * fastest available convention (Python fast call, METH_O, METH_FASTCALL)
 * before falling back to tuple packing. New reference or NULL. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
    if (PyFunction_Check(func)) {
        return __Pyx_PyFunction_FastCall(func, &arg, 1);
    }
#endif
    if (likely(PyCFunction_Check(func))) {
        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
            return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
        } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
            return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
        }
    }
    return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
/* Non-CPython (e.g. PyPy): no fast paths, always pack a tuple. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_Pack(1, arg);
    if (unlikely(!args)) return NULL;
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
#endif
/* BytesEquals */
/* Compare s1 and s2 for equality (`equals` is Py_EQ or Py_NE) with bytes
 * fast paths: identity, length, first byte, cached hash, then memcmp.
 * Falls back to rich comparison for non-bytes operands. Returns 1/0 for
 * the boolean result, -1 on error. */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
    return PyObject_RichCompareBool(s1, s2, equals);
#else
    if (s1 == s2) {
        return (equals == Py_EQ);
    } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
        const char *ps1, *ps2;
        Py_ssize_t length = PyBytes_GET_SIZE(s1);
        if (length != PyBytes_GET_SIZE(s2))
            return (equals == Py_NE);
        ps1 = PyBytes_AS_STRING(s1);
        ps2 = PyBytes_AS_STRING(s2);
        if (ps1[0] != ps2[0]) {
            /* first byte differs: cheapest possible reject */
            return (equals == Py_NE);
        } else if (length == 1) {
            return (equals == Py_EQ);
        } else {
            int result;
#if CYTHON_USE_UNICODE_INTERNALS
            /* differing cached hashes (-1 = not yet computed) prove inequality */
            Py_hash_t hash1, hash2;
            hash1 = ((PyBytesObject*)s1)->ob_shash;
            hash2 = ((PyBytesObject*)s2)->ob_shash;
            if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
                return (equals == Py_NE);
            }
#endif
            result = memcmp(ps1, ps2, (size_t)length);
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
        return (equals == Py_NE);
    } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
        return (equals == Py_NE);
    } else {
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
#endif
}
/* UnicodeEquals */
/* Compare s1 and s2 for equality (`equals` is Py_EQ or Py_NE) with unicode
 * fast paths: identity, length, cached hash, kind, first character, then
 * memcmp of the raw character data. On Py2, a str operand is coerced to
 * unicode first (owned_ref tracks the temporary). Falls back to rich
 * comparison otherwise. Returns 1/0 for the boolean result, -1 on error. */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
    return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
    PyObject* owned_ref = NULL;
#endif
    int s1_is_unicode, s2_is_unicode;
    if (s1 == s2) {
        goto return_eq;
    }
    s1_is_unicode = PyUnicode_CheckExact(s1);
    s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
    /* Py2 mixed str/unicode: promote the str side to unicode */
    if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
        owned_ref = PyUnicode_FromObject(s2);
        if (unlikely(!owned_ref))
            return -1;
        s2 = owned_ref;
        s2_is_unicode = 1;
    } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
        owned_ref = PyUnicode_FromObject(s1);
        if (unlikely(!owned_ref))
            return -1;
        s1 = owned_ref;
        s1_is_unicode = 1;
    } else if (((!s2_is_unicode) & (!s1_is_unicode))) {
        return __Pyx_PyBytes_Equals(s1, s2, equals);
    }
#endif
    if (s1_is_unicode & s2_is_unicode) {
        Py_ssize_t length;
        int kind;
        void *data1, *data2;
        /* ensure PEP 393 canonical representation before touching data */
        if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
            return -1;
        length = __Pyx_PyUnicode_GET_LENGTH(s1);
        if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
            goto return_ne;
        }
#if CYTHON_USE_UNICODE_INTERNALS
        {
            /* differing cached hashes (-1 = not computed) prove inequality */
            Py_hash_t hash1, hash2;
        #if CYTHON_PEP393_ENABLED
            hash1 = ((PyASCIIObject*)s1)->hash;
            hash2 = ((PyASCIIObject*)s2)->hash;
        #else
            hash1 = ((PyUnicodeObject*)s1)->hash;
            hash2 = ((PyUnicodeObject*)s2)->hash;
        #endif
            if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
                goto return_ne;
            }
        }
#endif
        /* different storage kinds with equal length => different strings */
        kind = __Pyx_PyUnicode_KIND(s1);
        if (kind != __Pyx_PyUnicode_KIND(s2)) {
            goto return_ne;
        }
        data1 = __Pyx_PyUnicode_DATA(s1);
        data2 = __Pyx_PyUnicode_DATA(s2);
        if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
            goto return_ne;
        } else if (length == 1) {
            goto return_eq;
        } else {
            /* kind == bytes per character, so compare length*kind raw bytes */
            int result = memcmp(data1, data2, (size_t)(length * kind));
            #if PY_MAJOR_VERSION < 3
            Py_XDECREF(owned_ref);
            #endif
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & s2_is_unicode) {
        goto return_ne;
    } else if ((s2 == Py_None) & s1_is_unicode) {
        goto return_ne;
    } else {
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        #if PY_MAJOR_VERSION < 3
        Py_XDECREF(owned_ref);
        #endif
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
return_eq:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_EQ);
return_ne:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_NE);
#endif
}
/* None */
/* Floor division for Py_ssize_t: behaves like Python's `//`, rounding the
 * quotient toward negative infinity instead of C's truncation toward zero. */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
    Py_ssize_t quot = a / b;
    const Py_ssize_t rem = a - quot * b;
    /* C '/' truncates toward zero; when a remainder exists and the operands
     * have opposite signs, the truncated quotient is one too large. */
    if (rem != 0 && ((rem < 0) != (b < 0)))
        quot -= 1;
    return quot;
}
/* GetAttr */
/* Generic attribute lookup: route exact str/unicode names through Cython's
 * optimized GetAttrStr path, anything else through PyObject_GetAttr.
 * Returns a new reference or NULL with an exception set. */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
    if (likely(PyUnicode_Check(n)))
#else
    if (likely(PyString_Check(n)))
#endif
        return __Pyx_PyObject_GetAttrStr(o, n);
#endif
    return PyObject_GetAttr(o, n);
}
/* GetItemInt */
/* Fallback: index with a boxed integer `j` (already a new reference, may be
 * NULL if boxing failed). Consumes `j`. */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
    PyObject *r;
    if (!j) return NULL;
    r = PyObject_GetItem(o, j);
    Py_DECREF(j);
    return r;
}
/* List-specialized integer indexing with optional negative-index wraparound
 * and optional bounds checking; out-of-range falls back to the generic
 * (exception-raising) path. Returns a new reference or NULL. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                              CYTHON_NCP_UNUSED int wraparound,
                                                              CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyList_GET_SIZE(o);
    }
    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
        PyObject *r = PyList_GET_ITEM(o, wrapped_i);
        Py_INCREF(r);  /* GET_ITEM returns a borrowed reference */
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* Tuple-specialized variant of the above. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                              CYTHON_NCP_UNUSED int wraparound,
                                                              CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyTuple_GET_SIZE(o);
    }
    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
        PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
        Py_INCREF(r);
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* General integer indexing: specializes on list/tuple, then tries the
 * sq_item slot (with length-based wraparound for negative i), and finally
 * falls back to generic boxed indexing. Returns a new reference or NULL. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
                                                     CYTHON_NCP_UNUSED int wraparound,
                                                     CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
    if (is_list || PyList_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
        if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
            PyObject *r = PyList_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    }
    else if (PyTuple_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
        if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
            PyObject *r = PyTuple_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    } else {
        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
        if (likely(m && m->sq_item)) {
            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
                Py_ssize_t l = m->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;
                } else {
                    /* a length overflow means the index can't wrap here;
                     * any other failure propagates */
                    if (!PyErr_ExceptionMatches(PyExc_OverflowError))
                        return NULL;
                    PyErr_Clear();
                }
            }
            return m->sq_item(o, i);
        }
    }
#else
    if (is_list || PySequence_Check(o)) {
        return PySequence_GetItem(o, i);
    }
#endif
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
/* Subscript `obj` with an index-like object via the sequence protocol:
 * convert `index` to Py_ssize_t and delegate to the integer fast path.
 * Overflow is reported as IndexError per the sequence protocol. */
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
    PyObject *runerr;
    Py_ssize_t key_value;
    PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
    if (unlikely(!(m && m->sq_item))) {
        PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
        return NULL;
    }
    /* -1 is both a valid index and the error sentinel: check PyErr too */
    key_value = __Pyx_PyIndex_AsSsize_t(index);
    if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
        return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
    }
    if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
        PyErr_Clear();
        PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
    }
    return NULL;
}
/* Generic obj[key]: prefer the mapping protocol, fall back to the
 * sequence-index path. Returns a new reference or NULL. */
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
    PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
    if (likely(m && m->mp_subscript)) {
        return m->mp_subscript(obj, key);
    }
    return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/* decode_c_string */
/* Decode cstring[start:stop] to a unicode object. Negative start/stop are
 * interpreted Python-slice-style relative to strlen(cstring) (computed only
 * in that case). `decode_func` is an optional specialized decoder (e.g.
 * PyUnicode_DecodeUTF8); when NULL, PyUnicode_Decode is used with
 * `encoding`/`errors`. Returns a new reference or NULL. */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
         const char* cstring, Py_ssize_t start, Py_ssize_t stop,
         const char* encoding, const char* errors,
         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
    Py_ssize_t length;
    if (unlikely((start < 0) | (stop < 0))) {
        size_t slen = strlen(cstring);
        if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
            PyErr_SetString(PyExc_OverflowError,
                            "c-string too long to convert to Python");
            return NULL;
        }
        length = (Py_ssize_t) slen;
        if (start < 0) {
            start += length;
            if (start < 0)
                start = 0;  /* clamp like Python slicing does */
        }
        if (stop < 0)
            stop += length;
    }
    length = stop - start;
    if (unlikely(length <= 0))
        /* empty slice: return an empty unicode object */
        return PyUnicode_FromUnicode(NULL, 0);
    cstring += start;
    if (decode_func) {
        return decode_func(cstring, length, errors);
    } else {
        return PyUnicode_Decode(cstring, length, encoding, errors);
    }
}
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
/* Return 1 if `exc_type` matches any entry of `tuple`. The first pass
 * checks pointer identity (cheap, common case on Py3); the second applies
 * full subclass matching. */
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    Py_ssize_t i, n;
    n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
    for (i=0; i<n; i++) {
        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
    }
#endif
    for (i=0; i<n; i++) {
        if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
    }
    return 0;
}
/* Fast equivalent of PyErr_ExceptionMatches: test the current exception in
 * `tstate` against `err` (a class or tuple of classes) without the C-API
 * call overhead. Returns 1 on match, 0 otherwise (incl. no exception). */
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
    PyObject *exc_type = tstate->curexc_type;
    if (exc_type == err) return 1;
    if (unlikely(!exc_type)) return 0;
    if (unlikely(PyTuple_Check(err)))
        return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
    return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetAttr3 */
/* Helper for getattr(o, n, d): after a failed lookup, swallow only
 * AttributeError and return a new reference to the default `d`; any other
 * exception propagates (returns NULL). */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
        return NULL;
    __Pyx_PyErr_Clear();
    Py_INCREF(d);
    return d;
}
/* Three-argument getattr(o, n, d): returns the attribute, or `d` when the
 * attribute is missing. New reference or NULL on non-AttributeError. */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
    PyObject *r = __Pyx_GetAttr(o, n);
    return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* RaiseTooManyValuesToUnpack */
/* Raise ValueError for iterable unpacking that yielded more than
 * `expected` items. */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(PyExc_ValueError,
                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
/* Raise ValueError for iterable unpacking that ran out after `index`
 * items (message pluralizes "value" correctly). */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
/* Raise the TypeError produced when None is unpacked/iterated. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
/* Check that `obj` is an instance of extension type `type` (subclasses
 * allowed). Returns 1 on success; 0 with TypeError (or SystemError for a
 * NULL type) set on failure. Used for cast/typedness checks on values
 * rather than declared arguments (cf. __Pyx__ArgTypeTest). */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (likely(__Pyx_TypeCheck(obj, type)))
        return 1;
    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                 Py_TYPE(obj)->tp_name, type->tp_name);
    return 0;
}
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
/* Walk the thread state's exc_info stack past cleared/None entries and
 * return the topmost frame that actually holds an exception (borrowed
 * pointer into tstate; never NULL — the bottom entry is returned even if
 * empty). Mirrors CPython's internal _PyErr_GetTopmostException. */
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
    _PyErr_StackItem *exc_info = tstate->exc_info;
    while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
           exc_info->previous_item != NULL)
    {
        exc_info = exc_info->previous_item;
    }
    return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
/* Copy the currently *handled* exception (sys.exc_info, not the pending
 * error indicator) into *type/*value/*tb as new references (may be NULL). */
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    #if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
    *type = exc_info->exc_type;
    *value = exc_info->exc_value;
    *tb = exc_info->exc_traceback;
    #else
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
    #endif
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
}
/* Install (type, value, tb) as the handled exception, STEALING the three
 * references and releasing those previously held by the state. */
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    #if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type = exc_info->exc_type;
    tmp_value = exc_info->exc_value;
    tmp_tb = exc_info->exc_traceback;
    exc_info->exc_type = type;
    exc_info->exc_value = value;
    exc_info->exc_traceback = tb;
    #else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
    tstate->exc_traceback = tb;
    #endif
    /* release the old refs only after the new state is fully installed */
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
#endif
/* GetException */
/* Catch the pending exception: fetch it from the error indicator,
 * normalize it, hand new references to the caller via *type/*value/*tb,
 * and ALSO install it as the handled exception (sys.exc_info), as an
 * `except` block entry does. Returns 0 on success, -1 on failure (with
 * the outputs zeroed). */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
    PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    /* inline PyErr_Fetch: move curexc_* into locals and clear the state */
    local_type = tstate->curexc_type;
    local_value = tstate->curexc_value;
    local_tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
#else
    PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
    PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
    /* normalization itself may have raised */
    if (unlikely(tstate->curexc_type))
#else
    if (unlikely(PyErr_Occurred()))
#endif
        goto bad;
    #if PY_MAJOR_VERSION >= 3
    if (local_tb) {
        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
            goto bad;
    }
    #endif
    /* extra increfs: one set of references goes to the caller, the set we
     * already own is installed into the exc_info state below */
    Py_XINCREF(local_tb);
    Py_XINCREF(local_type);
    Py_XINCREF(local_value);
    *type = local_type;
    *value = local_value;
    *tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
    #if CYTHON_USE_EXC_INFO_STACK
    {
        _PyErr_StackItem *exc_info = tstate->exc_info;
        tmp_type = exc_info->exc_type;
        tmp_value = exc_info->exc_value;
        tmp_tb = exc_info->exc_traceback;
        exc_info->exc_type = local_type;
        exc_info->exc_value = local_value;
        exc_info->exc_traceback = local_tb;
    }
    #else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = local_type;
    tstate->exc_value = local_value;
    tstate->exc_traceback = local_tb;
    #endif
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
    return 0;
bad:
    *type = 0;
    *value = 0;
    *tb = 0;
    Py_XDECREF(local_type);
    Py_XDECREF(local_value);
    Py_XDECREF(local_tb);
    return -1;
}
/* SwapException */
/* Exchange the caller's (type, value, tb) triple with the thread state's
 * handled exception (sys.exc_info). References are swapped, not
 * duplicated: the state takes ownership of the inputs and the caller
 * receives ownership of the previous state. */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    #if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type = exc_info->exc_type;
    tmp_value = exc_info->exc_value;
    tmp_tb = exc_info->exc_traceback;
    exc_info->exc_type = *type;
    exc_info->exc_value = *value;
    exc_info->exc_traceback = *tb;
    #else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = *type;
    tstate->exc_value = *value;
    tstate->exc_traceback = *tb;
    #endif
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
#else
/* Portable fallback using the public exc-info API. */
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
    PyErr_SetExcInfo(*type, *value, *tb);
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
#endif
/* Import */
/* Import module `name` with the semantics of the __import__ builtin.
 * `from_list` may be NULL (plain import); `level` is the relative-import
 * level (-1 means "try relative then absolute", Py2-era semantics).
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
    PyObject *empty_list = 0;
    PyObject *module = 0;
    PyObject *global_dict = 0;
    PyObject *empty_dict = 0;
    PyObject *list;
    #if PY_MAJOR_VERSION < 3
    /* Py2: go through the (possibly user-overridden) builtins.__import__. */
    PyObject *py_import;
    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
    if (!py_import)
        goto bad;
    #endif
    if (from_list)
        list = from_list;
    else {
        /* __import__ requires a real (possibly empty) fromlist object. */
        empty_list = PyList_New(0);
        if (!empty_list)
            goto bad;
        list = empty_list;
    }
    global_dict = PyModule_GetDict(__pyx_m);
    if (!global_dict)
        goto bad;
    empty_dict = PyDict_New();
    if (!empty_dict)
        goto bad;
    {
        #if PY_MAJOR_VERSION >= 3
        if (level == -1) {
            /* Emulate Py2's implicit-relative lookup: if this module lives in
             * a package, first try a level-1 (sibling) import and fall back
             * to an absolute import if that raises ImportError. */
            if (strchr(__Pyx_MODULE_NAME, '.')) {
                module = PyImport_ImportModuleLevelObject(
                    name, global_dict, empty_dict, list, 1);
                if (!module) {
                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
                        goto bad;
                    PyErr_Clear();
                }
            }
            level = 0;
        }
        #endif
        if (!module) {
            #if PY_MAJOR_VERSION < 3
            PyObject *py_level = PyInt_FromLong(level);
            if (!py_level)
                goto bad;
            module = PyObject_CallFunctionObjArgs(py_import,
                name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
            Py_DECREF(py_level);
            #else
            module = PyImport_ImportModuleLevelObject(
                name, global_dict, empty_dict, list, level);
            #endif
        }
    }
bad:
    /* `module` is NULL on any failure path, so this doubles as cleanup
     * and error return. */
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(py_import);
    #endif
    Py_XDECREF(empty_list);
    Py_XDECREF(empty_dict);
    return module;
}
/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
/* Report whether `b` occurs among the strict bases of `a` (walking the
 * tp_base single-inheritance chain; `a` itself is not compared).  As in the
 * original, `object` (PyBaseObject_Type) is considered a base of everything. */
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
    PyTypeObject *cur;
    for (cur = a; cur != NULL; cur = cur->tp_base) {
        if (cur->tp_base == b)
            return 1;
    }
    return b == &PyBaseObject_Type;
}
/* Fast issubclass(a, b) for type objects: identity first, then a linear
 * scan of a's MRO tuple when one has been computed, otherwise fall back to
 * walking the tp_base chain. */
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
    PyObject *mro;
    if (a == b) return 1;
    mro = a->tp_mro;
    if (likely(mro)) {
        Py_ssize_t i, n;
        n = PyTuple_GET_SIZE(mro);
        for (i = 0; i < n; i++) {
            if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
                return 1;
        }
        return 0;
    }
    /* tp_mro can be NULL for types still being initialized. */
    return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
/* Does exception class `err` match exc_type1 (may be NULL) or exc_type2?
 * Py2 variant: uses PyObject_IsSubclass (which can execute arbitrary code
 * via __subclasscheck__), so the live exception state is saved around the
 * calls and any comparison error is reported as unraisable, not raised. */
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
    PyObject *exception, *value, *tb;
    int res;
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ErrFetch(&exception, &value, &tb);
    res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
    if (unlikely(res == -1)) {
        PyErr_WriteUnraisable(err);
        res = 0;
    }
    if (!res) {
        res = PyObject_IsSubclass(err, exc_type2);
        if (unlikely(res == -1)) {
            PyErr_WriteUnraisable(err);
            res = 0;
        }
    }
    __Pyx_ErrRestore(exception, value, tb);
    return res;
}
#else
/* Py3 variant: exception classes cannot override subclass checks here, so
 * a direct MRO walk suffices and cannot fail. */
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
    int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
    if (!res) {
        res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
    }
    return res;
}
#endif
/* Does exception class `exc_type` match any element of `tuple`?
 * A fast identity pass runs before the subclass pass so that the common
 * "except (A, B)" case with an exact match never does an MRO walk. */
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    Py_ssize_t i, n;
    assert(PyExceptionClass_Check(exc_type));
    n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
    /* Separate identity-only pass (on Py2 it is folded into the loop below). */
    for (i=0; i<n; i++) {
        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
    }
#endif
    for (i=0; i<n; i++) {
        PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
        if (likely(exc_type == t)) return 1;
#endif
        if (likely(PyExceptionClass_Check(t))) {
            if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
        } else {
            /* Non-class tuple entries are deliberately ignored here. */
        }
    }
    return 0;
}
/* Fast-path replacement for PyErr_GivenExceptionMatches(err, exc_type). */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
    /* Exact identity is the overwhelmingly common case. */
    if (likely(err == exc_type))
        return 1;
    if (likely(PyExceptionClass_Check(err))) {
        /* class vs. single class: subclass walk */
        if (likely(PyExceptionClass_Check(exc_type)))
            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
        /* class vs. tuple of classes: element-wise match */
        if (likely(PyTuple_Check(exc_type)))
            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
    }
    /* Anything else: defer to CPython's general implementation. */
    return PyErr_GivenExceptionMatches(err, exc_type);
}
/* Match `err` against two exception classes at once (used for
 * "except (A, B)" with exactly two known classes).  Both arguments must be
 * exception classes, unlike the single-type variant above. */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
    assert(PyExceptionClass_Check(exc_type1));
    assert(PyExceptionClass_Check(exc_type2));
    if (likely(err == exc_type1 || err == exc_type2)) return 1;
    if (likely(PyExceptionClass_Check(err))) {
        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
    }
    /* err is not a class (e.g. an instance): defer to CPython for each. */
    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* PyIntBinop */
#if !CYTHON_COMPILING_IN_PYPY
/* Optimized `op1 + op2` where op2 is known at compile time to be the C
 * integer constant `intval` (op2 is the equivalent Python object, used only
 * for fallback paths).  Handles exact int/long/float receivers inline and
 * defers everything else to PyNumber_(InPlace)Add.
 * Returns a new reference, or NULL with an exception set. */
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
    (void)inplace;
    (void)zerodivision_check;
    #if PY_MAJOR_VERSION < 3
    if (likely(PyInt_CheckExact(op1))) {
        const long b = intval;
        long x;
        long a = PyInt_AS_LONG(op1);
            /* Unsigned add avoids signed-overflow UB; the sign test below
             * detects overflow and falls back to arbitrary precision. */
            x = (long)((unsigned long)a + b);
            if (likely((x^a) >= 0 || (x^b) >= 0))
                return PyInt_FromLong(x);
            return PyLong_Type.tp_as_number->nb_add(op1, op2);
    }
    #endif
    #if CYTHON_USE_PYLONG_INTERNALS
    if (likely(PyLong_CheckExact(op1))) {
        const long b = intval;
        long a, x;
#ifdef HAVE_LONG_LONG
        const PY_LONG_LONG llb = intval;
        PY_LONG_LONG lla, llx;
#endif
        /* Read the PyLong digit array directly; Py_SIZE encodes both the
         * digit count and the sign of the value. */
        const digit* digits = ((PyLongObject*)op1)->ob_digit;
        const Py_ssize_t size = Py_SIZE(op1);
        if (likely(__Pyx_sst_abs(size) <= 1)) {
            a = likely(size) ? digits[0] : 0;
            if (size == -1) a = -a;
        } else {
            /* Reassemble up to 4 digits into a C long (or long long) when it
             * provably fits; otherwise use the generic nb_add slot. */
            switch (size) {
                case -2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case 2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case -3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case 3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case -4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                case 4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                    CYTHON_FALLTHROUGH;
                default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
            }
        }
                /* Cannot overflow: |a| < 2^(bits-1 - shift margin) ensured above. */
                x = a + b;
            return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
        long_long:
                llx = lla + llb;
            return PyLong_FromLongLong(llx);
#endif
        
        
    }
    #endif
    if (PyFloat_CheckExact(op1)) {
        const long b = intval;
        double a = PyFloat_AS_DOUBLE(op1);
            double result;
            PyFPE_START_PROTECT("add", return NULL)
            result = ((double)a) + (double)b;
            PyFPE_END_PROTECT(result)
            return PyFloat_FromDouble(result);
    }
    /* Generic fallback, honoring in-place semantics when requested. */
    return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* None */
/* Floor division of two C longs (Python // semantics): C's `/` truncates
 * toward zero, so when the remainder is nonzero and the operands have
 * opposite signs the quotient is adjusted down by one. */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
    long quotient = a / b;
    long remainder = a - quotient * b;
    if (remainder != 0 && ((remainder < 0) != (b < 0)))
        quotient -= 1;
    return quotient;
}
/* ImportFrom */
/* Implement `from module import name`: attribute lookup on the module, with
 * a missing attribute rephrased as ImportError (matching interpreter
 * behavior).  Returns a new reference or NULL with an exception set. */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Format(PyExc_ImportError,
        #if PY_MAJOR_VERSION < 3
            "cannot import name %.230s", PyString_AS_STRING(name));
        #else
            "cannot import name %S", name);
        #endif
    }
    return value;
}
/* HasAttr */
/* hasattr(o, n) with Python semantics: 1 if the attribute exists, 0 if the
 * lookup raised (that error is cleared), -1 with TypeError set when the
 * attribute name is not a string. */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
    PyObject *attr;
    if (unlikely(!__Pyx_PyBaseString_Check(n))) {
        PyErr_SetString(PyExc_TypeError,
                        "hasattr(): attribute name must be string");
        return -1;
    }
    attr = __Pyx_GetAttr(o, n);
    if (likely(attr != NULL)) {
        Py_DECREF(attr);
        return 1;
    }
    /* Swallow the lookup error, as the hasattr() builtin does. */
    PyErr_Clear();
    return 0;
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
/* Raise the standard AttributeError for a failed generic lookup of
 * `attr_name` on an instance of `tp`.  Always returns NULL so callers can
 * `return __Pyx_RaiseGenericGetAttributeError(...)`. */
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
    PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
                 "'%.50s' object has no attribute '%U'",
                 tp->tp_name, attr_name);
#else
                 "'%.50s' object has no attribute '%.400s'",
                 tp->tp_name, PyString_AS_STRING(attr_name));
#endif
    return NULL;
}
/* Generic attribute lookup for types known to have no instance __dict__
 * (tp_dictoffset == 0): only the type's MRO is searched, skipping the
 * instance-dict probing that PyObject_GenericGetAttr would do.
 * Returns a new reference or NULL with AttributeError set. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
    PyObject *descr;
    PyTypeObject *tp = Py_TYPE(obj);
    if (unlikely(!PyString_Check(attr_name))) {
        /* Non-string names take the fully general path. */
        return PyObject_GenericGetAttr(obj, attr_name);
    }
    assert(!tp->tp_dictoffset);
    descr = _PyType_Lookup(tp, attr_name);  /* borrowed reference */
    if (unlikely(!descr)) {
        return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
    }
    Py_INCREF(descr);
    #if PY_MAJOR_VERSION < 3
    if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
    #endif
    {
        /* If the class attribute is a descriptor, invoke its __get__. */
        descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
        if (unlikely(f)) {
            PyObject *res = f(descr, obj, (PyObject *)tp);
            Py_DECREF(descr);
            return res;
        }
    }
    return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
/* Dispatch attribute lookup: types whose instances carry a __dict__
 * (nonzero tp_dictoffset) need CPython's full generic lookup; dict-free
 * types can use the faster no-dict path. */
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
    int has_instance_dict = (Py_TYPE(obj)->tp_dictoffset != 0);
    return has_instance_dict
        ? PyObject_GenericGetAttr(obj, attr_name)
        : __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* SetVTable */
/* Store an extension type's C-method vtable pointer in its tp_dict under
 * the "__pyx_vtable__" key, wrapped in a capsule so other Cython modules
 * can retrieve it.  Returns 0 on success, -1 with an exception set. */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
    PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
    /* Ancient Python: capsules don't exist yet, use a CObject. */
    PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
    if (!ob)
        goto bad;
    if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
        goto bad;
    Py_DECREF(ob);
    return 0;
bad:
    Py_XDECREF(ob);
    return -1;
}
/* SetupReduce */
/* Return 1 iff meth.__name__ compares equal to `name`.  Any failure
 * (missing __name__, comparison error) is cleared and reported as 0. */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
    int result = -1;
    PyObject *meth_name = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
    if (likely(meth_name))
        result = PyObject_RichCompareBool(meth_name, name, Py_EQ);
    if (unlikely(result < 0)) {
        PyErr_Clear();
        result = 0;
    }
    Py_XDECREF(meth_name);
    return result;
}
/* Wire up pickling for an extension type: if the type does not define
 * __getstate__ and still inherits object's default __reduce_ex__/__reduce__,
 * install the generated __reduce_cython__/__setstate_cython__ helpers as its
 * __reduce__/__setstate__ and drop the *_cython aliases from tp_dict.
 * Returns 0 on success, -1 with an exception set. */
static int __Pyx_setup_reduce(PyObject* type_obj) {
    int ret = 0;
    PyObject *object_reduce = NULL;
    PyObject *object_reduce_ex = NULL;
    PyObject *reduce = NULL;
    PyObject *reduce_ex = NULL;
    PyObject *reduce_cython = NULL;
    PyObject *setstate = NULL;
    PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
    /* A custom __getstate__ means pickling is already handled. */
    if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD;
#else
    if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
    /* NOTE: _PyType_Lookup returns borrowed refs, hence no decref below. */
    object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD;
#else
    object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD;
#endif
    reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto BAD;
    if (reduce_ex == object_reduce_ex) {
        /* __reduce_ex__ is inherited; check __reduce__ the same way. */
#if CYTHON_USE_PYTYPE_LOOKUP
        object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD;
#else
        object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD;
#endif
        reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto BAD;
        if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
            /* Promote __reduce_cython__ to __reduce__ and remove the alias. */
            reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto BAD;
            ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto BAD;
            ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto BAD;
            setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
            if (!setstate) PyErr_Clear();
            if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
                /* Same promotion for __setstate_cython__ -> __setstate__. */
                setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto BAD;
                ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto BAD;
                ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto BAD;
            }
            /* Invalidate the type's method cache after mutating tp_dict. */
            PyType_Modified((PyTypeObject*)type_obj);
        }
    }
    goto GOOD;
BAD:
    if (!PyErr_Occurred())
        PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
    ret = -1;
GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
    Py_XDECREF(object_reduce);
    Py_XDECREF(object_reduce_ex);
#endif
    Py_XDECREF(reduce);
    Py_XDECREF(reduce_ex);
    Py_XDECREF(reduce_cython);
    Py_XDECREF(setstate);
    Py_XDECREF(setstate_cython);
    return ret;
}
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
/* Decide whether the C source line should appear in generated tracebacks.
 * Consults cython_runtime.cline_in_traceback (initializing it to False when
 * unset); returns `c_line` to include it or 0 to suppress it.  The live
 * exception state is preserved around the attribute access. */
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) {
    PyObject *use_cline;
    PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject **cython_runtime_dict;
#endif
    if (unlikely(!__pyx_cython_runtime)) {
        /* Runtime module not set up (e.g. during teardown): keep c_line. */
        return c_line;
    }
    __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
    /* Fast path: read the flag straight out of the module dict, with a
     * version-tag cache to skip repeated lookups. */
    cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
    if (likely(cython_runtime_dict)) {
        __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
            use_cline, *cython_runtime_dict,
            __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
    } else
#endif
    {
      PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
      if (use_cline_obj) {
        use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
        Py_DECREF(use_cline_obj);
      } else {
        PyErr_Clear();
        use_cline = NULL;
      }
    }
    if (!use_cline) {
        /* Attribute missing: default to suppressing C lines and persist
         * that default on the runtime module. */
        c_line = 0;
        PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
    }
    else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
        c_line = 0;
    }
    __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
    return c_line;
}
#endif
/* CodeObjectCache */
/* Binary search the sorted code-object cache for `code_line`.
 * Returns the index of the matching entry, or the index at which an entry
 * for `code_line` would need to be inserted to keep `entries` sorted.
 * Fix: with count == 0 the original fell through to the post-loop check and
 * read entries[0] out of bounds; an empty cache now returns insertion
 * position 0 immediately (the value a correct bisection would produce). */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int start = 0, mid = 0, end = count - 1;
    if (count <= 0) {
        return 0;
    }
    /* Fast path: beyond the largest cached line -> append position. */
    if (code_line > entries[end].code_line) {
        return count;
    }
    while (start < end) {
        mid = start + (end - start) / 2;  /* overflow-safe midpoint */
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
            start = mid + 1;
        } else {
            return mid;
        }
    }
    /* start == end: `mid` holds the last probed index; pick the side of it
     * that keeps the array sorted. */
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}
/* Look up a cached PyCodeObject by (pseudo) line number.
 * Returns a new reference, or NULL when the cache is empty, the line is 0
 * (used as "no line"), or no exact match exists. */
static PyCodeObject *__pyx_find_code_object(int code_line) {
    PyCodeObject* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    /* Bisect returns an insertion point; verify it is an exact hit. */
    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = __pyx_code_cache.entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
/* Insert (or replace) a PyCodeObject in the sorted line-number cache.
 * Takes its own reference on `code_object`.  Allocation failures are
 * silently ignored -- the cache is a best-effort optimization. */
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;  /* 0 means "no line"; never cached */
    }
    if (unlikely(!entries)) {
        /* First insertion: allocate the initial 64-entry table. */
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
        /* Exact hit: swap in the new code object, dropping the old ref. */
        PyCodeObject* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_DECREF(tmp);
        return;
    }
    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
        /* Grow linearly by 64 entries. */
        int new_max = __pyx_code_cache.max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        __pyx_code_cache.entries = entries;
        __pyx_code_cache.max_count = new_max;
    }
    /* Shift the tail up one slot and place the new entry at `pos`. */
    for (i=__pyx_code_cache.count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    __pyx_code_cache.count++;
    Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
/* Build a minimal, empty PyCodeObject whose filename/funcname/line number
 * make a synthesized traceback frame point at the right source location.
 * When a C line is given, it is appended to the function name so both the
 * Cython and C positions are visible.  Returns a new ref or NULL. */
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
            const char *funcname, int c_line,
            int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyObject *py_srcfile = 0;
    PyObject *py_funcname = 0;
    #if PY_MAJOR_VERSION < 3
    py_srcfile = PyString_FromString(filename);
    #else
    py_srcfile = PyUnicode_FromString(filename);
    #endif
    if (!py_srcfile) goto bad;
    if (c_line) {
        /* Embed the C file/line in the displayed function name. */
        #if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        #else
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        #endif
    }
    else {
        #if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromString(funcname);
        #else
        py_funcname = PyUnicode_FromString(funcname);
        #endif
    }
    if (!py_funcname) goto bad;
    /* All code/consts/names fields are empty: this code object is never
     * executed, it only carries location metadata for the traceback. */
    py_code = __Pyx_PyCode_New(
        0,
        0,
        0,
        0,
        0,
        __pyx_empty_bytes, /*PyObject *code,*/
        __pyx_empty_tuple, /*PyObject *consts,*/
        __pyx_empty_tuple, /*PyObject *names,*/
        __pyx_empty_tuple, /*PyObject *varnames,*/
        __pyx_empty_tuple, /*PyObject *freevars,*/
        __pyx_empty_tuple, /*PyObject *cellvars,*/
        py_srcfile,   /*PyObject *filename,*/
        py_funcname,  /*PyObject *name,*/
        py_line,
        __pyx_empty_bytes  /*PyObject *lnotab*/
    );
    Py_DECREF(py_srcfile);
    Py_DECREF(py_funcname);
    return py_code;
bad:
    Py_XDECREF(py_srcfile);
    Py_XDECREF(py_funcname);
    return NULL;
}
/* Append a frame for (funcname, filename, py_line) to the traceback of the
 * exception currently being raised.  Code objects are cached keyed by
 * -c_line (when the C line is shown) or py_line.  Failures are ignored:
 * a missing traceback frame must not mask the original exception. */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyFrameObject *py_frame = 0;
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    if (c_line) {
        /* May zero c_line if cline_in_traceback is disabled. */
        c_line = __Pyx_CLineForTraceback(tstate, c_line);
    }
    py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
    if (!py_code) {
        py_code = __Pyx_CreateCodeObjectForTraceback(
            funcname, c_line, py_line, filename);
        if (!py_code) goto bad;
        __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
    }
    py_frame = PyFrame_New(
        tstate,            /*PyThreadState *tstate,*/
        py_code,           /*PyCodeObject *code,*/
        __pyx_d,    /*PyObject *globals,*/
        0                  /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
    PyTraceBack_Here(py_frame);
bad:
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
/* Py2 shim: acquire a buffer from `obj`, also accepting this module's own
 * array/memoryview types (which predate universal buffer support on Py2).
 * Returns 0 on success, -1 with TypeError set otherwise. */
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
    if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
        if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
        if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
    return -1;
}
/* Py2 shim: release a buffer acquired through __Pyx_GetBuffer.  For objects
 * without native buffer support the view is detached and the owner's
 * reference (taken at acquisition) is dropped manually. */
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
    PyObject *obj = view->obj;
    if (!obj) return;
    if (PyObject_CheckBuffer(obj)) {
        PyBuffer_Release(view);
        return;
    }
    if ((0)) {}  /* placeholder branch emitted by the code generator */
    view->obj = NULL;
    Py_DECREF(obj);
}
#endif
/* MemviewSliceIsContig */
/* Is the memoryview slice contiguous in the given order ('C' or 'F')?
 * Walks the dimensions innermost-first ('F': dim 0 upward, 'C': last dim
 * downward) checking that each stride equals the running element block
 * size and that no dimension is indirect (suboffset >= 0). */
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
{
    int i, index, step, start;
    Py_ssize_t itemsize = mvs.memview->view.itemsize;
    if (order == 'F') {
        step = 1;
        start = 0;
    } else {
        step = -1;
        start = ndim - 1;
    }
    for (i = 0; i < ndim; i++) {
        index = start + step * i;
        if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
            return 0;
        /* Next (outer) dimension must stride over this whole block. */
        itemsize *= mvs.shape[index];
    }
    return 1;
}
/* OverlappingSlices */
/* Compute the half-open memory range [*out_start, *out_end) touched by a
 * strided slice.  Negative strides move the start pointer down instead of
 * the end pointer up; a zero-extent dimension yields an empty range. */
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
                               void **out_start, void **out_end,
                               int ndim, size_t itemsize)
{
    char *start, *end;
    int i;
    start = end = slice->data;
    for (i = 0; i < ndim; i++) {
        Py_ssize_t stride = slice->strides[i];
        Py_ssize_t extent = slice->shape[i];
        if (extent == 0) {
            /* No elements at all: collapse to an empty range. */
            *out_start = *out_end = start;
            return;
        } else {
            if (stride > 0)
                end += stride * (extent - 1);
            else
                start += stride * (extent - 1);
        }
    }
    *out_start = start;
    *out_end = end + itemsize;  /* past the last element's last byte */
}
/* Do the memory extents of two slices intersect?  Two half-open ranges
 * [lo, hi) overlap exactly when each begins before the other ends. */
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
                     __Pyx_memviewslice *slice2,
                     int ndim, size_t itemsize)
{
    void *lo_a, *hi_a, *lo_b, *hi_b;
    __pyx_get_array_memory_extents(slice1, &lo_a, &hi_a, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &lo_b, &hi_b, ndim, itemsize);
    return (lo_a < hi_b) && (lo_b < hi_a);
}
/* Capsule */
/* Wrap a C pointer in a PyCapsule tagged with signature string `sig`
 * (a CObject on pre-2.7 Pythons, where `sig` is unused).
 * Returns a new reference or NULL on failure. */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
    PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
    cobj = PyCapsule_New(p, sig, NULL);
#else
    cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
    return cobj;
}
/* IsLittleEndian */
/* Runtime endianness probe: store a known 32-bit pattern and look at which
 * byte lands first in memory.  On little-endian hosts the least significant
 * byte (0x04) comes first. */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
    union {
        uint32_t word;
        uint8_t bytes[4];
    } probe;
    probe.word = 0x01020304;
    return probe.bytes[0] == 4;
}
/* BufferFormatCheck */
/* Initialize a buffer-format parser context over caller-provided stack
 * storage, rooted at the expected dtype `type`.  The head of the stack is
 * pre-descended through single-field struct wrappers (typegroup 'S') so
 * parsing starts at the first concrete scalar field. */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type) {
  stack[0].field = &ctx->root;
  stack[0].parent_offset = 0;
  ctx->root.type = type;
  ctx->root.name = "buffer dtype";
  ctx->root.offset = 0;
  ctx->head = stack;
  ctx->head->field = &ctx->root;
  ctx->fmt_offset = 0;
  ctx->head->parent_offset = 0;
  ctx->new_packmode = '@';   /* native alignment until the format says otherwise */
  ctx->enc_packmode = '@';
  ctx->new_count = 1;
  ctx->enc_count = 0;
  ctx->enc_type = 0;
  ctx->is_complex = 0;
  ctx->is_valid_array = 0;
  ctx->struct_alignment = 0;
  while (type->typegroup == 'S') {
    ++ctx->head;
    ctx->head->field = type->fields;
    ctx->head->parent_offset = 0;
    type = type->fields->type;
  }
}
/* Parse a non-negative decimal run at *ts, advancing *ts past the digits.
 * Returns the parsed value, or -1 (with *ts untouched) when *ts does not
 * point at a digit.  No overflow guard, matching the original. */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    const char* p = *ts;
    int value;
    if (*p < '0' || *p > '9')
        return -1;
    value = 0;
    do {
        value = value * 10 + (*p - '0');
        ++p;
    } while (*p >= '0' && *p <= '9');
    *ts = p;
    return value;
}
/* Like __Pyx_BufFmt_ParseNumber, but a missing number is an error:
 * raises ValueError naming the offending character and returns -1. */
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    int number = __Pyx_BufFmt_ParseNumber(ts);
    if (number == -1)
        PyErr_Format(PyExc_ValueError,\
                     "Does not understand character buffer dtype format string ('%c')", **ts);
    return number;
}
/* Raise ValueError for a format-string character the parser cannot handle. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
  PyErr_Format(PyExc_ValueError,
               "Unexpected format string character: '%c'", ch);
}
/* Human-readable description of a struct-module format code, used when
 * composing buffer dtype mismatch error messages. */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
  if (ch == 'c') return "'char'";
  if (ch == 'b') return "'signed char'";
  if (ch == 'B') return "'unsigned char'";
  if (ch == 'h') return "'short'";
  if (ch == 'H') return "'unsigned short'";
  if (ch == 'i') return "'int'";
  if (ch == 'I') return "'unsigned int'";
  if (ch == 'l') return "'long'";
  if (ch == 'L') return "'unsigned long'";
  if (ch == 'q') return "'long long'";
  if (ch == 'Q') return "'unsigned long long'";
  /* Floating-point codes double as their complex counterparts. */
  if (ch == 'f') return is_complex ? "'complex float'" : "'float'";
  if (ch == 'd') return is_complex ? "'complex double'" : "'double'";
  if (ch == 'g') return is_complex ? "'complex long double'" : "'long double'";
  if (ch == 'T') return "a struct";
  if (ch == 'O') return "Python object";
  if (ch == 'P') return "a pointer";
  if (ch == 's' || ch == 'p') return "a string";
  if (ch == 0) return "end";
  return "unparseable format string";
}
/* Fixed "standard" (struct-module '<'/'>'/'=' mode) size in bytes for a
 * format code; complex doubles the float sizes.  Returns 0 with a Python
 * error set for 'g' (no standard size) or unknown codes. */
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
  switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return 2;
    case 'i': case 'I': case 'l': case 'L': return 4;
    case 'q': case 'Q': return 8;
    case 'f': return (is_complex ? 8 : 4);
    case 'd': return (is_complex ? 16 : 8);
    case 'g': {
      PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
      return 0;
    }
    case 'O': case 'P': return sizeof(void*);
    default:
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
    }
}
/* Native ('@'/'^' mode) size in bytes for a format code, i.e. the host
 * compiler's sizeof; complex doubles the float sizes.  Returns 0 with a
 * Python error set for unknown codes. */
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
  switch (ch) {
    case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(short);
    case 'i': case 'I': return sizeof(int);
    case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(float) * (is_complex ? 2 : 1);
    case 'd': return sizeof(double) * (is_complex ? 2 : 1);
    case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
    case 'O': case 'P': return sizeof(void*);
    default: {
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
    }
  }
}
/* Probe structs for native alignment: sizeof(__Pyx_st_T) - sizeof(T) yields
 * T's alignment requirement on this platform (used by
 * __Pyx_BufFmt_TypeCharToAlignment below). */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
/* Native alignment requirement (in bytes) for a format code, measured via
 * the __Pyx_st_* probe structs.  Returns 0 with a Python error set for
 * unknown codes. */
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
  switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
    default:
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
    }
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably be the same as the
   alignment probes above, but we don't have any guarantees.
 */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
/* Trailing-padding contribution (in bytes) of a format code when it is the
 * first member of a struct, measured via the __Pyx_pad_* probe structs.
 * Returns 0 with a Python error set for unknown codes. */
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
  switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
    default:
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
    }
}
/* Map a format code to its internal type-group tag:
 * 'H' char, 'I' signed int, 'U' unsigned int, 'R' real float, 'C' complex,
 * 'O' Python object, 'P' pointer.  Returns 0 with a Python error set for
 * unknown codes. */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
  switch (ch) {
    case 'c':
        return 'H';
    case 'b': case 'h': case 'i':
    case 'l': case 'q': case 's': case 'p':
        return 'I';
    case 'B': case 'H': case 'I': case 'L': case 'Q':
        return 'U';
    case 'f': case 'd': case 'g':
        return (is_complex ? 'C' : 'R');
    case 'O':
        return 'O';
    case 'P':
        return 'P';
    default: {
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
    }
  }
}
/* Raise ValueError for a buffer dtype mismatch at the parser's current
 * position.  At the root (or past the end) only the expected/got pair is
 * reported; inside a nested struct the enclosing field path is included. */
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
  if (ctx->head == NULL || ctx->head->field == &ctx->root) {
    const char* expected;
    const char* quote;
    if (ctx->head == NULL) {
      expected = "end";
      quote = "";
    } else {
      expected = ctx->head->field->type->name;
      quote = "'";
    }
    PyErr_Format(PyExc_ValueError,
                 "Buffer dtype mismatch, expected %s%s%s but got %s",
                 quote, expected, quote,
                 __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
  } else {
    __Pyx_StructField* field = ctx->head->field;
    __Pyx_StructField* parent = (ctx->head - 1)->field;
    PyErr_Format(PyExc_ValueError,
                 "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
                 field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
                 parent->type->name, field->name);
  }
}
/* Flush the pending run of identical format characters (ctx->enc_type
   repeated ctx->enc_count times) against the expected C struct layout:
   advances ctx->head through the struct-field stack, checking size,
   alignment, type group and byte offset of every field consumed.
   Returns 0 on success, -1 with a Python exception set on mismatch. */
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
  char group;
  size_t size, offset, arraysize = 1;
  /* nothing pending to process */
  if (ctx->enc_type == 0) return 0;
  if (ctx->head->field->type->arraysize[0]) {
    int i, ndim = 0;
    /* 's'/'p' strings are treated as a 1-D char array of the declared length */
    if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
      ctx->is_valid_array = ctx->head->field->type->ndim == 1;
      ndim = 1;
      if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
        PyErr_Format(PyExc_ValueError,
                     "Expected a dimension of size %zu, got %zu",
                     ctx->head->field->type->arraysize[0], ctx->enc_count);
        return -1;
      }
    }
    if (!ctx->is_valid_array) {
      PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
                   ctx->head->field->type->ndim, ndim);
      return -1;
    }
    /* total element count across all declared dimensions */
    for (i = 0; i < ctx->head->field->type->ndim; i++) {
      arraysize *= ctx->head->field->type->arraysize[i];
    }
    ctx->is_valid_array = 0;
    ctx->enc_count = 1;
  }
  group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
  do {
    __Pyx_StructField* field = ctx->head->field;
    __Pyx_TypeInfo* type = field->type;
    /* '@'/'^' pack modes use native sizes; the others use standard sizes */
    if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
      size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
    } else {
      size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
    }
    if (ctx->enc_packmode == '@') {
      /* native mode additionally imposes natural alignment padding */
      size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
      size_t align_mod_offset;
      if (align_at == 0) return -1;
      align_mod_offset = ctx->fmt_offset % align_at;
      if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
      if (ctx->struct_alignment == 0)
          ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
                                                                 ctx->is_complex);
    }
    if (type->size != size || type->typegroup != group) {
      if (type->typegroup == 'C' && type->fields != NULL) {
        /* complex type: descend into its {real, imag} sub-fields */
        size_t parent_offset = ctx->head->parent_offset + field->offset;
        ++ctx->head;
        ctx->head->field = type->fields;
        ctx->head->parent_offset = parent_offset;
        continue;
      }
      if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
        /* chars/bytes of matching size are accepted despite differing groups */
      } else {
        __Pyx_BufFmt_RaiseExpected(ctx);
        return -1;
      }
    }
    offset = ctx->head->parent_offset + field->offset;
    if (ctx->fmt_offset != offset) {
      PyErr_Format(PyExc_ValueError,
                   "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
                   (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
      return -1;
    }
    ctx->fmt_offset += size;
    if (arraysize)
      ctx->fmt_offset += (arraysize - 1) * size;
    --ctx->enc_count;
    /* advance to the next struct field, popping finished structs and
       pushing into nested ones */
    while (1) {
      if (field == &ctx->root) {
        ctx->head = NULL;
        if (ctx->enc_count != 0) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return -1;
        }
        break;
      }
      ctx->head->field = ++field;
      if (field->type == NULL) {
        /* sentinel: end of this struct's field list — pop to the parent */
        --ctx->head;
        field = ctx->head->field;
        continue;
      } else if (field->type->typegroup == 'S') {
        /* nested struct: push its first field (skip empty structs) */
        size_t parent_offset = ctx->head->parent_offset + field->offset;
        if (field->type->fields->type == NULL) continue;
        field = field->type->fields;
        ++ctx->head;
        ctx->head->field = field;
        ctx->head->parent_offset = parent_offset;
        break;
      } else {
        break;
      }
    }
  } while (ctx->enc_count);
  ctx->enc_type = 0;
  ctx->is_complex = 0;
  return 0;
}
/* Parse a "(d1,d2,...)" array-dimension suffix in a buffer format string,
   validating each dimension against the expected type's arraysize.
   On success advances *tsp past the closing ')' and returns Py_None
   (borrowed, used only as a non-NULL success flag); on failure returns
   NULL with a Python exception set. */
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
    const char *ts = *tsp;
    int i = 0, number;
    int ndim = ctx->head->field->type->ndim;
    ++ts;  /* skip the opening '(' */
    if (ctx->new_count != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    while (*ts && *ts != ')') {
        switch (*ts) {
            /* BUG FIX: the original 'continue' here re-tested the while
               condition without advancing ts, so any whitespace between
               the parentheses caused an infinite loop.  Skip it instead. */
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue;
            default:  break;
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        /* each parsed dimension must match the declared array size */
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            return PyErr_Format(PyExc_ValueError,
                        "Expected a dimension of size %zu, got %d",
                        ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    /* mark the array as validated so ProcessTypeChunk accepts it */
    ctx->is_valid_array = 1;
    ctx->new_count = 1;
    *tsp = ++ts;
    return Py_None;
}
/* Walk a PEP-3118 buffer format string `ts`, verifying it against the
   C type layout recorded in `ctx`.  Returns a pointer past the consumed
   portion on success (the full string, or past a closing '}' when called
   recursively for a 'T{...}' sub-struct); returns NULL with a Python
   exception set on any mismatch or malformed format. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
  int got_Z = 0;
  while (1) {
    switch(*ts) {
      case 0:
        /* end of format: flush the pending chunk and require that the
           expected type was fully consumed */
        if (ctx->enc_type != 0 && ctx->head == NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        if (ctx->head != NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        return ts;
      case ' ':
      case '\r':
      case '\n':
        /* insignificant whitespace between codes */
        ++ts;
        break;
      case '<':
        /* explicit little-endian: only valid on little-endian hosts */
        if (!__Pyx_Is_Little_Endian()) {
          PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '>':
      case '!':
        /* explicit big-endian: only valid on big-endian hosts */
        if (__Pyx_Is_Little_Endian()) {
          PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '=':
      case '@':
      case '^':
        /* pack-mode prefix applies to the following codes */
        ctx->new_packmode = *ts++;
        break;
      case 'T':
      {
        /* 'T{...}': recursively check a nested struct, repeated
           struct_count times */
        const char* ts_after_sub;
        size_t i, struct_count = ctx->new_count;
        size_t struct_alignment = ctx->struct_alignment;
        ctx->new_count = 1;
        ++ts;
        if (*ts != '{') {
          PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
          return NULL;
        }
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        ctx->enc_type = 0;
        ctx->enc_count = 0;
        ctx->struct_alignment = 0;
        ++ts;
        ts_after_sub = ts;
        for (i = 0; i != struct_count; ++i) {
          ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
          if (!ts_after_sub) return NULL;
        }
        ts = ts_after_sub;
        /* restore the enclosing struct's alignment after the sub-struct */
        if (struct_alignment) ctx->struct_alignment = struct_alignment;
      }
        break;
      case '}':
      {
        /* end of a 'T{...}' sub-struct: flush and apply trailing padding */
        size_t alignment = ctx->struct_alignment;
        ++ts;
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        ctx->enc_type = 0;
        if (alignment && ctx->fmt_offset % alignment) {
          ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
        }
      }
        return ts;
      case 'x':
        /* pad bytes: advance the offset without consuming a field */
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        ctx->fmt_offset += ctx->new_count;
        ctx->new_count = 1;
        ctx->enc_count = 0;
        ctx->enc_type = 0;
        ctx->enc_packmode = ctx->new_packmode;
        ++ts;
        break;
      case 'Z':
        /* complex prefix: must be followed by a float code */
        got_Z = 1;
        ++ts;
        if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
          __Pyx_BufFmt_RaiseUnexpectedChar('Z');
          return NULL;
        }
        CYTHON_FALLTHROUGH;
      case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
      case 'l': case 'L': case 'q': case 'Q':
      case 'f': case 'd': case 'g':
      case 'O': case 'p':
        /* same type as the pending run: just extend its count */
        if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
            ctx->enc_packmode == ctx->new_packmode) {
          ctx->enc_count += ctx->new_count;
          ctx->new_count = 1;
          got_Z = 0;
          ++ts;
          break;
        }
        CYTHON_FALLTHROUGH;
      case 's':
        /* different type: flush the pending run, then start a new one */
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        ctx->enc_count = ctx->new_count;
        ctx->enc_packmode = ctx->new_packmode;
        ctx->enc_type = *ts;
        ctx->is_complex = got_Z;
        ++ts;
        ctx->new_count = 1;
        got_Z = 0;
        break;
      case ':':
        /* ':name:' field annotations are skipped */
        ++ts;
        while(*ts != ':') ++ts;
        ++ts;
        break;
      case '(':
        /* array-dimension suffix, e.g. "(2,3)" */
        if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
        break;
      default:
      {
        /* a decimal repeat count for the next code */
        int number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        ctx->new_count = (size_t)number;
      }
    }
  }
}
/* TypeInfoCompare */
/* Deep structural equality for two __Pyx_TypeInfo descriptors.
   Returns 1 when they describe the same layout, 0 otherwise.  A NULL
   descriptor never matches; byte-group ('H') types compare by size only. */
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
    int idx;
    if (!a || !b)
        return 0;               /* missing descriptor never matches */
    if (a == b)
        return 1;               /* identical descriptor object */
    if (a->size != b->size || a->typegroup != b->typegroup ||
        a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
        /* bytes-typed ('H') descriptors only need matching sizes */
        return (a->typegroup == 'H' || b->typegroup == 'H') ?
                   (a->size == b->size) : 0;
    }
    /* fixed array dimensions must match element-wise (loop is a no-op
       when ndim == 0) */
    for (idx = 0; idx < a->ndim; idx++) {
        if (a->arraysize[idx] != b->arraysize[idx])
            return 0;
    }
    if (a->typegroup != 'S')
        return 1;
    /* struct types: compare flags and recurse into each field */
    if (a->flags != b->flags)
        return 0;
    if (!a->fields && !b->fields)
        return 1;
    if (!a->fields || !b->fields)
        return 0;
    for (idx = 0; a->fields[idx].type && b->fields[idx].type; idx++) {
        __Pyx_StructField *fa = a->fields + idx;
        __Pyx_StructField *fb = b->fields + idx;
        if (fa->offset != fb->offset || !__pyx_typeinfo_cmp(fa->type, fb->type))
            return 0;
    }
    /* equal only when both field lists ended together */
    return !a->fields[idx].type && !b->fields[idx].type;
}
/* MemviewSliceValidateAndInit */
/* Validate the stride of dimension `dim` of `buf` against the memoryview
   axis spec `spec`.  Returns 1 on success, 0 with a ValueError set on
   failure.  Axes of extent <= 1 are always acceptable. */
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
    if (buf->shape[dim] <= 1)
        return 1;
    if (!buf->strides) {
        /* exporter gave no stride info: buffer is implicitly C-contiguous */
        if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not contiguous in "
                         "dimension %d", dim);
            return 0;
        }
        if (spec & (__Pyx_MEMVIEW_PTR)) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not indirect in "
                         "dimension %d", dim);
            return 0;
        }
        if (buf->suboffsets) {
            PyErr_SetString(PyExc_ValueError,
                            "Buffer exposes suboffsets but no strides");
            return 0;
        }
        return 1;
    }
    if (spec & __Pyx_MEMVIEW_CONTIG) {
        if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
            /* indirect axes store pointers, so the stride is pointer-sized */
            if (buf->strides[dim] != sizeof(void *)) {
                PyErr_Format(PyExc_ValueError,
                             "Buffer is not indirectly contiguous "
                             "in dimension %d.", dim);
                return 0;
            }
        } else if (buf->strides[dim] != buf->itemsize) {
            PyErr_SetString(PyExc_ValueError,
                            "Buffer and memoryview are not contiguous "
                            "in the same dimension.");
            return 0;
        }
    }
    if (spec & __Pyx_MEMVIEW_FOLLOW) {
        /* 'follow' axes only require |stride| >= itemsize */
        Py_ssize_t axis_stride = buf->strides[dim];
        if (axis_stride < 0)
            axis_stride = -axis_stride;
        if (axis_stride < buf->itemsize) {
            PyErr_SetString(PyExc_ValueError,
                            "Buffer and memoryview are not contiguous "
                            "in the same dimension.");
            return 0;
        }
    }
    return 1;
}
/* Validate the suboffset of dimension `dim` of `buf` against the axis
   spec: DIRECT axes must have no (non-negative) suboffset, PTR axes must
   have one.  Returns 1 on success, 0 with a ValueError set on failure. */
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
    const int has_suboffset = (buf->suboffsets && buf->suboffsets[dim] >= 0);
    if ((spec & __Pyx_MEMVIEW_DIRECT) && has_suboffset) {
        PyErr_Format(PyExc_ValueError,
                     "Buffer not compatible with direct access "
                     "in dimension %d.", dim);
        return 0;
    }
    if ((spec & __Pyx_MEMVIEW_PTR) && !has_suboffset) {
        PyErr_Format(PyExc_ValueError,
                     "Buffer is not indirectly accessible "
                     "in dimension %d.", dim);
        return 0;
    }
    return 1;
}
/* Verify that `buf` is fully Fortran- or C-contiguous as requested by
   `c_or_f_flag` (no-op when neither flag is set).  Walks the axes in
   column-major or row-major order, accumulating the expected stride in
   elements; axes of extent <= 1 are exempt.  Returns 1 on success,
   0 with a ValueError set on failure. */
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
    int dim;
    if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
        /* Fortran order: strides grow from the first axis outward */
        Py_ssize_t expected = 1;
        for (dim = 0; dim < ndim; dim++) {
            if (buf->shape[dim] > 1 &&
                buf->strides[dim] != expected * buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not fortran contiguous.");
                return 0;
            }
            expected *= buf->shape[dim];
        }
    } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
        /* C order: strides grow from the last axis backward */
        Py_ssize_t expected = 1;
        for (dim = ndim - 1; dim >= 0; dim--) {
            if (buf->shape[dim] > 1 &&
                buf->strides[dim] != expected * buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not C contiguous.");
                return 0;
            }
            expected *= buf->shape[dim];
        }
    }
    return 1;
}
/* Validate `original_obj` against the requested axis specs, dtype,
   dimensionality and contiguity, and on success initialize *memviewslice
   from it.  Reuses the object directly when it is already a memoryview
   of a matching dtype; otherwise acquires a new buffer view.
   Returns 0 on success, -1 with a Python exception set on failure. */
static int __Pyx_ValidateAndInit_memviewslice(
                int *axes_specs,
                int c_or_f_flag,
                int buf_flags,
                int ndim,
                __Pyx_TypeInfo *dtype,
                __Pyx_BufFmt_StackElem stack[],
                __Pyx_memviewslice *memviewslice,
                PyObject *original_obj)
{
    struct __pyx_memoryview_obj *memview, *new_memview;
    __Pyx_RefNannyDeclarations
    Py_buffer *buf;
    int i, spec = 0, retval = -1;
    __Pyx_BufFmt_Context ctx;
    int from_memoryview = __pyx_memoryview_check(original_obj);
    __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
    if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
                                                original_obj)->typeinfo)) {
        /* already a memoryview of the right dtype: reuse without copying */
        memview = (struct __pyx_memoryview_obj *) original_obj;
        new_memview = NULL;
    } else {
        /* acquire a fresh buffer view; new_memview tracks ownership so the
           fail path can release it */
        memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
                                            original_obj, buf_flags, 0, dtype);
        new_memview = memview;
        if (unlikely(!memview))
            goto fail;
    }
    buf = &memview->view;
    if (buf->ndim != ndim) {
        PyErr_Format(PyExc_ValueError,
                "Buffer has wrong number of dimensions (expected %d, got %d)",
                ndim, buf->ndim);
        goto fail;
    }
    if (new_memview) {
        /* only newly acquired buffers need their format string checked */
        __Pyx_BufFmt_Init(&ctx, stack, dtype);
        if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
    }
    if ((unsigned) buf->itemsize != dtype->size) {
        PyErr_Format(PyExc_ValueError,
                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
                     "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
                     buf->itemsize,
                     (buf->itemsize > 1) ? "s" : "",
                     dtype->name,
                     dtype->size,
                     (dtype->size > 1) ? "s" : "");
        goto fail;
    }
    /* per-axis stride and suboffset validation against the specs */
    for (i = 0; i < ndim; i++) {
        spec = axes_specs[i];
        if (!__pyx_check_strides(buf, i, ndim, spec))
            goto fail;
        if (!__pyx_check_suboffsets(buf, i, ndim, spec))
            goto fail;
    }
    /* whole-buffer contiguity check (only possible when strides exist) */
    if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))
        goto fail;
    if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
                                         new_memview != NULL) == -1)) {
        goto fail;
    }
    retval = 0;
    goto no_fail;
fail:
    Py_XDECREF(new_memview);
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}
/* ObjectToMemviewSlice */
/* Coerce `obj` to a 1-D C-contiguous double memoryview slice.  Py_None
   passes straight through; on failure the returned slice has NULL
   memview/data and a Python exception is set. */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *obj, int writable_flag) {
    __Pyx_memviewslice mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem fmt_stack[1];
    int specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
    if (obj == Py_None) {
        /* None maps to a "None slice": memview tag set, data left NULL */
        mvs.memview = (struct __pyx_memoryview_obj *) Py_None;
        return mvs;
    }
    if (unlikely(__Pyx_ValidateAndInit_memviewslice(specs, __Pyx_IS_C_CONTIG,
            (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1,
            &__Pyx_TypeInfo_double, fmt_stack, &mvs, obj) == -1)) {
        /* validation failed: hand back an empty slice, exception is set */
        mvs.memview = NULL;
        mvs.data = NULL;
    }
    return mvs;
}
/* ObjectToMemviewSlice */
/* Coerce `obj` to a 3-D double memoryview slice whose last axis is
   C-contiguous.  Py_None passes straight through; on failure the returned
   slice has NULL memview/data and a Python exception is set. */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(PyObject *obj, int writable_flag) {
    __Pyx_memviewslice mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem fmt_stack[1];
    int specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
    if (obj == Py_None) {
        /* None maps to a "None slice": memview tag set, data left NULL */
        mvs.memview = (struct __pyx_memoryview_obj *) Py_None;
        return mvs;
    }
    if (unlikely(__Pyx_ValidateAndInit_memviewslice(specs, __Pyx_IS_C_CONTIG,
            (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3,
            &__Pyx_TypeInfo_double, fmt_stack, &mvs, obj) == -1)) {
        /* validation failed: hand back an empty slice, exception is set */
        mvs.memview = NULL;
        mvs.data = NULL;
    }
    return mvs;
}
/* ObjectToMemviewSlice */
/* Coerce `obj` to a 2-D double memoryview slice whose last axis is
   C-contiguous.  Py_None passes straight through; on failure the returned
   slice has NULL memview/data and a Python exception is set. */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) {
    __Pyx_memviewslice mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem fmt_stack[1];
    int specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
    if (obj == Py_None) {
        /* None maps to a "None slice": memview tag set, data left NULL */
        mvs.memview = (struct __pyx_memoryview_obj *) Py_None;
        return mvs;
    }
    if (unlikely(__Pyx_ValidateAndInit_memviewslice(specs, __Pyx_IS_C_CONTIG,
            (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2,
            &__Pyx_TypeInfo_double, fmt_stack, &mvs, obj) == -1)) {
        /* validation failed: hand back an empty slice, exception is set */
        mvs.memview = NULL;
        mvs.data = NULL;
    }
    return mvs;
}
/* CIntFromPyVerify */
/* Helper macros for the __Pyx_PyInt_As_* converters below: evaluate
   `func_value` into a temporary of `func_type`, check that it round-trips
   through `target_type`, and return it cast to `target_type`.  On a value
   that does not fit, jump to the caller-provided raise_overflow /
   raise_neg_overflow labels.  The _EXC variant additionally propagates an
   already-set Python error when the call returned the -1 error sentinel. */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
    {\
        func_type value = func_value;\
        if (sizeof(target_type) < sizeof(func_type)) {\
            if (unlikely(value != (func_type) (target_type) value)) {\
                func_type zero = 0;\
                if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
                    return (target_type) -1;\
                if (is_unsigned && unlikely(value < zero))\
                    goto raise_neg_overflow;\
                else\
                    goto raise_overflow;\
            }\
        }\
        return (target_type) value;\
    }
/* MemviewSliceCopyTemplate */
/* Allocate a new contiguous array of the same shape as *from_mvs (layout
   given by `mode`, 'c' or 'fortran'), wrap it in a memoryview, and copy
   the slice contents into it.  Indirect (suboffset) source dimensions are
   rejected.  On failure returns a slice with NULL memview/data and a
   Python exception set. */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
                                 const char *mode, int ndim,
                                 size_t sizeof_dtype, int contig_flag,
                                 int dtype_is_object)
{
    __Pyx_RefNannyDeclarations
    int i;
    __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
    struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
    Py_buffer *buf = &from_memview->view;
    PyObject *shape_tuple = NULL;
    PyObject *temp_int = NULL;
    struct __pyx_array_obj *array_obj = NULL;
    struct __pyx_memoryview_obj *memview_obj = NULL;
    __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
    /* indirect dimensions cannot be copied with a flat memcpy-style copy */
    for (i = 0; i < ndim; i++) {
        if (from_mvs->suboffsets[i] >= 0) {
            PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
                                           "indirect dimensions (axis %d)", i);
            goto fail;
        }
    }
    /* build the shape tuple for the new array from the source extents */
    shape_tuple = PyTuple_New(ndim);
    if (unlikely(!shape_tuple)) {
        goto fail;
    }
    __Pyx_GOTREF(shape_tuple);
    for(i = 0; i < ndim; i++) {
        temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
        if(unlikely(!temp_int)) {
            goto fail;
        } else {
            /* tuple steals the reference; NULL out so the fail path
               does not double-decref */
            PyTuple_SET_ITEM(shape_tuple, i, temp_int);
            temp_int = NULL;
        }
    }
    array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
    if (unlikely(!array_obj)) {
        goto fail;
    }
    __Pyx_GOTREF(array_obj);
    memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
                                    (PyObject *) array_obj, contig_flag,
                                    dtype_is_object,
                                    from_mvs->memview->typeinfo);
    if (unlikely(!memview_obj))
        goto fail;
    if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
        goto fail;
    /* element-wise copy from the source slice into the new buffer */
    if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
                                                dtype_is_object) < 0))
        goto fail;
    goto no_fail;
fail:
    __Pyx_XDECREF(new_mvs.memview);
    new_mvs.memview = NULL;
    new_mvs.data = NULL;
no_fail:
    __Pyx_XDECREF(shape_tuple);
    __Pyx_XDECREF(temp_int);
    __Pyx_XDECREF(array_obj);
    __Pyx_RefNannyFinishContext();
    return new_mvs;
}
/* CIntFromPy */
/* Convert a Python integer-like object to a C `int`, raising
   OverflowError when the value does not fit (with a dedicated message for
   negative values when `int` is unsigned on this platform) and TypeError
   (via __Pyx_PyNumber_IntOrLong) for non-integers.  Returns (int)-1 on
   error with a Python exception set; callers must check PyErr_Occurred()
   to distinguish it from a legitimate -1. */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
    const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
    /* compile-time detection of whether `int` is unsigned on this target */
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(int) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (int) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
            /* fast path: unpack small PyLongs directly from their digits */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case 0: return (int) 0;
                case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
                            return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* portable negativity check for non-CPython implementations */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (int) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(int) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
#if CYTHON_USE_PYLONG_INTERNALS
            /* signed fast path: small positive and negative digit counts */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (int) 0;
                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +digits[0])
                case -2:
                    if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(int) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
            /* slow fallback: decode the PyLong through its byte representation */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            int val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
 #if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
 #endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (int) -1;
        }
    } else {
        /* not an int: coerce via __index__/__int__ and retry once */
        int val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (int) -1;
        val = __Pyx_PyInt_As_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to int");
    return (int) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to int");
    return (int) -1;
}
/* CIntFromPy */
/* Convert a Python integer-like object to a C `long`; mirror of
   __Pyx_PyInt_As_int above for the `long` target type.  Raises
   OverflowError when the value does not fit and returns (long)-1 on
   error with a Python exception set; callers must check PyErr_Occurred()
   to distinguish it from a legitimate -1. */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
    const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
    /* compile-time detection of whether `long` is unsigned on this target */
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(long) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (long) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
            /* fast path: unpack small PyLongs directly from their digits */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case 0: return (long) 0;
                case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
                            return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* portable negativity check for non-CPython implementations */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (long) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(long) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
#if CYTHON_USE_PYLONG_INTERNALS
            /* signed fast path: small positive and negative digit counts */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (long) 0;
                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +digits[0])
                case -2:
                    if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(long) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
            /* slow fallback: decode the PyLong through its byte representation */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            long val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
 #if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
 #endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (long) -1;
        }
    } else {
        /* not an int: coerce via __index__/__int__ and retry once */
        long val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (long) -1;
        val = __Pyx_PyInt_As_long(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to long");
    return (long) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to long");
    return (long) -1;
}
/* CIntToPy */
/* Convert a C `int` to a new Python integer object, choosing the
   narrowest CPython constructor whose range covers `int` at compile time
   and falling back to _PyLong_FromByteArray for exotic widths.
   Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
    const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
    /* compile-time detection of whether `int` is unsigned on this target */
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(int) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(int) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
        }
    }
    {
        /* fallback for targets where no constructor above applies */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(int),
                                     little, !is_unsigned);
    }
}
/* CIntToPy */
/* Convert a C long to a Python integer object.  Instantiated from the same
 * code template as the other __Pyx_PyInt_From_* helpers, so some sizeof
 * comparisons (e.g. sizeof(long) < sizeof(long)) are trivially false and
 * compiled away. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
    /* Portable signedness probe: (0 - 1) wraps to a huge value iff unsigned. */
    const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(long) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(long) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
        }
    }
    {
        /* Fallback: marshal the raw bytes in native endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(long),
                                     little, !is_unsigned);
    }
}
/* CIntFromPy */
/* Convert an arbitrary Python object to a C char with overflow checking.
 * Mirrors the other __Pyx_PyInt_As_* helpers: a PyInt fast path (Py2), an
 * unrolled PyLong digit fast path when CYTHON_USE_PYLONG_INTERNALS is on,
 * the generic PyLong_As* APIs, then a byte-array fallback.  Returns
 * (char)-1 with an exception set on failure. */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
    const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(char) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (char) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Fast path: read up to 4 internal PyLong digits directly. */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case 0: return (char) 0;
                case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
                case 2:
                    if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
                            return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
                            return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
                            return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
                        }
                    }
                    break;
            }
#endif
            /* Reject negative values before the unsigned conversion. */
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (char) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(char) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
            /* Signed target: digit fast path including negative sizes. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case 0: return (char) 0;
                case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
                case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0])
                case -2:
                    if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
                            return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
                            return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
                            return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
                            return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
                            return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
                            return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(char) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
            /* Last resort for huge values: copy the raw two's-complement
             * bytes of the PyLong into `val`. */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            char val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (char) -1;
        }
    } else {
        /* Not an int at all: coerce via __int__/__index__ and retry once. */
        char val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (char) -1;
        val = __Pyx_PyInt_As_char(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to char");
    return (char) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to char");
    return (char) -1;
}
/* CheckBinaryVersion */
/* Warn (via PyErr_WarnEx) when the Python version the module was compiled
 * against differs from the interpreter now importing it.
 * NOTE(review): the 4-char buffers hold "M.m\0" — for two-digit minor
 * versions (e.g. 3.10) PyOS_snprintf truncates to "3.1", so only the first
 * digit of the minor version is compared; confirm against the Python
 * versions this module targets. */
static int __Pyx_check_binary_version(void) {
    char ctversion[4], rtversion[4];
    PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
    PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
    /* Compare major (index 0) and first minor digit (index 2) only. */
    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
        char message[200];
        PyOS_snprintf(message, sizeof(message),
                      "compiletime version %s of module '%.100s' "
                      "does not match runtime version %s",
                      ctversion, __Pyx_MODULE_NAME, rtversion);
        return PyErr_WarnEx(NULL, message, 1);
    }
    return 0;
}
/* InitStrings */
/* Construct/intern every string constant in the module's string table.
 * Each entry is decoded according to its flags (unicode/str/intern/encoding)
 * and hashed eagerly so later dict lookups cannot fail lazily.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
    while (t->p) {
#if PY_MAJOR_VERSION < 3
        if (t->is_unicode) {
            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
        } else if (t->intern) {
            *t->p = PyString_InternFromString(t->s);
        } else {
            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
        }
#else
        if (t->is_unicode | t->is_str) {
            if (t->intern) {
                *t->p = PyUnicode_InternFromString(t->s);
            } else if (t->encoding) {
                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
            } else {
                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
            }
        } else {
            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
        }
#endif
        if (!*t->p)
            return -1;
        /* Pre-compute and cache the hash; also catches unhashable results. */
        if (PyObject_Hash(*t->p) == -1)
            return -1;
        ++t;
    }
    return 0;
}
/* Build a Python unicode object from a NUL-terminated C string. */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
    const size_t byte_len = strlen(c_str);
    return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)byte_len);
}
/* Length-discarding convenience wrapper around AsStringAndSize. */
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
    Py_ssize_t unused_length;
    return __Pyx_PyObject_AsStringAndSize(o, &unused_length);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
/* Pre-PEP-393 build: return a pointer into the default-encoded bytes object
 * cached on the unicode object; optionally reject non-ASCII bytes.  The
 * returned pointer borrows storage owned by `o`. */
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
    char* defenc_c;
    PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
    if (!defenc) return NULL;
    defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
    {
        char* end = defenc_c + PyBytes_GET_SIZE(defenc);
        char* c;
        for (c = defenc_c; c < end; c++) {
            if ((unsigned char) (*c) >= 128) {
                /* Re-run the ASCII encoder purely to raise the proper
                 * UnicodeEncodeError for the caller. */
                PyUnicode_AsASCIIString(o);
                return NULL;
            }
        }
    }
#endif
    *length = PyBytes_GET_SIZE(defenc);
    return defenc_c;
}
#else
/* PEP-393 build: use the compact-ASCII fast path or the cached UTF-8
 * representation. */
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
    if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
    if (likely(PyUnicode_IS_ASCII(o))) {
        *length = PyUnicode_GET_LENGTH(o);
        return PyUnicode_AsUTF8(o);
    } else {
        /* Non-ASCII not allowed: raise UnicodeEncodeError via the encoder. */
        PyUnicode_AsASCIIString(o);
        return NULL;
    }
#else
    return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
/* Return a char* view of `o` (unicode / bytearray / bytes) plus its length.
 * The pointer borrows storage owned by `o` and is valid only while `o`
 * stays alive.  Returns NULL with an exception set on failure. */
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
    if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
            __Pyx_sys_getdefaultencoding_not_ascii &&
#endif
            PyUnicode_Check(o)) {
        return __Pyx_PyUnicode_AsStringAndSize(o, length);
    } else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
    if (PyByteArray_Check(o)) {
        *length = PyByteArray_GET_SIZE(o);
        return PyByteArray_AS_STRING(o);
    } else
#endif
    {
        /* Generic path: PyBytes_AsStringAndSize raises TypeError for
         * unsupported objects. */
        char* result;
        int r = PyBytes_AsStringAndSize(o, &result, length);
        if (unlikely(r < 0)) {
            return NULL;
        } else {
            return result;
        }
    }
}
/* Truth-test with a branch-free-singleton fast path: True/False/None have
 * known truth values; everything else defers to PyObject_IsTrue. */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    if (x == Py_True)
        return 1;
    if ((x == Py_False) || (x == Py_None))
        return 0;
    return PyObject_IsTrue(x);
}
/* Truth-test `x` and release the reference the caller hands over.
 * Returns -1 when x is NULL (propagating an earlier error). */
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
    if (unlikely(!x))
        return -1;
    {
        const int truth = __Pyx_PyObject_IsTrue(x);
        Py_DECREF(x);
        return truth;
    }
}
/* Handle an __int__/__long__ result of the wrong type.  On Python 3 a
 * strict int subclass is accepted with a DeprecationWarning; anything else
 * raises TypeError.  Consumes (decrefs) `result` on failure. */
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
    if (PyLong_Check(result)) {
        /* int subclass: warn but keep the value (warning may be an error). */
        if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
                "__int__ returned non-int (type %.200s).  "
                "The ability to return an instance of a strict subclass of int "
                "is deprecated, and may be removed in a future version of Python.",
                Py_TYPE(result)->tp_name)) {
            Py_DECREF(result);
            return NULL;
        }
        return result;
    }
#endif
    PyErr_Format(PyExc_TypeError,
                 "__%.4s__ returned non-%.4s (type %.200s)",
                 type_name, type_name, Py_TYPE(result)->tp_name);
    Py_DECREF(result);
    return NULL;
}
/* Coerce `x` to a Python integer via its number protocol (__int__/__long__),
 * returning a new reference.  Real ints are returned unchanged (new ref);
 * wrong result types are routed through WrongResultType above. */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
    PyNumberMethods *m;
#endif
    const char *name = NULL;
    PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
    if (likely(PyLong_Check(x)))
#endif
        return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
    /* Call the nb_int / nb_long slot directly, bypassing attribute lookup. */
    m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
    if (m && m->nb_int) {
        name = "int";
        res = m->nb_int(x);
    }
    else if (m && m->nb_long) {
        name = "long";
        res = m->nb_long(x);
    }
#else
    if (likely(m && m->nb_int)) {
        name = "int";
        res = m->nb_int(x);
    }
#endif
#else
    /* No slot access available: use the generic API, but refuse strings so
     * behaviour matches the slot-based path. */
    if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
        res = PyNumber_Int(x);
    }
#endif
    if (likely(res)) {
#if PY_MAJOR_VERSION < 3
        if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
        if (unlikely(!PyLong_CheckExact(res))) {
#endif
            return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
        }
    }
    else if (!PyErr_Occurred()) {
        PyErr_SetString(PyExc_TypeError,
                        "an integer is required");
    }
    return res;
}
/* Convert an index-like object to Py_ssize_t.  Fast paths for exact
 * PyInt/PyLong (including direct access to up to 4 internal digits);
 * everything else goes through PyNumber_Index.  Returns -1 with an
 * exception set on failure. */
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
  Py_ssize_t ival;
  PyObject *x;
#if PY_MAJOR_VERSION < 3
  if (likely(PyInt_CheckExact(b))) {
    if (sizeof(Py_ssize_t) >= sizeof(long))
        return PyInt_AS_LONG(b);
    else
        return PyInt_AsSsize_t(b);
  }
#endif
  if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
    const digit* digits = ((PyLongObject*)b)->ob_digit;
    const Py_ssize_t size = Py_SIZE(b);
    /* Single-digit (or zero) longs: read the digit directly. */
    if (likely(__Pyx_sst_abs(size) <= 1)) {
        ival = likely(size) ? digits[0] : 0;
        if (size == -1) ival = -ival;
        return ival;
    } else {
      /* Multi-digit fast path, unrolled for |size| in 2..4. */
      switch (size) {
         case 2:
           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -2:
           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case 3:
           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -3:
           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case 4:
           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -4:
           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
      }
    }
#endif
    return PyLong_AsSsize_t(b);
  }
  /* General objects: use the index protocol (__index__), then convert. */
  x = PyNumber_Index(b);
  if (!x) return -1;
  ival = PyInt_AsSsize_t(x);
  Py_DECREF(x);
  return ival;
}
/* Map a C truth value onto a new reference to Py_True or Py_False. */
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
    if (b)
        return __Pyx_NewRef(Py_True);
    return __Pyx_NewRef(Py_False);
}
/* Convert a C size_t to a Python integer (PyInt_FromSize_t handles values
 * larger than LONG_MAX by producing a PyLong). */
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
    return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
temporal_variance_method.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Suneth Warnakulasuriya (https://github.com/sunethwarna)
//
#if !defined(KRATOS_TEMPORAL_VARIANCE_METHOD_H_INCLUDED)
#define KRATOS_TEMPORAL_VARIANCE_METHOD_H_INCLUDED
// System includes
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
// Application includes
#include "custom_methods/temporal_method.h"
#include "custom_utilities/method_utilities.h"
#include "custom_utilities/temporal_method_utilities.h"
namespace Kratos
{
///@addtogroup StatisticsApplication
///@{
///@name Kratos Globals
///@{
namespace TemporalMethods
{
template <class TContainerType, class TContainerItemType, template <class T> class TDataRetrievalFunctor, template <class T> class TDataStorageFunctor>
/// @brief Temporal (time-weighted running) variance statistics method.
///
/// Maintains a running mean and variance of a variable over simulation time
/// via incremental updates (see CalculateMeanAndVariance).  ValueMethod
/// computes the statistics component-wise on the raw variable; NormMethod
/// computes them on a scalar norm of the variable.
///
/// @tparam TContainerType        Container holding the items (e.g. nodes).
/// @tparam TContainerItemType    Item type stored in the container.
/// @tparam TDataRetrievalFunctor Functor template used to read input values.
/// @tparam TDataStorageFunctor   Functor template used to access outputs.
template <class TContainerType, class TContainerItemType, template <class T> class TDataRetrievalFunctor, template <class T> class TDataStorageFunctor>
class TemporalVarianceMethod
{
public:
    template <class TDataType>
    class ValueMethod : public TemporalMethod
    {
    public:
        KRATOS_CLASS_POINTER_DEFINITION(ValueMethod);

        /// Stores variable references; mean and variance outputs must differ.
        ValueMethod(
            ModelPart& rModelPart,
            const std::string& rNormType,
            const Variable<TDataType>& rInputVariable,
            const int EchoLevel,
            const Variable<TDataType>& rOutputMeanVariable,
            const Variable<TDataType>& rOutputVarianceVariable)
            : TemporalMethod(rModelPart, EchoLevel),
              mrInputVariable(rInputVariable),
              mrOutputMeanVariable(rOutputMeanVariable),
              mrOutputVarianceVariable(rOutputVarianceVariable)
        {
            KRATOS_TRY

            KRATOS_ERROR_IF(rOutputMeanVariable == rOutputVarianceVariable)
                << "Same variable is given for mean and variance in value variance method with input variable "
                << rInputVariable
                       .Name()
                << ". Please provide two different variables. [ variable = "
                << rOutputMeanVariable
                       .Name()
                << " ].\n";

            KRATOS_CATCH("");
        }

        /// Advances mean and variance by one time step for every container item.
        void CalculateStatistics() override
        {
            TContainerType& r_container =
                MethodUtilities::GetDataContainer<TContainerType>(this->GetModelPart());

            const double delta_time = this->GetDeltaTime();
            const double old_total_time = this->GetTotalTime();
            const double total_time = old_total_time + delta_time;

            const int number_of_items = r_container.size();
#pragma omp parallel for
            for (int i = 0; i < number_of_items; ++i)
            {
                TContainerItemType& r_item = *(r_container.begin() + i);
                const TDataType& r_input_value =
                    TDataRetrievalFunctor<TContainerItemType>()(r_item, mrInputVariable);
                TDataType& r_output_mean_value =
                    TDataStorageFunctor<TContainerItemType>()(r_item, mrOutputMeanVariable);
                TDataType& r_output_variance_value =
                    TDataStorageFunctor<TContainerItemType>()(r_item, mrOutputVarianceVariable);

                // Guard against mismatched dynamic sizes (e.g. vectors).
                MethodUtilities::DataTypeSizeChecker(r_input_value, r_output_mean_value);
                MethodUtilities::DataTypeSizeChecker(r_input_value, r_output_variance_value);

                TemporalVarianceMethod::CalculateMeanAndVariance<TDataType>(
                    r_output_mean_value, r_output_variance_value, r_input_value,
                    delta_time, old_total_time, total_time);
            }

            KRATOS_INFO_IF("TemporalValueVarianceMethod", this->GetEchoLevel() > 1)
                << "Calculated temporal value variance for "
                << mrInputVariable.Name() << " input variable with "
                << mrOutputMeanVariable.Name() << " mean variable and "
                << mrOutputVarianceVariable.Name() << " variance variable for "
                << this->GetModelPart().Name() << ".\n";
        }

        /// Initializes both output variables from the input variable's layout.
        void InitializeStatisticsVariables() override
        {
            TContainerType& r_container =
                MethodUtilities::GetDataContainer<TContainerType>(this->GetModelPart());

            auto& initializer_method =
                TemporalMethodUtilities::InitializeVariables<TContainerType, TContainerItemType, TDataRetrievalFunctor, TDataStorageFunctor, TDataType>;
            initializer_method(r_container, mrOutputMeanVariable, mrInputVariable);
            initializer_method(r_container, mrOutputVarianceVariable, mrInputVariable);

            KRATOS_INFO_IF("TemporalValueVarianceMethod", this->GetEchoLevel() > 0)
                << "Initialized temporal value variance method for "
                << mrInputVariable.Name() << " input variable with "
                << mrOutputMeanVariable.Name() << " mean variable and "
                << mrOutputVarianceVariable.Name() << " variance variable for "
                << this->GetModelPart().Name() << ".\n";
        }

    private:
        const Variable<TDataType>& mrInputVariable;        // statistics source
        const Variable<TDataType>& mrOutputMeanVariable;   // running mean
        const Variable<TDataType>& mrOutputVarianceVariable; // running variance
    };

    template <class TDataType>
    class NormMethod : public TemporalMethod
    {
    public:
        KRATOS_CLASS_POINTER_DEFINITION(NormMethod);

        /// Stores variable references and the norm type; outputs must differ.
        NormMethod(
            ModelPart& rModelPart,
            const std::string& rNormType,
            const Variable<TDataType>& rInputVariable,
            const int EchoLevel,
            const Variable<double>& rOutputMeanVariable,
            const Variable<double>& rOutputVarianceVariable)
            : TemporalMethod(rModelPart, EchoLevel),
              mNormType(rNormType),
              mrInputVariable(rInputVariable),
              mrOutputMeanVariable(rOutputMeanVariable),
              mrOutputVarianceVariable(rOutputVarianceVariable)
        {
            KRATOS_TRY

            KRATOS_ERROR_IF(rOutputMeanVariable == rOutputVarianceVariable)
                << "Same variable is given for mean and variance in norm variance method with input variable "
                << rInputVariable
                       .Name()
                << ". Please provide two different variables. [ variable = "
                << rOutputMeanVariable
                       .Name()
                << " ].\n";

            KRATOS_CATCH("");
        }

        /// Advances mean/variance of the chosen norm for every container item.
        void CalculateStatistics() override
        {
            TContainerType& r_container =
                MethodUtilities::GetDataContainer<TContainerType>(this->GetModelPart());

            const auto& norm_method =
                MethodUtilities::GetNormMethod(mrInputVariable, mNormType);

            const double delta_time = this->GetDeltaTime();
            const double old_total_time = this->GetTotalTime();
            const double total_time = old_total_time + delta_time;

            const int number_of_items = r_container.size();
#pragma omp parallel for
            for (int i = 0; i < number_of_items; ++i)
            {
                TContainerItemType& r_item = *(r_container.begin() + i);
                const TDataType& r_input_value =
                    TDataRetrievalFunctor<TContainerItemType>()(r_item, mrInputVariable);
                const double input_norm_value = norm_method(r_input_value);
                double& r_output_mean_value =
                    TDataStorageFunctor<TContainerItemType>()(r_item, mrOutputMeanVariable);
                double& r_output_variance_value =
                    TDataStorageFunctor<TContainerItemType>()(r_item, mrOutputVarianceVariable);

                TemporalVarianceMethod::CalculateMeanAndVariance<double>(
                    r_output_mean_value, r_output_variance_value,
                    input_norm_value, delta_time, old_total_time, total_time);
            }

            KRATOS_INFO_IF("TemporalNormVarianceMethod", this->GetEchoLevel() > 1)
                << "Calculated temporal norm variance for " << mrInputVariable.Name()
                << " input variable with " << mrOutputMeanVariable.Name()
                << " mean variable and " << mrOutputVarianceVariable.Name()
                << " variance variable for " << this->GetModelPart().Name() << ".\n";
        }

        // norm output variable initialization
        void InitializeStatisticsVariables() override
        {
            TContainerType& r_container =
                MethodUtilities::GetDataContainer<TContainerType>(this->GetModelPart());

            auto& initializer_method =
                TemporalMethodUtilities::InitializeVariables<TContainerType, TContainerItemType, TDataStorageFunctor>;
            initializer_method(r_container, mrOutputMeanVariable, 0.0);
            initializer_method(r_container, mrOutputVarianceVariable, 0.0);

            KRATOS_INFO_IF("TemporalNormVarianceMethod", this->GetEchoLevel() > 0)
                << "Initialized temporal norm variance method for "
                << mrInputVariable.Name() << " input variable with "
                << mrOutputMeanVariable.Name() << " mean variable and "
                << mrOutputVarianceVariable.Name() << " variance variable for "
                << this->GetModelPart().Name() << ".\n";
        }

    private:
        const std::string mNormType;                      // e.g. "magnitude"
        const Variable<TDataType>& mrInputVariable;       // statistics source
        const Variable<double>& mrOutputMeanVariable;     // running mean of norm
        const Variable<double>& mrOutputVarianceVariable; // running variance of norm
    };

    /// Factory: builds one method object per (input, mean, variance) triple
    /// listed in Params.  rNormType "none" selects ValueMethod; anything else
    /// selects NormMethod on double-typed outputs.
    std::vector<TemporalMethod::Pointer> static CreateTemporalMethodObject(
        ModelPart& rModelPart, const std::string& rNormType, const int EchoLevel, Parameters Params)
    {
        KRATOS_TRY

        Parameters default_parameters = Parameters(R"(
            {
                "input_variables"           : [],
                "output_mean_variables"     : [],
                "output_variance_variables" : []
            })");
        Params.RecursivelyValidateAndAssignDefaults(default_parameters);

        const std::vector<std::string>& input_variable_names_list =
            Params["input_variables"].GetStringArray();
        const std::vector<std::string>& output_variable_1_names_list =
            Params["output_mean_variables"].GetStringArray();
        const std::vector<std::string>& output_variable_2_names_list =
            Params["output_variance_variables"].GetStringArray();

        std::vector<TemporalMethod::Pointer> method_list;
        if (rNormType == "none") // for non norm types
        {
            // Input/output lists must be type-compatible and equally sized.
            MethodUtilities::CheckInputOutputVariables(
                input_variable_names_list, output_variable_1_names_list);
            MethodUtilities::CheckInputOutputVariables(
                input_variable_names_list, output_variable_2_names_list);

            const int number_of_variables = input_variable_names_list.size();
            for (int i = 0; i < number_of_variables; ++i)
            {
                const std::string& r_variable_input_name = input_variable_names_list[i];
                const std::string& r_variable_1_output_name =
                    output_variable_1_names_list[i];
                const std::string& r_variable_2_output_name =
                    output_variable_2_names_list[i];

                ADD_TEMPORAL_VALUE_METHOD_TWO_OUTPUT_VARIABLE_OBJECT(
                    rModelPart, rNormType, r_variable_input_name, EchoLevel,
                    r_variable_1_output_name, r_variable_2_output_name, method_list, ValueMethod)
            }
        }
        else // for values with norms
        {
            // Norm statistics are scalars, so outputs must be double variables.
            MethodUtilities::CheckVariableType<double>(output_variable_1_names_list);
            MethodUtilities::CheckVariableType<double>(output_variable_2_names_list);

            const int number_of_variables = input_variable_names_list.size();
            for (int i = 0; i < number_of_variables; ++i)
            {
                const std::string& r_variable_input_name = input_variable_names_list[i];
                const std::string& r_variable_1_output_name =
                    output_variable_1_names_list[i];
                const std::string& r_variable_2_output_name =
                    output_variable_2_names_list[i];

                ADD_TEMPORAL_NORM_METHOD_TWO_OUTPUT_VARIABLE_OBJECT(
                    rModelPart, rNormType, r_variable_input_name, EchoLevel,
                    r_variable_1_output_name, r_variable_2_output_name, method_list, NormMethod)
            }
        }

        return method_list;

        KRATOS_CATCH("");
    }

private:
    /// Incremental time-weighted update:
    ///   mean_new = (mean_old * T_old + x * dt) / T_new
    ///   var_new  = ((var_old + mean_old^2) * T_old + x^2 * dt) / T_new - mean_new^2
    /// i.e. the running E[x^2] - (E[x])^2 form with time as the weight.
    template <class TDataType>
    void static CalculateMeanAndVariance(
        TDataType& rMean,
        TDataType& rVariance,
        const TDataType& rNewDataPoint,
        const double DeltaTime,
        const double OldTotalTime,
        const double CurrentTotalTime)
    {
        const TDataType new_mean =
            (rMean * OldTotalTime + rNewDataPoint * DeltaTime) * (1.0 / CurrentTotalTime);
        rVariance =
            ((rVariance + MethodUtilities::RaiseToPower<TDataType>(rMean, 2)) * OldTotalTime +
             MethodUtilities::RaiseToPower<TDataType>(rNewDataPoint, 2) * DeltaTime) *
                (1 / CurrentTotalTime) -
            MethodUtilities::RaiseToPower<TDataType>(new_mean, 2);
        rMean = new_mean;
    }
};
} // namespace TemporalMethods
} // namespace Kratos
#endif // KRATOS_TEMPORAL_VARIANCE_METHOD_H_INCLUDED |
pagerank.c | #include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#include <omp.h>
#include "mt19937p.h"
#define g(x, y) (g[(y)*n+(x)])
/* One sweep of PageRank updates over this thread's block of `count` nodes
 * starting at `start`.  In-block neighbours are read from the freshest
 * values in wnew; out-of-block neighbours use the previous iterate w.
 * New values are staged into wlocal.  Returns 1 once the block residual
 * drops below a size-scaled tolerance, 0 otherwise. */
int run_block(int n, double d, int* restrict g, double* restrict w, double* restrict wnew, int* restrict degree, int start, int count, double* restrict wlocal)
{
    double residual = 0.0;
    for (int row = 0; row < count; ++row) {
        const int node = row + start;
        double incoming = 0.0;
        /* Nodes before the block: previous iterate. */
        for (int src = 0; src < start; ++src) {
            if (g(src, node)) {
                incoming += w[src] / (double)degree[src];
            }
        }
        /* Nodes inside the block: freshest staged values. */
        for (int src = start; src < start + count; ++src) {
            if (g(src, node)) {
                incoming += wnew[src] / (double)degree[src];
            }
        }
        /* Nodes after the block: previous iterate again. */
        for (int src = start + count; src < n; ++src) {
            if (g(src, node)) {
                incoming += w[src] / (double)degree[src];
            }
        }
        const double updated = ((1.0 - d) / (double)n) + (d * incoming);
        residual += fabs(wnew[node] - updated);
        wlocal[row] = updated;
    }
    return residual < ((double)count) / (1000000.0 * (double)n);
}
/**
* Pr(x) = (1-d)/n + d*sum_{n in g(n,x)}(Pr(n)/(outdegree n))
* Runs 1 iteration of pagerank
* Returns 1 if done, 0 otherwise
*/
int run_iteration(int n, double d, int* restrict g, double* restrict w, double* restrict wnew, int* restrict degree)
{
    /* Each thread owns a contiguous block of nodes; the iteration is done
       only when EVERY thread's block has converged (logical-AND reduction). */
    int iterationDone = 1;
#pragma omp parallel shared(w, wnew) reduction(&& : iterationDone)
    {
        int this_thread = omp_get_thread_num(), num_threads = omp_get_num_threads();
        /* Static block partition; the last thread absorbs the remainder. */
        int start = (n/num_threads) * this_thread;
        int count;
        if (this_thread == num_threads - 1) {
            count = n - start;
        } else {
            count = ((n/num_threads) * (this_thread + 1)) - start;
        }
        /* Scratch buffer so run_block can stage new values without writing
           into wnew mid-sweep. */
        double* wlocal = (double*)calloc(count, sizeof(double));
        memcpy(wlocal, wnew+start, count * sizeof(double));
        int done = 0;
        /* Repeat local sweeps until this block converges, reading stale (w)
           values for nodes owned by other threads. */
        while (!done) {
            done = run_block(n, d, g, w, wnew, degree, start, count, wlocal);
            memcpy(wnew+start, wlocal, count * sizeof(double));
        }
        free(wlocal);
#pragma omp barrier
        /* After all blocks settle, publish wnew into w and accumulate this
           block's global convergence test into the reduction variable. */
        for(int i=start; i<start+count; i++){
            iterationDone = iterationDone && (fabs(w[i] - wnew[i]) < 1.0/(1000.0 * (double)n));
            w[i] = wnew[i];
        }
    }
    return iterationDone;
}
/**
*
*/
/* Iterates PageRank until convergence and returns the iteration count.
   On entry w holds the initial weight vector (length n); on exit it holds
   the converged PageRank weights. */
int pagerank(int n, double d, int* restrict g, double* restrict w)
{
    int iterations = 0;
    double* restrict wnew = (double*) calloc(n, sizeof(double));
    memcpy(wnew, w, n * sizeof(double));
    //compute degree of each item prior
    /* degree[i] = out-degree of node i: number of j with an edge i -> j
       (g(i,j) under the g(x,y) indexing macro). */
    int* restrict degree = (int*) calloc(n, sizeof(int));
    for (int i=0; i<n; ++i) {
        int count = 0;
        for (int j=0; j<n; ++j) {
            count += g(i,j);
        }
        degree[i] = count;
    }
    for (int done = 0; !done; ) {
        done = run_iteration(n, d, g, w, wnew, degree);
        iterations++;
    }
    free(wnew);
    free(degree);
    return iterations;
}
/**
* # The random graph model
*
* Of course, we need to run the shortest path algorithm on something!
* For the sake of keeping things interesting, let's use a simple random graph
* model to generate the input data. The $G(n,p)$ model simply includes each
* possible edge with probability $p$, drops it otherwise -- doesn't get much
* simpler than that. We use a thread-safe version of the Mersenne twister
* random number generator in lieu of coin flips.
*/
/* Generate an n-node G(n,p) random directed graph: each possible edge is
   present independently with probability p; self-edges are removed.
   The Mersenne twister is seeded from the current microsecond clock, so
   each run produces a different graph.  Caller frees the returned array. */
int* gen_graph(int n, double p)
{
    int* g = calloc(n*n, sizeof(int));
    struct mt19937p state;
    struct timeval time;
    gettimeofday(&time, NULL);
    sgenrand((unsigned long)time.tv_usec, &state);
    for (int j = 0; j < n; ++j) {
        for (int i = 0; i < n; ++i)
            g(i, j) = (genrand(&state) < p);
        g(j, j) = 0; //no self edges
    }
    return g;
}
/* Dump the adjacency matrix to `fname`, one space-separated row per line.
   Exits the process if the file cannot be opened. */
void write_matrix(const char* fname, int n, int* g)
{
    FILE* fp = fopen(fname, "w+");
    if (!fp) {
        fprintf(stderr, "Could not open output file: %s\n", fname);
        exit(-1);
    }
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col)
            fprintf(fp, "%d ", g(row,col));
        fprintf(fp, "\n");
    }
    fclose(fp);
}
/* Write the n weights to `fname` on a single space-separated line.
   Exits the process if the file cannot be opened. */
void write_weights(const char* fname, int n, double* w)
{
    FILE* fp = fopen(fname, "w+");
    if (!fp) {
        fprintf(stderr, "Could not open output file: %s\n", fname);
        exit(-1);
    }
    int idx = 0;
    while (idx < n) {
        fprintf(fp, "%g ", w[idx]);
        ++idx;
    }
    fprintf(fp, "\n");
    fclose(fp);
}
/* Sum of all weights (for a stochastic PageRank vector this stays near 1).
   __restrict is the compiler-extension spelling that works in both C and
   C++ translation units; semantics match the original `restrict`. */
double checksum(const double* __restrict w, int n) {
    double total = 0.0;
    for (int idx = 0; idx < n; ++idx)
        total += w[idx];
    return total;
}
/**
* # The `main` event
*/
/* Help text printed for the -h flag; defaults mirror main()'s initializers. */
const char* usage =
    "pagerank.x -- Compute pagerank on a random graph\n"
    "Flags:\n"
    "  - n -- number of nodes (200)\n"
    "  - p -- probability of including edges (0.05)\n"
    "  - d -- probability that a user follows a link (0.85)\n"
    "  - i -- file name where adjacency matrix should be stored (none)\n"
    "  - o -- file name where output weights should be stored (none)\n";
/* Driver: parse flags, build a G(n,p) graph, run PageRank, report a CSV
   line (threads, wall time, n, iterations, p, d, checksum), and optionally
   dump the graph and the resulting weights to files. */
int main(int argc, char** argv)
{
    int n    = 200;            // Number of nodes
    double p = 0.05;           // Edge probability
    double d = 0.85;           // Probability a link is followed
    const char* ifname = NULL; // Adjacency matrix file name
    const char* ofname = NULL; // Distance matrix file name

    // Option processing
    extern char* optarg;
    const char* optstring = "hn:d:p:o:i:";
    int c;
    while ((c = getopt(argc, argv, optstring)) != -1) {
        switch (c) {
        case 'h':
            fprintf(stderr, "%s", usage);
            return -1;
        case 'n': n = atoi(optarg); break;
        case 'p': p = atof(optarg); break;
        case 'd': d = atof(optarg); break;
        case 'o': ofname = optarg; break;
        case 'i': ifname = optarg; break;
        }
    }

    // Graph generation + output
    int* g = gen_graph(n, p);
    if (ifname)
        write_matrix(ifname, n, g);

    // Generate initial weights: uniform distribution summing to 1
    double* w = calloc(n, sizeof(double));
    for (int i = 0; i < n; ++i) {
        w[i] = 1.0/(double)n;
    }

    // Time the pagerank code
    double t0 = omp_get_wtime();
    int iterations = pagerank(n, d, g, w);
    double t1 = omp_get_wtime();

    //openmp, cores, time, n, iterations, p, d, checksum
    printf("openmp, %d, %g, %d, %d, %g, %g, %g\n",
           omp_get_max_threads(),
           (t1-t0),
           n,
           iterations,
           p,
           d,
           checksum(w, n));

    // Generate output file
    if (ofname)
        write_weights(ofname, n, w);

    // Clean up
    free(g);
    free(w);
    return 0;
}
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
  /// Returns the AST node bound to \c ID.
  ///
  /// Returns NULL if there was no node bound to \c ID or if there is a node but
  /// it cannot be converted to the specified type.
  template <typename T>
  const T *getNodeAs(StringRef ID) const {
    return MyBoundNodes.getNodeAs<T>(ID);
  }

  /// Type of mapping from binding identifiers to bound nodes. This type
  /// is an associative container with a key type of \c std::string and a value
  /// type of \c clang::DynTypedNode
  using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;

  /// Retrieve mapping from binding identifiers to bound nodes.
  const IDToNodeMap &getMap() const {
    return MyBoundNodes.getMap();
  }

private:
  friend class internal::BoundNodesTreeBuilder;

  /// Create BoundNodes from a pre-filled map of bindings.
  BoundNodes(internal::BoundNodesMap &MyBoundNodes)
      : MyBoundNodes(MyBoundNodes) {}

  /// The underlying bindings; all lookups above delegate to this map.
  internal::BoundNodesMap MyBoundNodes;
};
/// Types of matchers for the top-level classes in the AST class
/// hierarchy. These aliases let users name matcher types without spelling
/// out the \c internal:: namespace.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
using TemplateArgumentMatcher = internal::Matcher<TemplateArgument>;
using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>;
/// @}
/// Matches any AST node.
///
/// Handy when another matcher requires a child matcher but there is no
/// additional constraint to express. Often used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
///   "int* p" and "void f()" in
///   int* p;
///   void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() {
  internal::TrueMatcher MatchEverything;
  return MatchEverything;
}
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
///   (matcher = cxxRecordDecl(isExpansionInMainFile())
/// \code
///   #include <Y.h>
///   class X {};
/// \endcode
/// Y.h:
/// \code
///   class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // Resolve the node's start location through macro expansions, then ask the
  // SourceManager whether that spelling lives in the main file.
  auto &SM = Finder->getASTContext().getSourceManager();
  auto ExpandedLoc = SM.getExpansionLoc(Node.getBeginLoc());
  return SM.isInMainFile(ExpandedLoc);
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
///   (matcher = cxxRecordDecl(isExpansionInSystemHeader())
/// \code
///   #include <SystemHeader.h>
///   class X {};
/// \endcode
/// SystemHeader.h:
/// \code
///   class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  auto &SM = Finder->getASTContext().getSourceManager();
  auto ExpandedLoc = SM.getExpansionLoc(Node.getBeginLoc());
  // An invalid expansion location cannot be inside any header.
  return ExpandedLoc.isValid() && SM.isInSystemHeader(ExpandedLoc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
///   (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*"))
/// \code
///   #include "ASTMatcher.h"
///   class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
///   class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching,
                              AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt,
                                                              TypeLoc),
                              RegExp) {
  auto &SM = Finder->getASTContext().getSourceManager();
  auto ExpandedLoc = SM.getExpansionLoc(Node.getBeginLoc());
  if (ExpandedLoc.isInvalid())
    return false;
  // Nodes without an associated file entry (e.g. built-ins) never match.
  auto Entry = SM.getFileEntryForID(SM.getFileID(ExpandedLoc));
  if (!Entry)
    return false;
  return RegExp->match(Entry->getName());
}
/// Matches statements that are (transitively) expanded from the named macro.
/// Does not match if only part of the statement is expanded from that macro or
/// if different parts of the statement are expanded from different
/// appearances of the macro.
///
/// FIXME: Change to be a polymorphic matcher that works on any syntactic
/// node. There's nothing `Stmt`-specific about it.
AST_MATCHER_P(Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) {
  // Verifies that the statement's beginning and ending are both expanded from
  // the same instance of the given macro.
  auto& Context = Finder->getASTContext();
  llvm::Optional<SourceLocation> B =
      internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context);
  if (!B) return false;
  llvm::Optional<SourceLocation> E =
      internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context);
  if (!E) return false;
  // Same expansion location for begin and end means the whole statement
  // comes from one appearance of the macro.
  return *B == *E;
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches decomposition-declarations.
///
/// Examples matches the declaration node with \c foo and \c bar, but not
/// \c number.
/// (matcher = declStmt(has(decompositionDecl())))
///
/// \code
/// int number = 42;
/// auto [foo, bar] = std::make_pair(42, 42);
/// \endcode
extern const internal::VariadicAllOfMatcher<DecompositionDecl>
decompositionDecl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template arguments (with location info).
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgumentLoc()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc>
templateArgumentLoc;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches template template parameter declarations.
///
/// Given
/// \code
/// template <template <typename> class Z, int N> struct C {};
/// \endcode
/// templateTemplateParmDecl()
/// matches 'Z', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
TemplateTemplateParmDecl>
templateTemplateParmDecl;
/// Matches public C++ declarations and C++ base specifiers that specify public
/// inheritance.
///
/// Examples:
/// \code
///   class C {
///   public:    int a; // fieldDecl(isPublic()) matches 'a'
///   protected: int b;
///   private:   int c;
///   };
/// \endcode
///
/// \code
///   class Base {};
///   class Derived1 : public Base {}; // matches 'Base'
///   struct Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPublic,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  return getAccessSpecifier(Node) == AS_public;
}

/// Matches protected C++ declarations and C++ base specifiers that specify
/// protected inheritance.
///
/// Examples:
/// \code
///   class C {
///   public:    int a;
///   protected: int b; // fieldDecl(isProtected()) matches 'b'
///   private:   int c;
///   };
/// \endcode
///
/// \code
///   class Base {};
///   class Derived : protected Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isProtected,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  return getAccessSpecifier(Node) == AS_protected;
}

/// Matches private C++ declarations and C++ base specifiers that specify
/// private inheritance.
///
/// Examples:
/// \code
///   class C {
///   public:    int a;
///   protected: int b;
///   private:   int c; // fieldDecl(isPrivate()) matches 'c'
///   };
/// \endcode
///
/// \code
///   struct Base {};
///   struct Derived1 : private Base {}; // matches 'Base'
///   class Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPrivate,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  return getAccessSpecifier(Node) == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
///   class C {
///    int a : 2;
///    int b;
///   };
/// \endcode
/// fieldDecl(isBitField())
///   matches 'int a : 2;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
  return Node.isBitField();
}
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
///   class C {
///    int a : 2;
///    int b : 4;
///    int c : 2;
///   };
/// \endcode
/// fieldDecl(hasBitWidth(2))
///   matches 'int a : 2;' and 'int c : 2;' but not 'int b : 4;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  // Non-bit-field members never match, regardless of their type's width.
  if (!Node.isBitField())
    return false;
  return Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer
/// matching \p InnerMatcher.
///
/// Given
/// \code
///   class C {
///     int a = 2;
///     int b = 3;
///     int c;
///   };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
///   matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
///   matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  // Members without an in-class initializer can never match.
  const Expr *Init = Node.getInClassInitializer();
  if (Init == nullptr)
    return false;
  return InnerMatcher.matches(*Init, Finder, Builder);
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
///
/// Given
/// \code
///   int main() {}
/// \endcode
/// functionDecl(isMain())
///   matches 'int main() {}'.
AST_MATCHER(FunctionDecl, isMain) {
  return Node.isMain();
}
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
///   template<typename T> class A {}; #1
///   template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
///   matches '#2' with classTemplateDecl() matching the class template
///   declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  const ClassTemplateDecl *Specialized = Node.getSpecializedTemplate();
  if (Specialized == nullptr)
    return false;
  return InnerMatcher.matches(*Specialized, Finder, Builder);
}
/// Matches a declaration that has been implicitly added
/// by the compiler (e.g. implicit default/copy constructors).
AST_MATCHER(Decl, isImplicit) {
  return Node.isImplicit();
}

/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
///   template<typename T> class A {};
///   template<> class A<double> {};
///   A<int> a;
///
///   template<typename T> void f() {};
///   void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToType(asString("int"))))
///   matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Succeed as soon as any argument of the specialization matches.
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
                             Builder);
}
/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
///   void foo()
///   {
///       int i = 3.0;
///   }
/// \endcode
/// The matcher
/// \code
///   traverse(TK_IgnoreImplicitCastsAndParentheses,
///     varDecl(hasInitializer(floatLiteral().bind("init")))
///   )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
template <typename T>
internal::Matcher<T> traverse(TraversalKind TK,
                              const internal::Matcher<T> &InnerMatcher) {
  return internal::DynTypedMatcher::constructRestrictedWrapper(
             new internal::TraversalMatcher<T>(TK, InnerMatcher),
             InnerMatcher.getID().first)
      .template unconditionalConvertTo<T>();
}

/// Overload of \c traverse for bindable matchers; the result remains
/// bindable.
template <typename T>
internal::BindableMatcher<T>
traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) {
  return internal::BindableMatcher<T>(
      internal::DynTypedMatcher::constructRestrictedWrapper(
          new internal::TraversalMatcher<T>(TK, InnerMatcher),
          InnerMatcher.getID().first)
          .template unconditionalConvertTo<T>());
}

/// Overload of \c traverse for variadic-operator matcher expressions.
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(TraversalKind TK,
         const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
  return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>(
      TK, InnerMatcher);
}

/// Overload of \c traverse for argument-adapting matcher functions.
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
          typename T, typename ToTypes>
internal::TraversalWrapper<
    internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
                               ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
                                                   ToTypes>>(TK, InnerMatcher);
}

/// Overload of \c traverse for one-parameter polymorphic matchers.
template <template <typename T, typename P1> class MatcherT, typename P1,
          typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1<
                               MatcherT, P1, ReturnTypesF> &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>(
      TK, InnerMatcher);
}

/// Overload of \c traverse for two-parameter polymorphic matchers.
template <template <typename T, typename P1, typename P2> class MatcherT,
          typename P1, typename P2, typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2<
                               MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>(
      TK, InnerMatcher);
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
///   class C {};
///   C a = C();
///   C b;
///   C c = b;
/// \endcode
/// The matchers
/// \code
///    varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
///    varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder);
}

/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
///   int arr[5];
///   int a = 0;
///   char b = 0;
///   const int c = a;
///   int *d = arr;
///   long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
///    varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
///    varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
///    varDecl(hasInitializer(integerLiteral()))
///    varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
///   int a = 0;
///   char b = (0);
///   void* c = reinterpret_cast<char*>(0);
///   char d = char(0);
/// \endcode
/// The matcher
///    varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
///    varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder);
}

/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
///   int arr[5];
///   int a = 0;
///   char b = (0);
///   const int c = a;
///   int *d = (arr);
///   long e = ((long) 0l);
/// \endcode
/// The matchers
///    varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
///    varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
///    varDecl(hasInitializer(integerLiteral()))
///    varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder);
}

/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
///   void (*fp)(void);
/// \endcode
/// The matcher
/// \code
///   varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
///   const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
///   implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
///   template<typename T>
///   void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  return Node.isInstantiationDependent();
}

/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
///   template<typename T>
///   void add(T x, int y) {
///     x + y;
///   }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); }

/// Matches expression that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the return expression "Size" in the following example is
/// value-dependent.
/// \code
///   template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); }
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
///   template<typename T, typename U> class A {};
///   A<bool, int> b;
///   A<int, bool> c;
///
///   template<typename T> void f() {}
///   void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
///     1, refersToType(asString("int"))))
///   matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Out-of-range indices simply fail to match.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  if (N >= Args.size())
    return false;
  return InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Works on both class template specialization declarations and
/// template specialization types.
///
/// Given
/// \code
///   template<typename T> struct C {};
///   C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
///   matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  return internal::getTemplateSpecializationArgs(Node).size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
///   struct X {};
///   template<typename T> struct A {};
///   A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToType(class(hasName("X")))))
///   matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type-kind arguments carry a QualType to match against.
  return Node.getKind() == TemplateArgument::Type &&
         InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  // Only template-kind arguments hold a TemplateName for the inner matcher.
  return Node.getKind() == TemplateArgument::Template &&
         InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  // Bail out early for arguments that are not declarations.
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  // Non-expression arguments cannot satisfy an Expr matcher.
  if (Node.getKind() != TemplateArgument::Expression)
    return false;
  return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(isIntegral()))
/// matches the implicit instantiation of C in C<42>
/// with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
  // True exactly when the argument holds an integral constant.
  const TemplateArgument::ArgKind Kind = Node.getKind();
  return Kind == TemplateArgument::Integral;
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only integral arguments expose the type of their value.
  return Node.getKind() == TemplateArgument::Integral &&
         InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  // Only integral arguments have a value to compare.
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  // Render the arbitrary-precision value canonically in base 10 and compare.
  const std::string Canonical = Node.getAsIntegral().toString(10);
  return Canonical == Value;
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches tag declarations.
///
/// Example matches X, Z, U, S, E
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// enum E {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guide.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
/// class X { X(int) };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
/// }
/// \endcode
AST_MATCHER(CallExpr, usesADL) {
  // Forward to the CallExpr's own record of whether the callee was
  // found via argument-dependent lookup.
  return Node.usesADL();
}
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if expression have it).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // Some init lists have no distinct syntactic form; those never match.
  const Expr *Syntactic = Node.getSyntacticForm();
  if (Syntactic == nullptr)
    return false;
  return InnerMatcher.matches(*Syntactic, Finder, Builder);
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be met in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches noexcept expressions.
///
/// Given
/// \code
/// bool a() noexcept;
/// bool b() noexcept(true);
/// bool c() noexcept(false);
/// bool d() noexcept(noexcept(a()));
/// bool e = noexcept(b()) || noexcept(c());
/// \endcode
/// cxxNoexceptExpr()
/// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`.
/// doesn't match the noexcept specifier in the declarations a, b, c or d.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
/// for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  // A for loop may omit its increment; match only when one is present.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
/// for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  // A for loop may omit its init statement; match only when one is present.
  if (const Stmt *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the loop variable of a range-based for loop.
///
/// Example:
///     cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  // The loop variable should always exist for a valid range-for, but guard
  // against a null declaration (e.g. in invalid/dependent code) anyway.
  if (const VarDecl *LoopVar = Node.getLoopVariable())
    return InnerMatcher.matches(*LoopVar, Finder, Builder);
  return false;
}
/// Matches the range initialization statement of a for loop.
///
/// Example:
///     cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  // Guard against a missing range initializer (possible in invalid code).
  if (const Expr *RangeInit = Node.getRangeInit())
    return InnerMatcher.matches(*RangeInit, Finder, Builder);
  return false;
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
/// goto *ptr;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches fixed point literals
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
fixedPointLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference another expressions and can be met
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
///   staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
/// matches '{ [2].y = 1.0, [0].x = 1.0 }',
/// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  // DesignatedInitExpr::size() is the number of designators written in the
  // initializer, so matching reduces to a plain equality test against N.
  return Node.size() == N;
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information which may or may not be present about a
/// main
/// matching node is desired.
///
/// For example, in:
/// \code
/// class Foo {
/// int bar;
/// }
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(
/// optionally(has(
/// fieldDecl(hasName("bar")).bind("var")
/// ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
/// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
/// matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Delegate directly to the inner matcher on the type named (or implied) by
  // the sizeof/alignof argument.
  return InnerMatcher.matches(Node.getTypeOfArgument(), Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
///   int x;
///   int s = sizeof(x) + alignof(x)
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
///   matches \c sizeof(x)
///
/// If the matcher is used from clang-query, UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  return Node.getKind() == Kind;
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::BindableMatcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Accept both standard alignof and the GCC-style preferred-alignment
  // variant, then apply the caller-supplied matcher on top.
  const auto AlignOfKind =
      anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf));
  return stmt(unaryExprOrTypeTraitExpr(allOf(AlignOfKind, InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::BindableMatcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Restrict the trait expression to sizeof before applying InnerMatcher.
  const auto SizeOfOnly = allOf(ofKind(UETT_SizeOf), InnerMatcher);
  return stmt(unaryExprOrTypeTraitExpr(SizeOfOnly));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
/// namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  // HasNameMatcher accepts a list of names; wrap the single name in a
  // one-element vector via the braced initializer.
  return internal::Matcher<NamedDecl>(
      new internal::HasNameMatcher({std::string(Name)}));
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
/// namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
  // Prefix "::" so patterns anchored at the global namespace (e.g. "::X")
  // can match the fully qualified name.
  return RegExp->match("::" + Node.getQualifiedNameAsString());
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
/// class A { int operator*(); };
/// const A &operator<<(const A &a, const A &b);
/// A a;
/// a << a; // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  // The underlying matcher supports several operator names at once; wrap the
  // single requested name in a one-element vector.
  return internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(
      {std::string(Name)});
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// hasAnyOverloadedOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
extern const internal::VariadicFunction<
internal::PolymorphicMatcherWithParam1<
internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>,
StringRef, internal::hasAnyOverloadedOperatorNameFunc>
hasAnyOverloadedOperatorName;
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
/// @interface NSObject @end
/// @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // C++ records and Objective-C interfaces use separate ASTMatchFinder entry
  // points, so dispatch on the dynamic node type.
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(Record, Base, Builder,
                                      /*Directly=*/false);
  // Not a C++ record, so it must be an Objective-C interface.
  return Finder->objcClassIsDerivedFrom(cast<ObjCInterfaceDecl>(&Node), Base,
                                        Builder, /*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match any declaration; bail out early.
  if (BaseName.empty())
    return false;
  const auto Inner = isDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches C++ classes that have a direct or indirect base matching \p
/// BaseSpecMatcher.
///
/// Example:
/// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
/// class Foo;
/// class Bar : Foo {};
/// class Baz : Bar {};
/// class SpecialBase;
/// class Proxy : SpecialBase {}; // matches Proxy
/// class IndirectlyDerived : Proxy {}; //matches IndirectlyDerived
/// \endcode
///
// FIXME: Refactor this and isDerivedFrom to reuse implementation.
AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>,
              BaseSpecMatcher) {
  // The transitive walk over direct and indirect bases lives in the shared
  // helper; this matcher is just a thin adapter.
  return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder);
}
/// Matches C++ classes that have a direct base matching \p BaseSpecMatcher.
///
/// Example:
/// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
/// class Foo;
/// class Bar : Foo {};
/// class Baz : Bar {};
/// class SpecialBase;
/// class Proxy : SpecialBase {}; // matches Proxy
/// class IndirectlyDerived : Proxy {}; // doesn't match
/// \endcode
AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>,
              BaseSpecMatcher) {
  // Base specifiers are only available once the class has a definition.
  if (!Node.hasDefinition())
    return false;
  // Stop at the first direct base that satisfies the matcher.
  for (const CXXBaseSpecifier &BaseSpec : Node.bases())
    if (BaseSpecMatcher.matches(BaseSpec, Finder, Builder))
      return true;
  return false;
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // "Same or derived": the node may match Base directly, or be (transitively)
  // derived from something matching Base.
  const auto Inner = anyOf(Base, isDerivedFrom(Base));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match any declaration; bail out early.
  if (BaseName.empty())
    return false;
  const auto Inner = isSameOrDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Same dispatch as isDerivedFrom, but restricted to direct bases /
  // superclasses (Directly = true).
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(Record, Base, Builder,
                                      /*Directly=*/true);
  return Finder->objcClassIsDerivedFrom(cast<ObjCInterfaceDecl>(&Node), Base,
                                        Builder, /*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match any declaration; bail out early.
  if (BaseName.empty())
    return false;
  const auto Inner = isDirectlyDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
/// class A { void func(); };
/// class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Stops at the first method that satisfies InnerMatcher; later matching
  // methods in the same class do not produce additional results.
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
/// auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  // True for the compiler-generated closure class of a lambda expression.
  return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
/// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasDescendantMatcher>
hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
/// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::ForEachDescendantMatcher>
forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
/// class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(hasName("::A"),
/// findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  // eachOf yields a result from both alternatives: one for the node itself
  // and one for every matching descendant.
  return eachOf(Matcher, forEachDescendant(Matcher));
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
/// class X {};
/// typedef X Y;
/// Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
/// varDecl(hasType(hasUnqualifiedDesugaredType(
/// recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
/// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
/// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
internal::HasDeclarationMatcher, internal::Matcher<Decl>,
void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
return internal::PolymorphicMatcherWithParam1<
internal::HasDeclarationMatcher, internal::Matcher<Decl>,
void(internal::HasDeclarationSupportedTypes)>(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
/// namespace N { template<class T> void f(T t); }
/// template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
/// namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
/// matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  // getUnderlyingDecl() looks through using-declarations and similar sugar;
  // it may yield null, in which case there is nothing to match against.
  if (const NamedDecl *Underlying = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*Underlying, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y {};
/// void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
/// matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
/// matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) {
  // Strip parentheses and implicit casts from the implicit object argument
  // before handing it to the inner matcher.
  const Expr *Object =
      Node.getImplicitObjectArgument()->IgnoreParenImpCasts();
  if (Object == nullptr)
    return false;
  return InnerMatcher.matches(*Object, Finder, Builder);
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
/// NSString *webViewJavaScript = ...
/// UIWebView *webView = ...
///   [webView stringByEvaluatingJavaScriptFromString:webViewJavaScript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Match against the static type of the message receiver.
  return InnerMatcher.matches(Node.getReceiverType(), Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  // True for methods declared with '+' (class methods).
  return Node.isClassMethod();
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  // True for methods declared with '-' (instance methods).
  return Node.isInstanceMethod();
}
/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  // True when the message receiver is a class, not an object instance.
  return Node.isClassMessage();
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// but not
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  // True when the message receiver is an object instance, not a class.
  return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only instance messages carry an expression receiver; class messages
  // yield a null instance receiver and therefore never match.
  const Expr *Receiver = Node.getInstanceReceiver();
  if (Receiver == nullptr)
    return false;
  return InnerMatcher.matches(*Receiver->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  // operator== reads better than std::string::compare(...) == 0 and is
  // behaviorally identical (clang-tidy: readability-string-compare).
  return Node.getSelector().getAsString() == BaseName;
}
/// Matches when at least one of the supplied string equals to the
/// Selector.getAsString()
///
/// Note: this is only the declaration of the variadic-function object; the
/// \c extern indicates it is defined out of line.
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
                                        StringRef,
                                        internal::hasAnySelectorFunc>
    hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
///
/// The regular expression is matched against the full serialized selector,
/// i.e. Selector::getAsString().
///
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) {
  std::string SelectorString = Node.getSelector().getAsString();
  return RegExp->match(SelectorString);
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
///
/// Delegates to Selector::isNull().
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// Delegates to Selector::isKeywordSelector().
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
/// The count is taken from Selector::getNumArgs().
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  return Node.getSelector().getNumArgs() == N;
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  if (const Expr *Callee = Node.getCallee())
    return InnerMatcher.matches(*Callee, Finder, Builder);
  return false;
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Implemented as a shorthand for callExpr(hasDeclaration(InnerMatcher)).
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int")))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  // A null QualType (no underlying type available) never matches.
  const QualType NodeType = internal::getUnderlyingType(Node);
  return !NodeType.isNull() && InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// \endcode
///
/// Example matches class Derived
/// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base"))))))
/// \code
/// class Base {};
/// class Derived : Base {};
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>,
/// Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl,
                                    CXXBaseSpecifier),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  // A null QualType (no underlying type available) never matches.
  const QualType NodeType = internal::getUnderlyingType(Node);
  return !NodeType.isNull() &&
         qualType(hasDeclaration(InnerMatcher)).matches(NodeType, Finder,
                                                        Builder);
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  // Implicit declarations (e.g. implicit destructors) carry no type source
  // info and therefore cannot match.
  const TypeSourceInfo *TSI = Node.getTypeSourceInfo();
  return TSI != nullptr && Inner.matches(TSI->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// The comparison is an exact string match against QualType::getAsString(),
/// so spelling (e.g. "class Y *") must match the printed form.
///
/// Given
/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  return Name == Node.getAsString();
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
/// (matcher = cxxMemberCallExpr(on(hasType(pointsTo
/// cxxRecordDecl(hasName("Y")))))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  // Guard: null types and non-pointer types never match.
  if (Node.isNull() || !Node->isAnyPointerType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Overloaded to match the pointee type's declaration.
///
/// Implemented as pointsTo(qualType(hasDeclaration(InnerMatcher))).
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// The inner matcher runs against Type::getUnqualifiedDesugaredType().
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // Guard: null types and non-reference types never match.
  if (Node.isNull() || !Node->isReferenceType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Null QualTypes have no canonical type and never match.
  return !Node.isNull() &&
         InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
///
/// Implemented as references(qualType(hasDeclaration(InnerMatcher))).
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
/// does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *ObjectArg = Node.getImplicitObjectArgument())
    return InnerMatcher.matches(*ObjectArg, Finder, Builder);
  return false;
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Implemented in terms of onImplicitObjectArgument with
/// anyOf(hasType(...), hasType(pointsTo(...))), covering both `.` and `->`
/// calls.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  return onImplicitObjectArgument(
             anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
///
/// Same structure as the QualType overload above, but the inner matcher is
/// applied through the Decl overloads of hasType/pointsTo.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  return onImplicitObjectArgument(
             anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  if (const Decl *Referenced = Node.getDecl())
    return InnerMatcher.matches(*Referenced, Finder, Builder);
  return false;
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references found through a using-shadow declaration can match.
  const auto *Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow != nullptr && InnerMatcher.matches(*Shadow, Finder, Builder);
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Iterates Node.decls_begin()..decls_end() and succeeds on the first
/// declaration the inner matcher accepts.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
                                    Node.decls_end(), Finder, Builder);
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  // Multi-declaration statements never match, regardless of InnerMatcher.
  if (!Node.isSingleDecl())
    return false;
  return InnerMatcher.matches(*Node.getSingleDecl(), Finder, Builder);
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  // getAnyInitializer() also finds the initializer on a redeclaration.
  if (const Expr *Init = Node.getAnyInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// \brief Matches a static variable with local scope.
///
/// Delegates to VarDecl::isStaticLocal().
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Delegates to VarDecl::hasLocalStorage().
///
/// Example matches x (matcher = varDecl(hasLocalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Delegates to VarDecl::hasGlobalStorage().
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Checks VarDecl::getStorageDuration() against SD_Automatic.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// Checks VarDecl::getStorageDuration() against SD_Static.
///
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
/// matches the variable declarations y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Checks VarDecl::getStorageDuration() against SD_Thread.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Delegates to VarDecl::isExceptionVariable().
///
/// Example matches x (matcher = varDecl(isExceptionVariable())
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// The count comes from the node's getNumArgs().
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                          CXXConstructExpr,
                                                          ObjCMessageExpr),
                          unsigned, N) {
  return Node.getNumArgs() == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Parentheses and implicit casts around the argument are ignored before
/// applying the inner matcher (IgnoreParenImpCasts).
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr,
                                                           ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
  return (N < Node.getNumArgs() &&
          InnerMatcher.matches(
              *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder));
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
/// int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  return N < Node.getNumInits() &&
         InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // Use static_cast rather than a C-style cast for the unsigned -> ptrdiff_t
  // conversion; the comparison itself is unchanged.
  return std::distance(Node.decl_begin(), Node.decl_end()) ==
         static_cast<ptrdiff_t>(N);
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
/// matches 'int a, b = 0' as well as 'int d = 2, e;'
/// but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Out-of-range positions never match.
  const unsigned DeclCount = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= DeclCount)
    return false;
  return InnerMatcher.matches(**std::next(Node.decl_begin(), N), Finder,
                              Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// A catch-all handler is recognized by the absence of an exception
/// declaration (getExceptionDecl() is null).
///
/// Given
/// \code
/// try {
/// // ...
/// } catch (int) {
/// // ...
/// } catch (...) {
/// // ...
/// }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  return Node.getExceptionDecl() == nullptr;
}
/// Matches a constructor initializer.
///
/// Iterates Node.init_begin()..init_end() and succeeds on the first
/// constructor initializer the inner matcher accepts.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
/// hasAnyConstructorInitializer(anything())
/// )))
/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
                                    Node.init_end(), Finder, Builder);
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  // Base initializers have no member field and never match.
  if (const FieldDecl *Field = Node.getAnyMember())
    return InnerMatcher.matches(*Field, Finder, Builder);
  return false;
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Delegates to CXXCtorInitializer::isWritten().
///
/// Given
/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
  return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Delegates to CXXCtorInitializer::isBaseInitializer().
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
/// will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
  return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Delegates to CXXCtorInitializer::isMemberInitializer().
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
/// will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
  return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
/// matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  for (const Expr *Arg : Node.arguments()) {
    // Match each argument against a scratch copy of the bound nodes so that
    // only the first successful argument's bindings are committed.
    BoundNodesTreeBuilder Result(*Builder);
    if (InnerMatcher.matches(*Arg, Finder, &Result)) {
      *Builder = std::move(Result);
      return true;
    }
  }
  return false;
}
/// Matches any capture of a lambda expression.
///
/// Only captures of variables are considered; captures of 'this' are
/// handled by the CXXThisExpr overload below.
///
/// Given
/// \code
/// void foo() {
/// int x;
/// auto f = [x](){};
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(anything()))
/// matches [x](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
                       InnerMatcher, 0) {
  for (const LambdaCapture &Capture : Node.captures()) {
    if (Capture.capturesVariable()) {
      // Commit bindings only for the first matching capture.
      BoundNodesTreeBuilder Result(*Builder);
      if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) {
        *Builder = std::move(Result);
        return true;
      }
    }
  }
  return false;
}
/// Matches any capture of 'this' in a lambda expression.
///
/// NOTE(review): InnerMatcher is never invoked in this overload — any
/// capture of 'this' matches regardless of the inner matcher. Confirm this
/// is the intended contract.
///
/// Given
/// \code
/// struct foo {
/// void bar() {
/// auto f = [this](){};
/// }
/// };
/// \endcode
/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
/// matches [this](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
                       internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
  return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) {
    return LC.capturesThis();
  });
}
/// Matches a constructor call expression which uses list initialization.
///
/// Delegates to CXXConstructExpr::isListInitialization().
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  return Node.isListInitialization();
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Delegates to CXXConstructExpr::requiresZeroInitialization().
///
/// Given
/// \code
/// void foo() {
/// struct point { double x; double y; };
/// point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  return Node.requiresZeroInitialization();
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
/// class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  return (N < Node.parameters().size()
          && InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParam(
/// declRefExpr(to(varDecl(hasName("y")))),
/// parmVarDecl(hasType(isInteger()))
/// ))
/// matches f(y);
/// with declRefExpr(...)
/// matching int y
/// and parmVarDecl(...)
/// matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  // Accumulates the bound nodes of every (argument, parameter) pair that
  // matched; committed to Builder at the end.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                      ? 1
                      : 0;
  // ArgIndex and ParamIndex stay offset by one for member operator calls:
  // the parameter list does not include the implicit object argument.
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Match the parameter at the same position through the callee's
      // declaration (constructor or function).
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches all arguments and their respective types for a \c CallExpr or
/// \c CXXConstructExpr. It is very similar to \c forEachArgumentWithParam but
/// it works on calls through function pointers as well.
///
/// The difference is, that function pointers do not provide access to a
/// \c ParmVarDecl, but only the \c QualType for each argument.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// void (*f_ptr)(int) = f;
/// f_ptr(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParamType(
/// declRefExpr(to(varDecl(hasName("y")))),
/// qualType(isInteger()).bind("type")
/// ))
/// matches f(y) and f_ptr(y)
/// with declRefExpr(...)
/// matching int y
/// and qualType(...)
/// matching int
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<QualType>, ParamMatcher) {
  // Accumulates the bound nodes of every (argument, parameter type) pair
  // that matched; committed to Builder at the end.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                      ? 1
                      : 0;
  // For calls through (member) function pointers there is no FunctionDecl to
  // consult, but the callee's type still carries the parameter types.
  const FunctionProtoType *FProto = nullptr;
  if (const auto *Call = dyn_cast<CallExpr>(&Node)) {
    if (const auto *Value =
            dyn_cast_or_null<ValueDecl>(Call->getCalleeDecl())) {
      QualType QT = Value->getType().getCanonicalType();
      // This does not necessarily lead to a `FunctionProtoType`,
      // e.g. K&R functions do not have a function prototype.
      if (QT->isFunctionPointerType())
        FProto = QT->getPointeeType()->getAs<FunctionProtoType>();
      if (QT->isMemberFunctionPointerType()) {
        const auto *MP = QT->getAs<MemberPointerType>();
        assert(MP && "Must be member-pointer if its a memberfunctionpointer");
        FProto = MP->getPointeeType()->getAs<FunctionProtoType>();
        assert(FProto &&
               "The call must have happened through a member function "
               "pointer");
      }
    }
  }
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex, ++ParamIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder,
                           &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // This test is cheaper compared to the big matcher in the next if.
      // Therefore, please keep this order.
      if (FProto) {
        QualType ParamType = FProto->getParamType(ParamIndex);
        if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) {
          Result.addMatch(ParamMatches);
          Matched = true;
          continue;
        }
      }
      // Fall back to the callee's declaration (constructor or function) to
      // obtain the parameter's declared type.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher))))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher)))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
        continue;
      }
    }
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches the ParmVarDecl nodes that are at the N'th position in the parameter
/// list. The parameter list could be that of either a block, function, or
/// objc-method.
///
///
/// Given
///
/// \code
/// void f(int a, int b, int c) {
/// }
/// \endcode
///
/// ``parmVarDecl(isAtPosition(0))`` matches ``int a``.
///
/// ``parmVarDecl(isAtPosition(1))`` matches ``int b``.
AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) {
  const clang::DeclContext *Context = Node.getParentFunctionOrMethod();
  // True when D is non-null and this very node is its N'th parameter.
  auto IsNthParamOf = [&](const auto *D) {
    return D != nullptr && N < D->param_size() && D->getParamDecl(N) == &Node;
  };
  return IsNthParamOf(dyn_cast_or_null<FunctionDecl>(Context)) ||
         IsNthParamOf(dyn_cast_or_null<BlockDecl>(Context)) ||
         IsNthParamOf(dyn_cast_or_null<ObjCMethodDecl>(Context));
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
/// class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
//
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
///
/// For blocks, given
/// \code
/// b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>,
                          InnerMatcher) {
  // Delegates to matchesFirstInPointerRange over [param_begin, param_end);
  // all three supported node types expose this parameter range.
  return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
                                    Node.param_end(), Finder, Builder);
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
/// void f(int i) {}
/// void g(int i, int j) {}
/// void h(int i, int j);
/// void j(int i);
/// void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
/// matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  // getNumParams() counts only the declared parameters; a trailing variadic
  // ellipsis does not contribute (see the 'k' example above).
  return Node.getNumParams() == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
/// void nope();
/// [[noreturn]] void a();
/// __attribute__((noreturn)) void b();
/// struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
/// matches all of those except
/// \code
/// void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) {
  // Delegates to FunctionDecl::isNoReturn().
  return Node.isNoReturn();
}
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
/// class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
/// matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
              internal::Matcher<QualType>, InnerMatcher) {
  // Runs InnerMatcher against the function's declared return type.
  return InnerMatcher.matches(Node.getReturnType(), Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
/// extern "C" int x = 1;
/// extern "C" int y = 2;
/// int z = 3;
/// \endcode
/// functionDecl(isExternC())
/// matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
/// matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                                   VarDecl)) {
  // Both FunctionDecl and VarDecl provide isExternC().
  return Node.isExternC();
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
/// static void f() {}
/// static int i = 0;
/// extern int j;
/// int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
/// matches the function declaration f.
/// varDecl(isStaticStorageClass())
/// matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  // Checks the storage class as written: the 'static' keyword must appear in
  // the source (see the matcher's doc comment above).
  return Node.getStorageClass() == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
/// void Func();
/// void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
/// matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); }
/// Matches defaulted function declarations.
///
/// Given:
/// \code
/// class A { ~A(); };
/// class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
/// matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); }
/// Matches weak function declarations.
///
/// Given:
/// \code
/// void foo() __attribute__((__weakref__("__foo")));
/// void bar();
/// \endcode
/// functionDecl(isWeak())
/// matches the weak declaration "foo", but not "bar".
AST_MATCHER(FunctionDecl, isWeak) {
  // Delegates to FunctionDecl::isWeak().
  return Node.isWeak();
}
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() noexcept(true);
/// void i() noexcept(false);
/// void j() throw();
/// void k() throw(int);
/// void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
/// match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // getFunctionProtoType extracts the prototype from either supported node
  // type; functions without a prototype cannot have an exception spec.
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  return FnTy && FnTy->hasDynamicExceptionSpec();
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() throw();
/// void i() throw(int);
/// void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
/// match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // A function without a prototype carries no exception specification and is
  // therefore assumed to be potentially throwing.
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  if (!FnTy)
    return false;
  // Assume the best for any unresolved exception specification; otherwise
  // defer to the resolved specification.
  return isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()) ||
         FnTy->isNothrow();
}
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
/// Given:
/// \code
/// constexpr int foo = 42;
/// constexpr int bar();
/// void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
/// matches the declaration of foo.
/// functionDecl(isConstexpr())
/// matches the declaration of bar.
/// ifStmt(isConstexpr())
/// matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  // VarDecl, FunctionDecl and IfStmt all expose an isConstexpr() accessor.
  return Node.isConstexpr();
}
/// Matches selection statements with initializer.
///
/// Given:
/// \code
/// void foo() {
/// if (int i = foobar(); i > 0) {}
/// switch (int i = foobar(); i) {}
/// for (auto& a = get_range(); auto& x : a) {}
/// }
/// void bar() {
/// if (foobar() > 0) {}
/// switch (foobar()) {}
/// for (auto& x : get_range()) {}
/// }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
/// matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
/// matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
/// matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
                                                          CXXForRangeStmt),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // Statements without an init statement (the common case) never match.
  if (const Stmt *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
/// if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // A null condition (e.g. 'for (;;)') never matches.
  if (const Expr *Condition = Node.getCond())
    return InnerMatcher.matches(*Condition, Finder, Builder);
  return false;
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  if (const Stmt *Then = Node.getThen())
    return InnerMatcher.matches(*Then, Finder, Builder);
  return false;
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  // If-statements without an else branch never match.
  if (const Stmt *Else = Node.getElse())
    return InnerMatcher.matches(*Else, Finder, Builder);
  return false;
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
/// class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
/// has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
/// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
/// matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
/// forEachDescendant(varDecl().bind("d")),
/// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSpecifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  // Drop every binding in which 'ID' refers to a node different from the
  // current Node; the match result is whatever removeBindings reports.
  // NOTE(review): the filter semantics are inferred from the predicate's
  // name -- confirm against NotEqualsBoundNodePredicate in
  // ASTMatchersInternal.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = DynTypedNode::create(Node);
  return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  // Only if-statements that declare a variable in their condition carry a
  // condition-variable DeclStmt.
  if (const DeclStmt *CondVar = Node.getConditionVariableDeclStmt())
    return InnerMatcher.matches(*CondVar, Finder, Builder);
  return false;
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Index = Node.getIdx();
  return Index != nullptr && InnerMatcher.matches(*Index, Finder, Builder);
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Base = Node.getBase();
  return Base != nullptr && InnerMatcher.matches(*Base, Finder, Builder);
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body. Note that in case of functions
/// this matcher only matches the definition itself and not the other
/// declarations of the same function.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
///
/// Given
/// \code
/// void f();
/// void f() {}
/// \endcode
/// hasBody(functionDecl())
/// matches 'void f() {}'
/// with compoundStmt()
/// matching '{}'
/// but does not match 'void f();'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // GetBodyMatcher dispatches on the node type; for FunctionDecl only the
  // defining declaration yields a body (see the doc comment above).
  if (const Stmt *Body = internal::GetBodyMatcher<NodeType>::get(Node))
    return InnerMatcher.matches(*Body, Finder, Builder);
  return false;
}
/// Matches a function declaration that has a given body present in the AST.
/// Note that this matcher matches all the declarations of a function whose
/// body is present in the AST.
///
/// Given
/// \code
/// void f();
/// void f() {}
/// void g();
/// \endcode
/// hasAnyBody(functionDecl())
/// matches both 'void f();'
/// and 'void f() {}'
/// with compoundStmt()
/// matching '{}'
/// but does not match 'void g();'
AST_MATCHER_P(FunctionDecl, hasAnyBody,
              internal::Matcher<Stmt>, InnerMatcher) {
  // Unlike hasBody, getBody() finds a body through redeclarations, so every
  // declaration of a function defined in the AST matches.
  if (const Stmt *Body = Node.getBody())
    return InnerMatcher.matches(*Body, Finder, Builder);
  return false;
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
/// { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
/// matches '{ {}; 1+2; }'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // CompoundStmtMatcher dispatches on the node type; for a StmtExpr it
  // retrieves the child CompoundStmt.
  const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
  if (!CS)
    return false;
  return matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
                                    CS->body_end(), Finder, Builder);
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
/// { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  // size() counts only the direct child statements of this compound
  // statement, not statements in nested blocks (see the example above).
  return Node.size() == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
/// f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
/// matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
/// match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
/// match 3.14
/// integerLiteral(equals(42))
/// matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
/// hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  // ValueT is deduced from the argument; PolymorphicMatcherWithParam1 makes
  // the resulting matcher usable on all supported literal node types. The
  // overloads below pin common argument types (bool/unsigned/double) so the
  // matcher is also reachable from the dynamic-matcher registry.
  return internal::PolymorphicMatcherWithParam1<
    internal::ValueEqualsMatcher,
    ValueT>(Value);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   bool, Value, 0) {
  // Defer to the shared literal-comparison logic.
  const internal::ValueEqualsMatcher<NodeType, ParamT> Matcher(Value);
  return Matcher.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   unsigned, Value, 1) {
  // Defer to the shared literal-comparison logic.
  const internal::ValueEqualsMatcher<NodeType, ParamT> Matcher(Value);
  return Matcher.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   FloatingLiteral,
                                                                   IntegerLiteral),
                                   double, Value, 2) {
  // Defer to the shared literal-comparison logic.
  const internal::ValueEqualsMatcher<NodeType, ParamT> Matcher(Value);
  return Matcher.matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
/// !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  // Compares against the operator's source spelling, e.g. "||" or "!".
  return Name == Node.getOpcodeStr(Node.getOpcode());
}
/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
///
/// hasAnyOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOperatorName("+"), hasOperatorName("-"))
extern const internal::VariadicFunction<
    internal::PolymorphicMatcherWithParam1<
        internal::HasAnyOperatorNameMatcher, std::vector<std::string>,
        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>,
    StringRef, internal::hasAnyOperatorNameFunc>
    hasAnyOperatorName; // extern: the object is defined out of line.
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
/// void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  // Both supported node types provide isAssignmentOp().
  return Node.isAssignmentOp();
}
/// Matches comparison operators.
///
/// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 < s2
/// (matcher = cxxOperatorCallExpr(isComparisonOperator()))
/// \code
/// struct S { bool operator<(const S& other); };
/// void x(S s1, S s2) { bool b1 = s1 < s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isComparisonOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  // Both supported node types provide isComparisonOp().
  return Node.isComparisonOp();
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *LHS = Node.getLHS())
    return InnerMatcher.matches(*LHS, Finder, Builder);
  return false;
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *RHS = Node.getRHS())
    return InnerMatcher.matches(*RHS, Finder, Builder);
  return false;
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
inline internal::Matcher<BinaryOperator> hasEitherOperand(
    const internal::Matcher<Expr> &InnerMatcher) {
  // The operand may appear on either side of the operator.
  return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher));
}
/// Matches if both matchers match with opposite sides of the binary operator.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1),
/// integerLiteral(equals(2)))
/// \code
/// 1 + 2 // Match
/// 2 + 1 // Match
/// 1 + 1 // No match
/// 2 + 2 // No match
/// \endcode
inline internal::Matcher<BinaryOperator>
hasOperands(const internal::Matcher<Expr> &Matcher1,
            const internal::Matcher<Expr> &Matcher2) {
  // Try both assignments of the two matchers to the two operand sides.
  return anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
               allOf(hasLHS(Matcher2), hasRHS(Matcher1)));
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
/// cxxBoolLiteral(equals(true))))
/// \code
/// !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *Operand = Node.getSubExpr())
    return InnerMatcher.matches(*Operand, Finder, Builder);
  return false;
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
/// class URL { URL(string); };
/// URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
/// int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // GetSourceExpressionMatcher dispatches on the node type to fetch the
  // underlying source expression.
  const Expr *Source =
      internal::GetSourceExpressionMatcher<NodeType>::get(Node);
  if (!Source)
    return false;
  return InnerMatcher.matches(*Source, Finder, Builder);
}
/// Matches casts that has a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
/// int *p = 0;
/// \endcode
///
/// If the matcher is use from clang-query, CastKind parameter
/// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
  // Exact comparison against the CK_* kind recorded on the cast node.
  return Node.getCastKind() == Kind;
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Use the type as written in the cast, not a derived/adjusted type.
  return InnerMatcher.matches(Node.getTypeAsWritten(), Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // The cast's own type is the type it converts to.
  const QualType DestinationType = Node.getType();
  return InnerMatcher.matches(DestinationType, Finder, Builder);
}
/// Matches TagDecl object that are spelled with "struct."
///
/// Example matches S, but not C, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) { return Node.isStruct(); }
/// Matches TagDecl object that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) { return Node.isUnion(); }
/// Matches TagDecl object that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) { return Node.isClass(); }
/// Matches TagDecl object that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) { return Node.isEnum(); }
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
/// condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *TrueBranch = Node.getTrueExpr())
    return InnerMatcher.matches(*TrueBranch, Finder, Builder);
  return false;
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
/// condition ? a : b
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *FalseBranch = Node.getFalseExpr())
    return InnerMatcher.matches(*FalseBranch, Finder, Builder);
  return false;
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
/// class A {};
/// class B; // Doesn't match, as it has no body.
/// int va;
/// extern int vb; // Doesn't match, as it doesn't define the variable.
/// void fa() {}
/// void fb(); // Doesn't match, as it has no body.
/// @interface X
/// - (void)ma; // Doesn't match, interface is declaration.
/// @end
/// @implementation X
/// - (void)ma {}
/// @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
/// Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  // Only the redeclaration that actually provides the definition matches
  // (see the examples in the doc comment above).
  return Node.isThisDeclarationADefinition();
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
/// void f(...);
/// void g(int);
/// template <typename... Ts> void h(Ts...);
/// void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); }
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
/// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
/// ofClass(hasName("A"))))))
/// \code
/// class A {
/// public:
/// A();
/// };
/// A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  if (const CXXRecordDecl *Parent = Node.getParent())
    return InnerMatcher.matches(*Parent, Finder, Builder);
  return false;
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
/// class A { virtual void f(); };
/// class B : public A { void f(); };
/// class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
/// that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
/// class A1 { virtual void f(); };
/// class A2 { virtual void f(); };
/// class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
/// once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
              internal::Matcher<CXXMethodDecl>, InnerMatcher) {
  // Accumulate one result set per directly overridden method that satisfies
  // InnerMatcher, so a single call site can yield multiple matches.
  BoundNodesTreeBuilder Result;
  bool Matched = false;
  for (const auto *Overridden : Node.overridden_methods()) {
    // Give each candidate its own copy of the current bindings so that
    // failed attempts do not pollute the bindings of successful ones.
    BoundNodesTreeBuilder OverriddenBuilder(*Builder);
    const bool OverriddenMatched =
        InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
    if (OverriddenMatched) {
      Matched = true;
      Result.addMatch(OverriddenBuilder);
    }
  }
  // Replace the caller's bindings with the accumulated per-override results.
  *Builder = std::move(Result);
  return Matched;
}
/// Matches declarations of virtual methods and C++ base specifers that specify
/// virtual inheritance.
///
/// Example:
/// \code
/// class A {
/// public:
/// virtual void x(); // matches x
/// };
/// \endcode
///
/// Example:
/// \code
/// class Base {};
/// class DirectlyDerived : virtual Base {}; // matches Base
/// class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base
/// \endcode
///
/// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER(isVirtual,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl,
                                                        CXXBaseSpecifier)) {
  // Both supported node types expose isVirtual().
  return Node.isVirtual();
}
/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// void x();
/// };
/// \endcode
/// matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
  // Matches only when 'virtual' is spelled on this declaration, not when
  // virtualness is merely inherited from an overridden method.
  return Node.isVirtualAsWritten();
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
/// class A final {};
///
/// struct B {
/// virtual void f();
/// };
///
/// struct C : B {
/// void f() final;
/// };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
                                                        CXXMethodDecl)) {
  // The 'template' keyword is required because Node's type (NodeType) is a
  // template parameter here, making hasAttr a dependent member template.
  return Node.template hasAttr<FinalAttr>();
}
/// Matches if the given method declaration is pure.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x() = 0;
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); }
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
/// void foo() const;
/// void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); }
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
  // Delegates to CXXMethodDecl::isCopyAssignmentOperator().
  return Node.isCopyAssignmentOperator();
}
/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  // Delegates to CXXMethodDecl::isMoveAssignmentOperator().
  return Node.isMoveAssignmentOperator();
}
/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  // Either the method actually overrides something, or it carries an
  // explicit 'override' attribute.
  if (Node.size_overridden_methods() > 0)
    return true;
  return Node.hasAttr<OverrideAttr>();
}
/// Matches method declarations that are user-provided.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &) = default; // #2
/// S(S &&) = delete; // #3
/// };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) { return Node.isUserProvided(); }
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// template <class T> void f() { this->f<T>(); f<T>(); }
/// int a;
/// static int b;
/// };
/// template <class T>
/// class Z {
/// void x() { this->m; }
/// };
/// \endcode
/// memberExpr(isArrow())
/// matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
/// matches this->m
/// unresolvedMemberExpr(isArrow())
/// matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  // All three member-expression variants record whether '->' was used.
  return Node.isArrow();
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) { return Node->isIntegerType(); }
/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
  // operator-> on QualType reaches the underlying Type.
  return Node->isUnsignedIntegerType();
}
/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); }
/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
/// void a(char);
/// void b(wchar_t);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); }
/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
///   int *i = nullptr;
///
///   @interface Foo
///   @end
///   Foo *f;
///
///   int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
///   matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
  return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
///   void a(int);
///   void b(int const);
///   void c(const int);
///   void d(const int*);
///   void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
///   matches "void b(int const)", "void c(const int)" and
///   "void e(int const) {}". It does not match d as there
///   is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
  // Note: checks the QualType itself, not the pointee (no pointer deref).
  return Node.isConstQualified();
}
/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
///   void a(int);
///   void b(int volatile);
///   void c(volatile int);
///   void d(volatile int*);
///   void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
///   matches "void b(int volatile)", "void c(volatile int)" and
///   "void e(int volatile) {}". It does not match d as there
///   is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  return Node.isVolatileQualified();
}
/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
///   typedef const int const_int;
///   const_int i;
///   int *const j;
///   int *volatile k;
///   int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
///   struct { int first, second; } first, second;
///   int i(second.first);
///   int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
///   matches second.first
///   but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
///   struct X {
///     int m;
///     int f(X x) { x.m; return m; }
///   };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
///   matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // For the two dependent member-expression kinds, an implicit access has no
  // base expression to hand to the inner matcher, so reject those up front.
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b"))))
///   matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Succeeds if any shadow decl introduced by this using-decl matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}
/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
///   namespace X { int a; void b(); }
///   using X::a;
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
///   matches \code using X::b \endcode
///   but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
///   template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   matches the template instantiation of X<A>.
///
/// But given
/// \code
///   template <typename T> class X {}; class A {};
///   template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Query the specialization kind once; any of the three instantiation kinds
  // (implicit, explicit definition, explicit declaration) counts. Explicit
  // specializations (TSK_ExplicitSpecialization) deliberately do not.
  const TemplateSpecializationKind Kind = Node.getTemplateSpecializationKind();
  return Kind == TSK_ImplicitInstantiation ||
         Kind == TSK_ExplicitInstantiationDefinition ||
         Kind == TSK_ExplicitInstantiationDeclaration;
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
///   template<typename T> void A(T t) { T i; }
///   A(0);
///   A(0U);
/// \endcode
/// functionDecl(isInstantiated())
///   matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  // A decl qualifies if it is itself an instantiation, or if any ancestor is.
  auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                    functionDecl(isTemplateInstantiation())));
  return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation)));
}
/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
///   int j;
///   template<typename T> void A(T t) { T i; j += 42;}
///   A(0);
///   A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
///   matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
///   will NOT match j += 42; as it's shared between the template definition and
///   instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  // Statements have no specialization kind of their own, so look upward for
  // an instantiated record or function ancestor.
  return stmt(
      hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                             functionDecl(isTemplateInstantiation())))));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
///   template<typename T> void A(T t) { }
///   template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
///   matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
                                internal::Matcher<QualType>, InnerMatcher, 0) {
  // Adapts a QualType matcher so it can run against the TypeLoc tree.
  return internal::BindableMatcher<TypeLoc>(
      new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
///   struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
///   matches "bool func();"
AST_MATCHER(Type, booleanType) {
  return Node.isBooleanType();
}
/// Matches type \c void.
///
/// Given
/// \code
///   struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
///   matches "void func();"
AST_MATCHER(Type, voidType) {
  return Node.isVoidType();
}
// Convenience alias: a variadic matcher that dyn_casts Type to NodeType,
// used for all the extern type matchers declared below.
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
///   struct A {};
///   A a;
///   int b;
///   float c;
///   bool d;
/// \endcode
/// builtinType()
///   matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;
/// Matches all kinds of arrays.
///
/// Given
/// \code
///   int a[] = { 2, 3 };
///   int b[4];
///   void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
///   matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;
/// Matches C99 complex types.
///
/// Given
/// \code
///   _Complex float f;
/// \endcode
/// complexType()
///   matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
///   int i;
///   float f;
/// \endcode
/// realFloatingPointType()
///   matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
  return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
///   struct A {};
///   A a[7];
///   int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
///   matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
                                  AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
                                                                  ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
///   void() {
///     int a[2];
///     int b[] = { 2, 3 };
///     int c[b[0]];
///   }
/// \endcode
/// constantArrayType()
///   matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
///   int a[42];
///   int b[2 * 21];
///   int c[41], d[43];
///   char *s = "abcd";
///   wchar_t *ws = L"abcd";
///   char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
///   matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
///   matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
                                                          StringLiteral),
                          unsigned, N) {
  // Per-node-type dispatch: array extent for ConstantArrayType, character
  // count for StringLiteral.
  return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
///   template<typename T, int Size>
///   class array {
///     T data[Size];
///   };
/// \endcode
/// dependentSizedArrayType
///   matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
///   int a[] = { 2, 3 };
///   int b[42];
///   void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
///   matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
///   void f() {
///     int a[] = { 2, 3 }
///     int b[42];
///     int c[a[0]];
///   }
/// \endcode
/// variableArrayType()
///   matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
///   void f(int b) {
///     int a[b];
///   }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
///   varDecl(hasName("b")))))))
///   matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
///   _Atomic(int) i;
/// \endcode
/// atomicType()
///   matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
///   _Atomic(int) i;
///   _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
///   matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
                                  AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
///   auto n = 4;
///   int v[] = { 2, 3 }
///   for (auto i : v) { }
/// \endcode
/// autoType()
///   matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
///   short i = 1;
///   int j = 42;
///   decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
///   matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
///   auto a = 1;
///   auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
///   matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
///   decltype(1) a = 1;
///   decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
///   matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
///   int (*f)(int);
///   void g();
/// \endcode
/// functionType()
///   matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
///   int (*f)(int);
///   void g();
/// \endcode
/// functionProtoType()
///   matches "int (*f)(int)" and the type of "g" in C++ mode.
///   In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
///   int (*ptr_to_array)[4];
///   int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
///   int (*ptr_to_array)[4];
///   int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
///   struct A { int i; }
///   A::* ptr = A::i;
/// \endcode
/// memberPointerType()
///   matches "A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
///   int *a;
///   int &b = *a;
///   int c = 5;
///
///   @interface Foo
///   @end
///   Foo *f;
/// \endcode
/// pointerType()
///   matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
///   int *a;
///
///   @interface Foo
///   @end
///   Foo *f;
/// \endcode
/// pointerType()
///   matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
///   int *a;
///   int const *b;
///   float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
///   matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
///   Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
    pointee, getPointee,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
                                    PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
///   typedef int X;
/// \endcode
/// typedefType()
///   matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
///   enum C { Green };
///   enum class S { Red };
///
///   C c;
///   S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
///   template <typename T>
///   class C { };
///
///   template class C<int>;  // A
///   C<char> var;            // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
    templateSpecializationType;
/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
///   template <typename T>
///   class C { public: C(T); };
///
///   C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
    deducedTemplateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
///   typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
///   matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
///   class C {};
///   struct S {};
///
///   C c;
///   S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
///   enum E {};
///   class C {};
///
///   E e;
///   C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   class C {};
///
///   class C c;
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // Elaborated types without a qualifier (e.g. "class C c;") never match.
  const NestedNameSpecifier *Qualifier = Node.getQualifier();
  if (!Qualifier)
    return false;
  return InnerMatcher.matches(*Qualifier, Finder, Builder);
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
///   template <typename T>
///   void F(T t) {
///     int i = 1 + t;
///   }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
    substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
///   template <typename T>
///   double F(T t);
///   int i;
///   double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
    hasReplacementType, getReplacementType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
///     (matcher = templateTypeParmType())
/// \code
///   template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
///     (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
///   template <typename T> struct S {
///     void f(S s);
///     void g(S<T> s);
///   };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
///     (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
///     (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
///   void f(int i[]) {
///     i[1] = 0;
///   }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
              InnerType) {
  return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  // The semantic DeclContext is itself a Decl; re-view it as such before
  // handing it to the inner matcher.
  const DeclContext *DC = Node.getDeclContext();
  if (!DC) return false;
  return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder);
}
/// Matches nested name specifiers.
///
/// Given
/// \code
///   namespace ns {
///     struct A { static void f(); };
///     void A::f() {}
///     void g() { A::f(); }
///   }
///   ns::A a;
/// \endcode
/// nestedNameSpecifier()
///   matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
    nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
    nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  // Adapts a NestedNameSpecifier matcher to the corresponding Loc node.
  return internal::BindableMatcher<NestedNameSpecifierLoc>(
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
///   struct A { struct B { struct C {}; }; };
///   A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
///   hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
///   matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type specifiers can match; namespace specifiers have no type.
  if (!Node.getAsType())
    return false;
  // Wrap the bare Type in an unqualified QualType for the inner matcher.
  return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
///   struct A { struct B { struct C {}; }; };
///   A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
///   hasDeclaration(cxxRecordDecl(hasName("A")))))))
///   matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
              internal::Matcher<TypeLoc>, InnerMatcher) {
  // Guard against invalid locs and non-type specifiers before matching.
  return Node && Node.getNestedNameSpecifier()->getAsType() &&
         InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
///   struct A { struct B { struct C {}; }; };
///   A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and
///   matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  // The outermost specifier has no prefix and therefore cannot match.
  if (const NestedNameSpecifier *Prefix = Node.getPrefix())
    return InnerMatcher.matches(*Prefix, Finder, Builder);
  return false;
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
///   struct A { struct B { struct C {}; }; };
///   A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
///   matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  // An invalid (empty) prefix loc converts to false and never matches.
  const NestedNameSpecifierLoc Prefix = Node.getPrefix();
  return Prefix && InnerMatcher.matches(Prefix, Finder, Builder);
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
///   namespace ns { struct A {}; }
///   ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
///   matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  // Only specifiers that actually name a namespace can match; type or
  // namespace-alias specifiers yield a null NamespaceDecl here.
  const NamespaceDecl *Namespace = Node.getAsNamespace();
  return Namespace && InnerMatcher.matches(*Namespace, Finder, Builder);
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
  // Pointer comparison suffices: each Decl exists exactly once in the AST.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
  return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
///   switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
///   matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
              InnerMatcher) {
  // FIXME: getSwitchCaseList() does not necessarily guarantee a stable
  // iteration order. We should use the more general iterating matchers once
  // they are capable of expressing this matcher (for example, it should ignore
  // case statements belonging to nested switch statements).
  BoundNodesTreeBuilder Matches;
  bool FoundMatch = false;
  // Walk the intrusive singly-linked list of cases hanging off the switch.
  for (const SwitchCase *Case = Node.getSwitchCaseList(); Case;
       Case = Case->getNextSwitchCase()) {
    // Each case gets its own copy of the bindings accumulated so far, so
    // one match per case can be produced.
    BoundNodesTreeBuilder LocalBuilder(*Builder);
    if (InnerMatcher.matches(*Case, Finder, &LocalBuilder)) {
      FoundMatch = true;
      Matches.addMatch(LocalBuilder);
    }
  }
  *Builder = std::move(Matches);
  return FoundMatch;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
///   class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
///   forField(decl().bind("x"))
/// ))
///   will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  BoundNodesTreeBuilder Matches;
  bool FoundMatch = false;
  for (const auto *Init : Node.inits()) {
    // Each initializer gets its own copy of the bindings so far, producing
    // one match per matching initializer.
    BoundNodesTreeBuilder LocalBuilder(*Builder);
    if (InnerMatcher.matches(*Init, Finder, &LocalBuilder)) {
      FoundMatch = true;
      Matches.addMatch(LocalBuilder);
    }
  }
  *Builder = std::move(Matches);
  return FoundMatch;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
///   struct S {
///     S(); // #1
///     S(const S &); // #2
///     S(S &&); // #3
///   };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
  return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
///   struct S {
///     S(); // #1
///     S(const S &); // #2
///     S(S &&); // #3
///   };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
  return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
///   struct S {
///     S(); // #1
///     S(const S &); // #2
///     S(S &&); // #3
///   };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
  return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
///   struct S {
///     S(); // #1
///     S(int) {} // #2
///     S(S &&) : S() {} // #3
///   };
///   S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
  return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
                            CXXConstructorDecl, CXXConversionDecl,
                            CXXDeductionGuideDecl)) {
  // All three supported node types expose isExplicit(). Per the
  // documentation above, an explicit(expr) specifier only counts when it
  // resolves to true; that resolution is performed by the node itself.
  return Node.isExplicit();
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only declarations whose explicit specifier carries an expression
  // (i.e. explicit(expr)) can match; a plain `explicit` has no expression.
  if (const Expr *SpecExpr = ExplicitSpecifier::getFromDecl(&Node).getExpr())
    return InnerMatcher.matches(*SpecExpr, Finder, Builder);
  return false;
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
                                                        FunctionDecl)) {
  // This is required because the spelling of the function used to determine
  // whether inline is specified or not differs between the polymorphic types.
  if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
    return FD->isInlineSpecified();
  else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
    return NSD->isInline();
  // The polymorphic matcher restricts Node to the two types handled above,
  // so this point is unreachable.
  llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
  // Thin wrapper: the check is delegated entirely to the AST node.
  return Node.isAnonymousNamespace();
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); }
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A GNU case range ("case lo ... hi") carries an RHS; such statements are
  // excluded here, and the (single) case constant is the LHS.
  return Node.getRHS() == nullptr &&
         InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // True if any attribute attached to the declaration has the requested kind.
  return llvm::any_of(Node.attrs(), [AttrKind](const Attr *A) {
    return A->getKind() == AttrKind;
  });
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A bare `return;` has no value expression and therefore never matches.
  const auto *RetValue = Node.getRetValue();
  return RetValue != nullptr &&
         InnerMatcher.matches(*RetValue, Finder, Builder);
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
// Matcher instance; declared extern here, defined out-of-line in the
// ASTMatchers implementation.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
    cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER(Expr, nullPointerConstant) {
  // NPC_ValueDependentIsNull: value-dependent expressions (in templates)
  // are treated as null pointer constants here.
  return Node.isNullPointerConstant(Finder->getASTContext(),
                                    Expr::NPC_ValueDependentIsNull);
}
/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Depth-first walk up the parent chain of the statement, looking for a
  // FunctionDecl (or the call operator of an enclosing LambdaExpr) that
  // satisfies InnerMatcher.
  const auto &Parents = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while (!Stack.empty()) {
    // Take the node by value before popping: pop_back() destroys the
    // element, so a reference obtained from back() would dangle once the
    // element is popped (as the previous code did).
    const DynTypedNode CurNode = Stack.pop_back_val();
    if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder))
        return true;
    } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // A lambda body belongs to the closure's call operator, not to the
      // surrounding function, so match against the call operator instead.
      if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
                               Builder))
        return true;
    } else {
      // Not a function-like node: keep climbing through this node's parents.
      for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  // Thin wrapper: the linkage computation is delegated to the AST node.
  return Node.hasExternalFormalLinkage();
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument. For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
/// matches the parameter of y
///
/// A matcher such as
/// parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
  // Thin wrapper: the check is delegated entirely to the AST node.
  return Node.hasDefaultArg();
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
  // Thin wrapper: the check is delegated entirely to the AST node.
  return Node.isArray();
}
/// Matches placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage, 16) MyClass();
/// \endcode
/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
               internal::Matcher<Expr>, InnerMatcher) {
  // Guard against out-of-range indices before dereferencing the argument.
  if (Index >= Node.getNumPlacementArgs())
    return false;
  return InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
}
/// Matches any placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage) MyClass();
/// \endcode
/// cxxNewExpr(hasAnyPlacementArg(anything()))
/// matches the expression 'new (Storage) MyClass()'.
AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
              InnerMatcher) {
  // Succeeds if at least one placement argument satisfies InnerMatcher.
  return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) {
    return InnerMatcher.matches(*Arg, Finder, Builder);
  });
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
/// matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  // The single dereference of getArraySize() yields the contained Expr*,
  // which is null-checked before the double dereference passes the
  // expression to the inner matcher.
  // NOTE(review): this assumes the optional returned by getArraySize() is
  // engaged whenever isArray() is true — confirm against CXXNewExpr's API.
  return Node.isArray() && *Node.getArraySize() &&
         InnerMatcher.matches(**Node.getArraySize(), Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
  // Thin wrapper: the check is delegated entirely to the AST node.
  return Node.hasDefinition();
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
  // Thin wrapper: the check is delegated entirely to the AST node.
  return Node.isScoped();
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  // Only prototyped function types record trailing-return syntax; anything
  // else (e.g. a K&R-style type) cannot match.
  const auto *Proto = Node.getType()->getAs<FunctionProtoType>();
  return Proto != nullptr && Proto->hasTrailingReturn();
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
/// H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
              ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // E tracks the node that we are examining.
  const Expr *E = &Node;
  // If present, remove an outer `ExprWithCleanups` corresponding to the
  // underlying `CXXConstructExpr`. This check won't cover all cases of added
  // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
  // EWC is placed on the outermost node of the expression, which this may not
  // be), but, it still improves the coverage of this matcher.
  if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
    E = CleanupsExpr->getSubExpr();
  if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
    if (CtorExpr->isElidable()) {
      // Skip the elidable constructor call and the MaterializeTemporaryExpr
      // wrapping its argument, matching directly on the underlying
      // expression instead.
      if (const auto *MaterializeTemp =
              dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
        return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
                                    Builder);
      }
    }
  }
  // No elidable constructor found: match against the node as given.
  return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
// Matcher instance; declared extern here, defined out-of-line in the
// ASTMatchers implementation.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
    ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective()))`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
  // Thin wrapper: the check is delegated entirely to the AST node.
  return Node.isStandaloneDirective();
}
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// ;
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  // Standalone directives carry no structured block, so they can never
  // match; everything else is delegated to the inner matcher.
  return !Node.isStandaloneDirective() &&
         InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
              internal::Matcher<OMPClause>, InnerMatcher) {
  // Succeeds on the first clause of the directive that satisfies
  // InnerMatcher.
  ArrayRef<OMPClause *> Clauses = Node.clauses();
  return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(),
                                    Clauses.end(), Finder, Builder);
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and
/// ``default(firstprivate)``
// Matcher instance; declared extern here, defined out-of-line in the
// ASTMatchers implementation.
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
    ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
  // Direct comparison against the OpenMP default-kind enumerator.
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
  // Direct comparison against the OpenMP default-kind enumerator.
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared;
}
/// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind
/// specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isFirstPrivateKind())`` matches only
/// ``default(firstprivate)``.
AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) {
  // Direct comparison against the OpenMP default-kind enumerator.
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel for
/// #pragma omp for
/// \endcode
///
/// ``ompExecutableDirective(isAllowedToContainClauseKind(OMPC_default))`` matches
/// ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default").``
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  // Clause legality is queried from the OpenMP support library; the OpenMP
  // language-option value is passed along so the answer can depend on the
  // active OpenMP setting.
  return llvm::omp::isAllowedClauseForDirective(
      Node.getDirectiveKind(), CKind,
      Finder->getASTContext().getLangOpts().OpenMP);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
collective.c | /*******************************************************************************
Collective Matrix Factorization
-------------------------------
This is a module for multi-way factorization of sparse and dense matrices
intended to be used for recommender system with explicit feedback data plus
side information about users and/or items.
The reference papers are:
(a) Cortes, David.
"Cold-start recommendations in Collective Matrix Factorization."
arXiv preprint arXiv:1809.00366 (2018).
(b) Singh, Ajit P., and Geoffrey J. Gordon.
"Relational learning via collective matrix factorization."
Proceedings of the 14th ACM SIGKDD international conference on
Knowledge discovery and data mining. 2008.
(c) Hu, Yifan, Yehuda Koren, and Chris Volinsky.
"Collaborative filtering for implicit feedback datasets."
2008 Eighth IEEE International Conference on Data Mining.
Ieee, 2008.
(d) Takacs, Gabor, Istvan Pilaszy, and Domonkos Tikk.
"Applications of the conjugate gradient method for
implicit feedback collaborative filtering."
Proceedings of the fifth ACM conference on
Recommender systems. 2011.
(e) Rendle, Steffen, Li Zhang, and Yehuda Koren.
"On the difficulty of evaluating baselines:
A study on recommender systems."
arXiv preprint arXiv:1905.01395 (2019).
(f) Franc, Vojtech, Vaclav Hlavac, and Mirko Navara.
"Sequential coordinate-wise algorithm for the
non-negative least squares problem."
International Conference on Computer Analysis of Images
and Patterns. Springer, Berlin, Heidelberg, 2005.
(g) Zhou, Yunhong, et al.
"Large-scale parallel collaborative filtering for
the netflix prize."
International conference on algorithmic applications in management.
Springer, Berlin, Heidelberg, 2008.
For information about the models offered here and how they are fit to
the data, see the files 'collective.c' and 'offsets.c'.
Written for C99 standard and OpenMP version 2.0 or higher, and aimed to be
used either as a stand-alone program, or wrapped into scripting languages
such as Python and R.
<https://www.github.com/david-cortes/cmfrec>
MIT License:
Copyright (c) 2020-2021 David Cortes
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
*******************************************************************************/
#include "cmfrec.h"
/*******************************************************************************
Collective Model
----------------
This is a generalization of the model described in
Singh, Ajit P., and Geoffrey J. Gordon.
"Relational learning via collective matrix factorization."
Proceedings of the 14th ACM SIGKDD international conference on
Knowledge discovery and data mining. 2008.
=========================== COMMON PART ===========================
A note about the mathematical notation used through these comments:
- Variables with capital letters denote 2D matrices - e.g. 'X'.
- Smaller-letter versions of a 2D matrix denote a single row of it.
- X[m,n] denotes that matrix 'X' has dimensions (m,n)
    - x[n] denotes that vector 'x' has dimension 'n'.
- X(:m,:n) denotes taking the first 'm' rows and 'n' columns of 'X'.
    - X(m:p,n:q) denotes taking the rows from 'm' through 'p' and columns
from 'n' to 'q'.
- t(X) denotes the transpose of 'X', inv(X) the inverse.
- ||X|| denotes the L2 norm of X (sum of squared entries).
- '.' denotes element-wise multiplication
- sigm(X) denotes a sigmoid elementwise transformation: 1 / (1 + exp(-X))
- Matrices followed by a small 'b' denote binary (0/1) entries only.
- Matrices followed by small 'u', 'i', 'm', 's', denote that only the
columns corresponding to certain components are taken.
- Ae denotes an extended block matrix.
- [Au, As, Ak] denotes a block matrix comprised by the column union of
the above 3 matrices.
    - [[A1, A2, A3], denotes a block matrix with sub-blocks arranged by rows
[A4, A5, A6]] and by columns like that.
General idea is to factorize a sparse input matrix X[m,n] into the product
of two lower dimensional matrices A[m,k] and B[n,k], in such a way that the
squared error is minimized on the non-missing entries in X, given by binary
matrix M[m,n], i.e.
min || M . (X - A*t(B)) ||^2
    As some small improvements, the matrix 'X' is centered by subtracting the
mean from it, and additionally subtracting row and column biases, which
are model parameters too, while imposing a regularization penalty on the
magnitude of the parameters (given by L2 norm):
min ||M . (X - A*t(B) - mu[1] - b1[m,1] - b2[1,n])||^2
+ lambda*(||A||^2 + ||B||^2 + ||b1||^2 + ||b2||^2)
The intended purpose is to use this as a recommender system model, in which
'X' is a matrix comprising the ratings that users give to items, with each
row corresponding to a user, each column to an item, and each non-missing
entry to the observed rating or explicit evaluation from the user.
For the case of recommender systems, there is also the so-called
'implicit-feedback' model, in which the entries of 'X' are assumed to all
be zeros or ones (i.e. the matrix is full with no missing values), but with
a weight given by the actual values and a confidence score multiplier:
min ||sqrt(alpha*X + 1) . (M - A*t(B))||^2 + lambda*(||A||^2 + ||B||^2)
======================= END OF COMMON PART =========================
The basic model is complemented with side information about the users and
the items in the form of matrices U[m,p], I[n,q], which are also factorized
using the same A and B matrices as before, which are multiplied by new
matrices C[p,k] and D[q,k] - e.g.:
min ||M . (X - A*t(B))||^2 + ||U - A*t(C)||^2 + ||I - B*t(D)||^2
This idea is further extended by:
- Letting some of the 'k' components of each matrix be used only
for one factorization but not for the other (these are: 'k' for the
shared ones, 'k_main' for those that apply to approximate 'X',
'k_user' for those that apply to approximate U, 'k_item' for I).
- Applying a sigmoid transformation on the obtained approximate matrices
for the columns that are binary in the user/item side information, e.g.
min ||Ub - sigm(A*t(C))||^2
The model can be fit either through a gradient-based approach using the
L-BFGS solver, or (when there are no binary variables requiring a sigmoid
transformation) through an alternating least-squares - note that when
all but one matrices are fixed, there is a closed-form solution for the
variable matrix which can be computed for each row in parallel.
This module allows some yet additional distinctions in the formulas:
- Different regularizaton parameter for each matrix.
- Different weights for each factorization (w_main, w_user, w_item).
- Observation weights W[m,n] for each entry in X (these have the same
effect as repeating 'w_main' by 'n' times).
- Having biases only for users and/or only for items, or for neither.
- The U and I matrices are centered column-by-column, but these column
biases are not model parameters.
- U and I can also have missing values.
And allows working with the inputs either as sparse or as dense matrices.
For the gradient-based solution, the gradients can be calculated as:
grad(A) = (W[m,n] . M[m,n] . (A*t(B) - X - b1 - b2 - mu)) * B
+ (Mu[m,p] . (A*t(C) - U))*C
+ (Mb.(Ub-sigm(A*t(Cb)))*exp(-A*t(Cb))/(exp(-A*t(Cb))+1)^2)*Cb
+ lambda * A
(The function value needs to be divided by 2 to match with the gradient
calculated like that)
For the closed-form solution with no binary variables, assuming that the
matrix A is a block matrix composed of independent components in this order
(first user-independent, then shared, then rating-independent):
Ae = [Au, As, Am]
The solution can be obtained **for each row 'a' of 'A'** by factorizing an
extended X matrix like this:
Xe = [Xa[1,n], Ua[1,p]]
in which [Au[1,k_user], As[1,k], Am[1,k_main]] is multiplied by another
extended block matrix:
Be = [[0, Bs, Bm],
[Cu, Cs, 0 ]]
Where each row of [Bi, Bs, Bm] has the values of B if the entry for that
column in the corresponding row Xa of X is present, and zeros if it's
missing, i.e.
[Bs, Bm, Bi] = B * t(M[1,n])
The solution is then given by:
A* = inv(t(Be.W)*Be + diag(lambda)) * (t(Be.W)*t(Xa))
Note that since the left-hand side is, by definition, a symmetric
positive-semi definite matrix, this computation can be done with
specialized procedures based on e.g. a Cholesky factorization, rather than
a more general linear solver or matrix inversion.
Also note that 'w_main' and 'w_user' can be incorporated efficiently by
rescaling the rows once they sum into t(Be)*Be - that is, it can be updated
like this:
T := 0[k_user+k+k_main, k_user+k+k_main]
T(k_user:,k_user:) += w_main*t(B)*B
T(:k_user+k, :k_user+k) += w_user*t(C)*C
<T is now equal to t(Be)*Be>
What's more, it's possible to simplify out one of the 3 weights by dividing
the other two and the regularization by it. Here, 'w_main' is the one that
gets simplified out (this also allows simpler functions for the
non-collective factors) by dividing the others (w_user, w_main, lambda)
by it.
As an alternative to the Cholesky method, can also use a Conjugate Gradient
method which follows an iterative procedure for each row a[n] of A,
taking the corresponding vectors u[p] from 'U', x[n] from 'X',
**assuming that all rows in 'B' and 'C' for which the corresponding value
in x[n] or u[p] is missing are set to zero**, iterating as follows:
r[k_user+k+k_main] := 0
r(k_user:) += w_main * (t(B)*x - t(B)*B*a(k_user:))
r(:k_user+k) += w_user * (t(C)*u - t(C)*C*a(:k_user+k))
r(:) += lambda * a
pp[k_user+k+k_main] := r(:)
ap[k_user+k+k_main] := 0
r_old = ||r||^2
for i..s:
ap(:) := 0
ap(k_user:) += w_main * t(B)*B*pp(k_user:)
ap(:k_user+k) += w_user * t(C)*C*pp(:k_user+k)
ap(:) += lambda * pp
a(:) += (r_old / <pp, ap>) * pp
r(:) -= (r_old / <pp, ap>) * ap
r_new := ||r||^2
<Terminate if r_new is small enough>
pp(:) := (r_new / r_old) * pp + r
r_old := r_new
The key for this approach is: if there are few non-missing values of 'X',
it's faster to compute t(B)*B*v as t(B)*( B*v ) several times than to
compute t(B)*B as required for the Cholesky. The CG method is mathematically
guaranteed to reach the optimum solution in no more steps than the dimension
of 'Be' (here: k_user+k+k_main), assuming infinite numerical precision, but
in practice, since it is not required to reach the optimal solution at each
iteration, can run it for only a small number of steps (e.g. 2 or 3) during
each update of 'A' (since next time the other matrices will be updated too,
makes sense not to spend too much time on a perfect solution for a single
matrix vs. spending little time on worse solutions but doing more updates
to all matrices).
Note that, for both the Cholesky and the CG method, there are some extra
potential shortcuts that can be taken - for example:
- If 'X' has no missing values, it's possible to precompute
t(B)*B to use it for all rows at once (same for 'U' and t(C)*C).
If there are few missing values, can compute it for all rows at once
and then subtract from it to obtain the required values for
a given row.
- In the Cholesky case, if there are no missing values, can use the same
Cholesky for all rows at once, and if there are few missing values,
can at first compute the solution for all rows (ignoring potential
missing values for the rows that have missing values), and then do
a post-hoc pass over the rows that have missing values computing their
actual solutions through either the Cholesky or the CG method.
- If t(B)*B or t(C)*C are computed from all rows in B/C, when iterating
over a given row which has missing values in only one of 'X' or 'U',
it's still possible to use the precomputed matrix for one side while
making the necessary computations for the other.
- If 'X' has weights and is sparse with missing-as-zero (e.g. the
implicit-feedback model), it's possible for both methods to use a
t(B)*B computed from all rows, and then add the non-zero entries as
necessary, taking into consideration that they were previously added
with a weight of one and thus their new weight needs to be decreased.
The row and column biases (b1/b2) for the factorization of 'X' are obtained
as follows: first, all the X[m,n] data are centered by subtracting their
global mean (mu). Then, the biases are initialized by an alternating
optimization procedure (between row/column biases), in which at each
iteration, the optimal biases are calculated in closed-form.
For a given row 'a', the closed-form minimizer of the bias is:
        bias(a) = sum(X(a,:) - b2[1,n] - mu[1]) / (N_non_NA(X(a,:)) + lambda)
At the beginning of each ALS iteration, assuming the 'A' matrix is being
optimized, the procedure then subtracts from the already-centered X the
current biases:
        X_iter[m,n] = X[m,n] - b1[m,1] - mu[1]
and adds an extra column of all-ones at the end of the other factorizing
matrix:
B_iter[n+p, k_user+k+k_main+1] := [[Be(:n, :), 1[n,1] ],
[Be(n:, :), 0[p,1] ]]
The A matrix being solved for is also extended by 1 column.
The procedure is then run as usual with the altered X and Be matrices, and
after solving for A, the new values for b1[m,1] will correspond to its last
column, which will not be used any further when optimizing for the other
matrices. The 'X' matrix then needs to be restored to its original values,
and the all-ones column in 'Be' is also ignored.
Note that it is possible to set a different regulatization parameter for the
biases by simply changing the last element of diag(lambda) that is added
to the matrices (or the last element of ap/pp for the CG method)
accordingly.
When using 'NA_as_zero', adding the biases is a bit more tricky, as then
subtracting them from 'X' would result in a dense matrix. However, since
the solution to the problem is also the solution to:
min t(B)*B - t(B)*t(X)
and it is this second form that's used to find the factors, the biases can
instead be subtracted from t(B)*t(X):
t(B)*t(X-b) = t(B)*t(X) + t(B)*t(-b)
Hence, it's only necessary to calculate -t(B)*t(b), and then subtract it
when necessary (first step in the conjugate gradient method, right hand side
in the Cholesky method). The same trick can be used for mean-centering too.
Both the gradient-based approach and the closed-form solution with these
formulas can be used for any of the 4 matrices by substituting the matrices
accordingly - i.e.
For matrix 'B', replace:
A->B, C->D, X->t(X), U->I
For matrix 'C', replace:
A->C, C->NULL, X->U, U->NULL
For matrix 'D', replace:
A->D, C->NULL, X->I, U->NULL
In addition, it's also possible to fit the weighted-implicit model in which:
- There are no biases and the global mean is not used.
- The values of 'X' are all zeros or ones.
- There is an associated weight to each non-zero value of 'X', given by
W = alpha*X + 1.
Using the same procedures as explained earlier. The module includes
specialized functions for those which make fewer checks on the data.
    As a special case of 'side information', the model can also add so-called
'implicit-features', which are binarized matrices telling whether each
entry in 'X' is non-zero or not. These usually result in a very small
improvement in recommender systems, with or without side information.
Since these are considered separate from the actual side information, it's
still possible to add external features. The procedure for adding these
features is the same as for the real side information, but in this case
assuming the missing entries are actual zeros (see reference (d)).
In order to obtain the factor matrices for new users, in a cold-start
scenario (based on user attributes alone), it's only necessary to obtain
the closed form for A assuming X is zero, while for a warm-start scenario
(based on both user attributes and ratings), the closed form on block
matrices can be applied. If there are binary variables, there is no
closed-form solution, but can still be obtained in a reasonable time with
a gradient-based approach.
*******************************************************************************/
/* Note: the descriptions about input parameters of the functions might be
outdated and might not match with the actual code anymore. */
/*******************************************************************************
Function and Gradient Calculation
---------------------------------
This function calculates the gradient as explained at the beginning. It
can receive the X, U, I, matrices either as sparse (COO or CSR+CSC depending
on parallelization strategy - see file 'common.c' for details), but be sure
to pass only ONE (dense/sparse) of them. If they have binary variables
(Ub, Ib), these must be passed as dense.
For sparse matrices, non-present values will not be accounted for into the
function and gradient calculations, while for dense matrices, missing values
(as 'NAN') will not be counted.
The X matrix should have already undergone global centering (subtracting the
mean), while the U and I matrices, should be centered column-wise as they
don't have biases as parameters.
If passing observation weights, these must match the shape of the X matrix
- that is, if X is dense, weights must be an array of dimensions (m, n),
if X is sparse, must be an array of dimensions (nnz), and if parallelizing
by independent X matrices, must pass it twice, each matching to a given
format of X.
In order to know how many variables will the model have and/or how much
space is required for temporary variables, use the function
'nvars_collective_fun_grad' beforehand.
Parameters
----------
values[nvars]
The current values for which the function and gradient will
be evaluated.
grad[nvars]
        Array in which to write the gradient evaluated at 'values'.
m
Number of rows in X, A, U, Ub.
n
Number of columns in X and number of rows in B, I, Ib.
k
Dimensionality of the factorizing matrices (a.k.a. latent factors),
denoting only the columns/factors that are shared between two
factorizations (so A has number of columns equal to k_user + k + k_main,
        B has number of columns equal to k_item + k + k_main, and they are
        arranged
in that order - i.e. the first 'k_user' columns in A are factors used
only for factorizing U).
ixA[nnz], ixB[nnz], X[nnz], nnz
The X matrix in sparse triplets (a.k.a. COO) format. Pass NULL if
X will be provided in a different format.
Xfull[m * n]
The X matrix in dense format, with missing entries set as NAN. Pass
NULL if X will be provided in a different format. If X is passed
in multiple formats, the dense one will be required. Note that working
        with dense matrices will require extra memory of the same size for
temporary values.
Xcsr_p[m+1], Xcsr_i[nnz], Xcsr[nnz], Xcsc_p[n], Xcsc_i[nnz], Xcsc[nnz]
The X matrix in sparse CSR and CSC formats. Only used if nthreads>1,
otherwise pass X as COO. These are used if parallelizing according to
a 2-pass strategy (see file "common.c" for details). Pass NULL if
not applicable.
weight[nnz or m*n], weightR[nnz], weightC[nnz]
The observation weights for each entry of X. Must match with the shape
of X - that is, if passing Xfull, must have size 'm*n', if passing X,
must have size 'nnz', if passing Xcsr and Xcsc, must pass weightR
and weightC instead. Pass NULL for uniform weights.
user_bias, item_bias
Whether the model should include these biases as parameters.
lam
Regularization parameter to apply to all matrices.
lam_unique[6]
Regularization parameters for each matrix used in the model. If passed,
        each one will have different regularization, while if omitted (passing
        NULL), will apply the same regularization to all. The entries must be
in this order: (1) user bias, (2) item bias, (3) row/user factors,
(4) column/item factors, (5) user attribute factors, (6) item attribute
factors.
U[m_u*p], U_row[nnz_U], U_col[nnz_U], U_sp[nnz_U],
U_csr_p[m], U_csr_i[nnz_U], U_csr[nnz_U],
U_csc_p[p], U_csc_i[nnz_U], U_csc[nnz_U]
The user/row attribute matrix - same guidelines as for X apply. Note
that it's possible to pass combinations of sparse X, dense U, sparse I,
etc. without problems.
II[n_i*q], I_row[nnz_I], I_col[nnz_I], I_sp[nnz_I],
I_csr_p[n+1], I_csr_i[nnz_I], I_csr[nnz_I],
I_csc_p[q+1], I_csc_i[nnz_I], I_csc[nnz_I]
The item/column attribute matrix - same guidelines as for X apply. Note
that it's possible to pass combinations of sparse X, dense U, sparse I,
etc. without problems.
Ub[m*pbin]
The binary columns of the user/row attribute matrix. Must be passed as
dense, but can contain missing values. The non-missing entries should
all be either zero or one. Note that it's still possible to pass only
one of U or Ubin, or neither.
m_u
Number of rows in the U matrix, in case not all rows in A have
a corresponding entry in U. If this differs from 'm', the topmost rows
of A and X will be assumed to match to the rows of U. Ignored when
passing sparse U.
m_ubin
Same as above, but for Ub matrix.
p
Number of columns in the U matrix (i.e. number of user attributes).
pbin
Number of columns in the Ub matrix (i.e. number of binary attributes).
Ib[n*qbin]
The binary columns of the item/column attribute matrix. Must be passed
as dense, but can contain missing values. The non-missing entries should
all be either zero or one. Note that it's still possible to pass only
one of I or Ibin, or neither.
n_i
Number of rows in the II matrix, in case not all rows in B have a
corresponding entry in II. If this differs from 'n', the topmost rows
of B and columns of X will be assumed to match to the rows of II.
        Ignored when passing sparse II.
n_ibin
Same as above, but for Ib matrix.
q
Number of columns in the I matrix (i.e. number of item attributes).
qbin
Number of columns in the Ib matrix (i.e. number of binary attributes).
U_has_NA, I_has_NA, Ub_has_NA, Ib_has_NA
Whether these matrices contain any missing values. Ignored when the
matrices are passed as sparse.
buffer_real_t[temp]
Temporary array in which to write values. Only used when passing at
least one matrix as dense. Pass NULL if not applicable. The required
size can be obtained from function 'nvars_collective_fun_grad', but
as a guideline it needs to be able to hold the largest dense matrix
that is passed.
buffer_mt
Temporary array in which to write thread-local values when using the
one-pass parallelization strategy with sparse X matrix in COO format.
Will not be used if passing Xcsr/Xcsc or Xfull or nthreads=1. The
required size can be obtained through function
'nvars_collective_gradient', but as a guideline it needs to hold the
largest combination of gradient matrices used for a given factorization,
multiplied by the number of threads. Pass NULL if not applicable, or if
no parallelization is desired for a sparse X matrix in COO format.
k_main
Factors of A and B which are used only for factorizing the X matrix and
not the other matrices. These will be available in the last columns
of both matrices.
k_user
Factors of A which are used only for factorizing the U and Ub matrices.
These will be available in the first columns of A.
k_item
Factors of B which are used only for factorizing the I and Ib matrices.
These will be available in the first columns of B.
w_main
Weight given to the squared error in the factorization of the X matrix.
w_user
Weight given to the squared error in the factorization of the U and
Ub matrices.
w_item
Weight given to the squared error in the factorization of the II and
Ib matrices.
nthreads
Number of parallel threads to use. Note that (a) this function relies on
BLAS and LAPACK functions, which set their number of threads externally,
(b) Depending on the number of threads relative to the data size and the
parallelization strategy (one-pass or two-pass), adding more threads
might result in a slow down, and if using the one-pass strategy, will
require extra memory.
*******************************************************************************/
/* Computes the sizes needed by 'collective_fun_grad':
   - 'nvars'      : total number of model variables (biases + all factor
                    matrices, using the largest row/column counts).
   - 'nbuffer'    : size of the temporary dense buffer, which must hold the
                    largest dense input matrix plus (for side info) a full
                    gradient copy of A or B.
   - 'nbuffer_mt' : size of the thread-local gradient buffer used by the
                    one-pass parallelization strategy with COO inputs
                    (zero when nthreads <= 1 or when not applicable). */
void nvars_collective_fun_grad
(
    size_t m, size_t n, size_t m_u, size_t n_i, size_t m_ubin, size_t n_ibin,
    size_t p, size_t q, size_t pbin, size_t qbin,
    size_t nnz, size_t nnz_U, size_t nnz_I,
    size_t k, size_t k_main, size_t k_user, size_t k_item,
    bool user_bias, bool item_bias, size_t nthreads,
    real_t *X, real_t *Xfull,
    real_t *U, real_t *Ub, real_t *II, real_t *Ib,
    real_t *U_sp, real_t *U_csr, real_t *I_sp, real_t *I_csr,
    size_t *nvars, size_t *nbuffer, size_t *nbuffer_mt
)
{
    /* A and B span the largest of the row/column counts among X, U/Ub, I/Ib */
    size_t m_max = max2(max2(m, m_u), m_ubin);
    size_t n_max = max2(max2(n, n_i), n_ibin);
    size_t sizeA = m_max * (k_user + k + k_main);
    size_t sizeB = n_max * (k_item + k + k_main);
    *nvars = m_max * (k_user + k + k_main)
              + n_max * (k_item + k + k_main)
              + (p + pbin) * (k + k_user)
              + (q + qbin) * (k + k_item);
    if (user_bias) *nvars += m_max;
    if (item_bias) *nvars += n_max;
    /* dense buffer: must fit the largest dense matrix, plus a gradient copy
       of A/B when side info is present */
    *nbuffer = 0;
    if (Xfull != NULL) *nbuffer = m * n;
    if (U != NULL)  *nbuffer = max2(*nbuffer, m_u * p + sizeA);
    if (II != NULL) *nbuffer = max2(*nbuffer, n_i * q + sizeB);
    if (Ub != NULL) *nbuffer = max2(*nbuffer, m_ubin * pbin + sizeA);
    if (Ib != NULL) *nbuffer = max2(*nbuffer, n_ibin * qbin + sizeB);
    if (U_csr != NULL || U_sp != NULL) *nbuffer = max2(*nbuffer, sizeA);
    /* bug fix: this previously checked 'U_sp' instead of 'I_sp', so a
       sparse-COO item matrix alone would under-size the buffer */
    if (I_csr != NULL || I_sp != NULL) *nbuffer = max2(*nbuffer, sizeB);
    /* thread-local buffers are only needed for COO inputs with nthreads>1 */
    *nbuffer_mt = 0;
    if (nthreads > 1)
    {
        if (Xfull == NULL && X != NULL)
            *nbuffer_mt = (k + k_main) * (m + n)
                           + (user_bias? m : 0)
                           + (item_bias? n : 0);
        if (U == NULL && U_sp != NULL)
            *nbuffer_mt = max2(*nbuffer_mt, (k_user + k) * (m_u + p));
        if (II == NULL && I_sp != NULL)
            *nbuffer_mt = max2(*nbuffer_mt, (k_item + k) * (n_i + q));
        *nbuffer_mt *= nthreads;
    }
}
/* Evaluates the objective function and writes the full gradient for the
   collective model (main X factorization plus U/I side info and the
   binary Ub/Ib factorizations). See the large comment block above for
   the meaning of each argument. Returns the function value. */
real_t collective_fun_grad
(
    real_t *restrict values, real_t *restrict grad,
    int_t m, int_t n, int_t k,
    int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz,
    real_t *restrict Xfull,
    size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr,
    size_t Xcsc_p[], int_t Xcsc_i[], real_t *restrict Xcsc,
    real_t *restrict weight, real_t *restrict weightR, real_t *restrict weightC,
    bool user_bias, bool item_bias,
    real_t lam, real_t *restrict lam_unique,
    real_t *restrict U, int_t m_u, int_t p, bool U_has_NA,
    real_t *restrict II, int_t n_i, int_t q, bool I_has_NA,
    real_t *restrict Ub, int_t m_ubin, int_t pbin, bool Ub_has_NA,
    real_t *restrict Ib, int_t n_ibin, int_t qbin, bool Ib_has_NA,
    int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U,
    int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I,
    size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr,
    size_t U_csc_p[], int_t U_csc_i[], real_t *restrict U_csc,
    size_t I_csr_p[], int_t I_csr_i[], real_t *restrict I_csr,
    size_t I_csc_p[], int_t I_csc_i[], real_t *restrict I_csc,
    real_t *restrict buffer_real_t, real_t *restrict buffer_mt,
    int_t k_main, int_t k_user, int_t k_item,
    real_t w_main, real_t w_user, real_t w_item,
    int nthreads
)
{
    /* TODO: implement option 'NA_as_zero' here too. */
    /* Shorthands to use later */
    int_t k_totA = k_user + k + k_main;
    int_t k_totB = k_item + k + k_main;
    int_t m_max = max2(max2(m, m_u), m_ubin);
    int_t n_max = max2(max2(n, n_i), n_ibin);
    /* accumulate in extended precision to reduce round-off when summing
       many squared-error terms */
    long double f = 0;
    /* set the gradients to zero - first need to know how many entries to set */
    size_t nvars, ignored, mtvars;
    nvars_collective_fun_grad(
        (size_t)m, (size_t)n, (size_t)m_u, (size_t)n_i,
        (size_t)m_ubin, (size_t)n_ibin,
        (size_t)p, (size_t)q, (size_t)pbin, (size_t)qbin,
        nnz, nnz_U, nnz_I,
        (size_t)k, (size_t)k_main, (size_t)k_user, (size_t)k_item,
        user_bias, item_bias, (size_t)nthreads,
        X, Xfull,
        U, Ub, II, Ib,
        U_sp, U_csr, I_sp, I_csr,
        &nvars, &ignored, &mtvars
    );
    set_to_zero_(grad, nvars, nthreads);
    if (mtvars && buffer_mt != NULL) set_to_zero_(buffer_mt, mtvars, nthreads);
    /* unravel the arrays: 'values'/'grad' are flat vectors holding, in
       order: biasA, biasB, A, B, C, Cb, D, Db */
    real_t *restrict biasA = values;
    real_t *restrict biasB = biasA + (user_bias? m_max : 0);
    real_t *restrict A = biasB + (item_bias? n_max : 0);
    real_t *restrict B = A + (size_t)m_max * (size_t)k_totA;
    real_t *restrict C = B + (size_t)n_max * (size_t)k_totB;
    real_t *restrict Cb = C + (size_t)(k + k_user) * (size_t)p;
    real_t *restrict D = Cb + (size_t)(k + k_user) * (size_t)pbin;
    real_t *restrict Db = D + (size_t)(k + k_item) * (size_t)q;
    real_t *restrict g_biasA = grad;
    real_t *restrict g_biasB = g_biasA + (user_bias? m_max : 0);
    real_t *restrict g_A = g_biasB + (item_bias? n_max : 0);
    real_t *restrict g_B = g_A + (size_t)m_max * (size_t)k_totA;
    real_t *restrict g_C = g_B + (size_t)n_max * (size_t)k_totB;
    real_t *restrict g_Cb = g_C + (size_t)(k + k_user) * (size_t)p;
    real_t *restrict g_D = g_Cb + (size_t)(k + k_user) * (size_t)pbin;
    real_t *restrict g_Db = g_D + (size_t)(k + k_item) * (size_t)q;
    size_t sizeA = m_max * (size_t)k_totA;
    size_t sizeB = n_max * (size_t)k_totB;
    /* first the main factorization: X ~ A[,k_user:] * t(B[,k_item:]),
       writing directly into g_A/g_B (offset past the side-info-only columns)*/
    f = fun_grad_cannonical_form(
        A + k_user, k_totA, B + k_item, k_totB,
        g_A + k_user, g_B + k_item,
        m, n, k + k_main,
        ixA, ixB, X, nnz,
        Xfull, false,
        Xcsr_p, Xcsr_i, Xcsr,
        Xcsc_p, Xcsc_i, Xcsc,
        user_bias, item_bias,
        biasA, biasB,
        g_biasA, g_biasB,
        weight, weightR, weightC,
        w_main,
        buffer_real_t,
        buffer_mt,
        nthreads
    );
    /* then user non-binary factorization: U ~ A[,:k_user+k] * t(C).
       The gradient w.r.t. A is accumulated into a zeroed scratch buffer
       and then added to g_A, since g_A already holds the X contribution. */
    if (U != NULL || U_sp != NULL || U_csr != NULL)
    {
        set_to_zero_(buffer_real_t, sizeA, nthreads);
        f += fun_grad_cannonical_form(
            A, k_totA, C, k_user + k,
            buffer_real_t, g_C,
            (U != NULL)? m_u : m, p, k_user + k,
            U_row, U_col, U_sp, nnz_U,
            U, !U_has_NA,
            U_csr_p, U_csr_i, U_csr,
            U_csc_p, U_csc_i, U_csc,
            false, false,
            (real_t*)NULL, (real_t*)NULL,
            (real_t*)NULL, (real_t*)NULL,
            (real_t*)NULL, (real_t*)NULL, (real_t*)NULL,
            w_user,
            buffer_real_t + sizeA,
            buffer_mt,
            nthreads
        );
        taxpy_large(buffer_real_t, 1., g_A, sizeA, nthreads);
    }
    /* then item non-binary factorization: I ~ B[,:k_item+k] * t(D),
       same scratch-buffer accumulation scheme as above but for g_B */
    if (II != NULL || I_sp != NULL || I_csr != NULL)
    {
        set_to_zero_(buffer_real_t, sizeB, nthreads);
        f += fun_grad_cannonical_form(
            B, k_totB, D, k_item + k,
            buffer_real_t, g_D,
            (II != NULL)? n_i : n, q, k_item + k,
            I_row, I_col, I_sp, nnz_I,
            II, !I_has_NA,
            I_csr_p, I_csr_i, I_csr,
            I_csc_p, I_csc_i, I_csc,
            false, false,
            (real_t*)NULL, (real_t*)NULL,
            (real_t*)NULL, (real_t*)NULL,
            (real_t*)NULL, (real_t*)NULL, (real_t*)NULL,
            w_item,
            buffer_real_t + sizeB,
            buffer_mt,
            nthreads
        );
        taxpy_large(buffer_real_t, 1., g_B, sizeB, nthreads);
    }
    /* if there are binary matrices with sigmoid transformation, need a
       different formula for the gradients */
    if (Ub != NULL)
    {
        set_to_zero_(buffer_real_t, sizeA, nthreads);
        f += collective_fun_grad_bin(
            A, k_totA, Cb, k_user + k,
            buffer_real_t, g_Cb,
            Ub,
            m_ubin, pbin, k_user + k,
            !Ub_has_NA, w_user,
            buffer_real_t + sizeA,
            nthreads
        );
        taxpy_large(buffer_real_t, 1., g_A, sizeA, nthreads);
    }
    if (Ib != NULL)
    {
        set_to_zero_(buffer_real_t, sizeB, nthreads);
        f += collective_fun_grad_bin(
            B, k_totB, Db, k_item + k,
            buffer_real_t, g_Db,
            Ib,
            n_ibin, qbin, k_item + k,
            !Ib_has_NA, w_item,
            buffer_real_t + sizeB,
            nthreads
        );
        taxpy_large(buffer_real_t, 1., g_B, sizeB, nthreads);
    }
    /* Now account for the regularization parameter
        grad = grad + lambda * var
        f = f + (lambda / 2) * || var ||^2 */
    /* If all matrices have the same regularization, can do it in one pass */
    if (lam_unique == NULL) {
        taxpy_large(values, lam, grad, nvars, nthreads);
        f += (lam / 2.) * sum_squares(values, nvars, nthreads);
    }
    /* otherwise, add it one by one */
    else {
        long double freg = 0;
        /* Note: Cbin is in memory right next to C, so there's no need to
           account for it separately - can be passed extra elements to C */
        if (user_bias) cblas_taxpy(m_max, lam_unique[0], biasA, 1, g_biasA, 1);
        if (item_bias) cblas_taxpy(n_max, lam_unique[1], biasB, 1, g_biasB, 1);
        taxpy_large(A, lam_unique[2],g_A,(size_t)m_max*(size_t)k_totA,nthreads);
        taxpy_large(B, lam_unique[3],g_B,(size_t)n_max*(size_t)k_totB,nthreads);
        if (U != NULL || U_sp != NULL || U_csr != NULL || Ub != NULL)
            taxpy_large(C, lam_unique[4], g_C,
                        (size_t)(p+pbin)*(size_t)(k_user+k), nthreads);
        if (II != NULL || I_sp != NULL || I_csr != NULL || Ib != NULL)
            taxpy_large(D, lam_unique[5], g_D,
                        (size_t)(q+qbin)*(size_t)(k_item+k), nthreads);
        if (user_bias)
            freg += (lam_unique[0] / 2.) * cblas_tdot(m_max, biasA, 1, biasA,1);
        if (item_bias)
            freg += (lam_unique[1] / 2.) * cblas_tdot(n_max, biasB, 1, biasB,1);
        freg += (lam_unique[2]/2.) * sum_squares(A,(size_t)m_max*(size_t)k_totA,
                                                 nthreads);
        freg += (lam_unique[3]/2.) * sum_squares(B,(size_t)n_max*(size_t)k_totB,
                                                 nthreads);
        if (U != NULL || U_sp != NULL || U_csr != NULL || Ub != NULL)
            freg += (lam_unique[4] / 2.)
                     * sum_squares(C, (size_t)(p+pbin)*(size_t)(k_user+k),
                                   nthreads);
        if (II != NULL || I_sp != NULL || I_csr != NULL || Ib != NULL)
            freg += (lam_unique[5] / 2.)
                     * sum_squares(D, (size_t)(q+qbin)*(size_t)(k_item+k),
                                   nthreads);
        f += (real_t)freg;
    }
    return (real_t) f;
}
/* This function calculates the gradient for squared error on
   sigmoid-transformed approximations:
       f    = (w_user/2) * sum_sq(Ub - sigmoid(A * t(Cb)))   (over non-NA)
       g_A  = w_user * dErr * Cb,   g_Cb = w_user * t(dErr) * A
   where dErr is computed element-wise in 'buffer_real_t'. The gradients
   overwrite g_A / g_Cb (beta = 0 in the final GEMMs). */
real_t collective_fun_grad_bin
(
    real_t *restrict A, int_t lda, real_t *restrict Cb, int_t ldc,
    real_t *restrict g_A, real_t *restrict g_Cb,
    real_t *restrict Ub,
    int_t m, int_t pbin, int_t k,
    bool Ub_has_NA, double w_user,
    real_t *restrict buffer_real_t,
    int nthreads
)
{
    /* older OpenMP (and MSVC's OpenMP) cannot use unsigned loop counters,
       hence the conditional 'long long' declaration used by 'size_t_for' */
    #if defined(_OPENMP) && \
        ( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
        || defined(_WIN32) || defined(_WIN64) \
        )
    long long ix;
    #endif
    double f = 0;
    /* Buffer = exp(-A * t(Cb)) */
    cblas_tgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                m, pbin, k,
                1., A, lda, Cb, ldc,
                0., buffer_real_t, pbin);
    exp_neg_x(buffer_real_t, (size_t)m * (size_t)pbin, nthreads);
    /* f = sum_sq(Ub - 1/(1+Buffer))
       See explanation at the top for the gradient formula */
    if (Ub_has_NA)
    {
        /* NA entries contribute zero to both the function and the gradient */
        #pragma omp parallel for schedule(static) num_threads(nthreads) \
                shared(Ub, buffer_real_t) reduction(+:f)
        for (size_t_for ix = 0; ix < (size_t)m*(size_t)pbin; ix++)
            f += (!isnan(Ub[ix]))?
                  square(Ub[ix] - 1./(1.+buffer_real_t[ix])) : (0);
        /* overwrite the buffer in-place with the element-wise error term */
        #pragma omp parallel for schedule(static) num_threads(nthreads) \
                shared(buffer_real_t, m, pbin, Ub)
        for (size_t_for ix = 0; ix < (size_t)m*(size_t)pbin; ix++)
            buffer_real_t[ix] = (!isnan(Ub[ix]))?
                                 ( (1./(1.+buffer_real_t[ix]) - Ub[ix])
                                    * buffer_real_t[ix]
                                    / square(buffer_real_t[ix]+1.)
                                 ) : (0);
    }
    else
    {
        #pragma omp parallel for schedule(static) num_threads(nthreads) \
                shared(Ub, buffer_real_t) reduction(+:f)
        for (size_t_for ix = 0; ix < (size_t)m*(size_t)pbin; ix++)
            f += square(Ub[ix] - 1./(1.+buffer_real_t[ix]));
        #pragma omp parallel for schedule(static) num_threads(nthreads) \
                shared(buffer_real_t, m, pbin, Ub)
        for (size_t_for ix = 0; ix < (size_t)m*(size_t)pbin; ix++)
            buffer_real_t[ix] = (
                                 (1./(1.+buffer_real_t[ix]) - Ub[ix])
                                  * buffer_real_t[ix]
                                  / square(buffer_real_t[ix]+1.)
                                );
    }
    f *= (w_user / 2);
    /* chain rule through the GEMMs; beta=0 overwrites g_A/g_Cb */
    cblas_tgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                m, k, pbin,
                w_user, buffer_real_t, pbin, Cb, ldc,
                0., g_A, lda);
    cblas_tgemm(CblasRowMajor, CblasTrans, CblasNoTrans,
                pbin, k, m,
                w_user, buffer_real_t, pbin, A, lda,
                0., g_Cb, ldc);
    return f;
}
/*******************************************************************************
Function and Gradient for Single Row
------------------------------------
This is a shorter version of the main function and gradient function
applied to only one row of the A matrix. The purpose of this function
is to be used for determining the optimal values of A through the L-BFGS
solver in cases in which the closed-form is not possible to obtain.
The formula is the exact same as in the larger case, with the only
difference that the bias is not taken into consideration here, as it can be
obtained by a simple average on the non-missing entries of X for this
particular row, assuming no regularization applied on that bias.
The X matrix can be passed either as a dense vector or as a sparse vector,
    and must have already been centered according to previously-fit model
biases and the user/row bias to determine for this data.
Note that this function does not attempt to exploit any parallelization
as calculating the gradient for a single row is supposed to be very fast
for typical problem sizes.
Parameters
----------
a_vec[k_user + k + k_main]
The current values of the factors/variables for this row.
g_A[k_user + k + k_main] (out)
Array into which to write the gradient evaluated at 'a_vec'.
k, k_user, k_item, k_main
Dimensionality of the factorizing matrix. See description at the top
for a better idea.
u_vec[p]
User attributes for this row, in dense format. If passing them in both
dense and sparse formats, the dense one will be preferred. Pass NULL
if no data is available or is passed as sparse.
p
The number of attributes for users.
u_vec_ixB[nnz_u_vec], u_vec_sp[nnz_u_vec], nnz_u_vec
User attributes for this row, in sparse format. Pass NULL if not
available or if passed as dense.
u_bin_vec[pbin]
User binary attributes for this row. Pass NULL if the model had no
binary user attributes.
B[n*(k_item + k + k_main)]
The B matrices with which a_vec is multiplied to approximate X.
n
The number of columns in X and number of rows in B.
C[p * (k_user + k)]
The C matrix used to approximate the U matrix in the model.
Cb[pbin * (k_user + k)]
The portion of the C matrix used to approximate the binary columns
in the U matrix (Ub here).
Xa[nnz], ixB[nnz], nnz
The row of the X matrix for this row to evaluate the gradient, as
a sparse vector. Pass NULL if not applicable.
Xa_dense[n]
The row of the X matrix in dense format, with missing entries as NAN.
If passing it in both sparse and dense formats, the dense one will
be preferred. Pass NULL if not applicable.
weight[nnz or n]
Observation weights for each non-missing entry in X. Must have the same
shape as the X matrix that was passed - that is, if Xa is passed, must
have 'nnz' entries, if Xa_dense is passed, must have 'n' entries.
Pass NULL for uniform weights.
buffer_real_t[n or p or pbin]
Temporary array into which to write values. Must be able to hold the
largest dense vector which is passed. Pass NULL if all the input
vectors are sparse.
lam
Regularization parameter applied to A.
w_main
Weight of the squared error of the X factorization.
w_user
Weight of the squared error of the U and Ub factorizations.
Returns
-------
f : the function value evaluated at 'a_vec'.
*******************************************************************************/
/* Function and gradient for a single row of A - see the comment block
   above for the argument descriptions. Not parallelized (single row is
   assumed fast). Returns the function value evaluated at 'a_vec'. */
real_t collective_fun_grad_single
(
    real_t *restrict a_vec, real_t *restrict g_A,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t *restrict u_vec, int_t p,
    int_t u_vec_ixB[], real_t *restrict u_vec_sp, size_t nnz_u_vec,
    real_t *restrict u_bin_vec, int_t pbin,
    bool u_vec_has_NA, bool u_bin_vec_has_NA,
    real_t *restrict B, int_t n,
    real_t *restrict C, real_t *restrict Cb,
    real_t *restrict Xa, int_t ixB[], size_t nnz,
    real_t *restrict Xa_dense,
    real_t *restrict weight,
    real_t *restrict buffer_real_t,
    real_t lam, real_t w_main, real_t w_user, real_t lam_last
)
{
    int_t ldb = k_item + k + k_main;
    int_t k_pred = k + k_main;
    set_to_zero(g_A, k_user + k + k_main);
    real_t f = 0;
    real_t err;
    /* skip past the side-info-only columns of a_vec/g_A/B */
    real_t *restrict a_vec_pred = a_vec + k_user;
    real_t *restrict g_A_pred = g_A + k_user;
    real_t *restrict Bm = B + k_item;
    if (Xa_dense == NULL && Xa != NULL) /* sparse X */
    {
        /* accumulate un-scaled gradient first, apply w_main at the end */
        for (size_t ix = 0; ix < nnz; ix++) {
            err = cblas_tdot(k_pred, a_vec_pred, 1,
                             Bm + (size_t)ixB[ix]*(size_t)ldb, 1)
                   - Xa[ix];
            f += square(err) * ((weight != NULL)? weight[ix] : 1);
            err *= (weight != NULL)? weight[ix] : 1;
            cblas_taxpy(k_pred, err, Bm + (size_t)ixB[ix]*(size_t)ldb, 1,
                        g_A_pred, 1);
        }
        f *= w_main / 2.;
        if (w_main != 1.)
            cblas_tscal(k_pred, w_main, g_A_pred, 1);
    }
    else if (Xa_dense != NULL) /* dense X */
    {
        /* buffer = B*a - Xa  (residuals), with NANs masked out afterwards */
        memcpy(buffer_real_t, Xa_dense, (size_t)n*sizeof(real_t));
        cblas_tgemv(CblasRowMajor, CblasNoTrans,
                    n, k + k_main,
                    1, B + k_item, ldb,
                    a_vec + k_user, 1,
                    -1, buffer_real_t, 1);
        if (weight != NULL)
            mult_if_non_nan(buffer_real_t, Xa_dense, weight, n, 1);
        else
            nan_to_zero(buffer_real_t, Xa_dense, n, 1);
        /* g_A = w_main * t(B) * residuals */
        cblas_tgemv(CblasRowMajor, CblasTrans,
                    n, k + k_main,
                    w_main, B + k_item, ldb,
                    buffer_real_t, 1,
                    0, g_A + k_user, 1);
        /* weighted residuals were already multiplied by w; divide back out */
        if (weight == NULL)
            f = (w_main / 2.) * cblas_tdot(n, buffer_real_t, 1,buffer_real_t,1);
        else
            f = (w_main / 2.) * sum_sq_div_w(buffer_real_t, weight, n, false,1);
    }
    if (u_vec != NULL) /* dense user attributes */
    {
        memcpy(buffer_real_t, u_vec, (size_t)p*sizeof(real_t));
        cblas_tgemv(CblasRowMajor, CblasNoTrans,
                    p, k_user + k,
                    1., C, k_user + k,
                    a_vec, 1,
                    -1., buffer_real_t, 1);
        if (u_vec_has_NA)
            nan_to_zero(buffer_real_t, u_vec, p, 1);
        /* accumulate (beta=1) on top of the X-factorization gradient */
        cblas_tgemv(CblasRowMajor, CblasTrans,
                    p, k_user + k,
                    w_user, C, k_user + k,
                    buffer_real_t, 1,
                    1., g_A, 1);
        f += (w_user / 2.) * cblas_tdot(p, buffer_real_t, 1, buffer_real_t, 1);
    }
    else if (u_vec_sp != NULL) /* sparse user attributes */
    {
        real_t err_sp = 0;
        k_pred = k_user + k;
        for (size_t ix = 0; ix < nnz_u_vec; ix++) {
            err = cblas_tdot(k_pred, a_vec, 1,
                             C + (size_t)u_vec_ixB[ix]*(size_t)k_pred, 1)
                   - u_vec_sp[ix];
            err_sp += square(err);
            cblas_taxpy(k_pred, w_user*err,
                        C + (size_t)u_vec_ixB[ix]*(size_t)k_pred, 1, g_A,1);
        }
        f += (w_user / 2.) * err_sp;
    }
    if (u_bin_vec != NULL) /* binary user attributes (sigmoid link) */
    {
        real_t err_bin = 0;
        /* buffer = exp(-Cb * a), then sigmoid = 1/(1+buffer) */
        cblas_tgemv(CblasRowMajor, CblasNoTrans,
                    pbin, k_user + k,
                    1, Cb, k_user + k,
                    a_vec, 1,
                    0, buffer_real_t, 1);
        exp_neg_x(buffer_real_t, (size_t)pbin, 1);
        if (u_bin_vec_has_NA)
        {
            for (int_t ix = 0; ix < pbin; ix++)
                err_bin += (!isnan(u_bin_vec[ix]))?
                            square(1./(1.+buffer_real_t[ix]) - u_bin_vec[ix])
                            : (0);
            /* overwrite the buffer with the element-wise error term */
            for (int_t ix = 0; ix < pbin; ix++)
                buffer_real_t[ix] = (!isnan(u_bin_vec[ix]))?
                                     ( (1./(1.+buffer_real_t[ix]) -u_bin_vec[ix])
                                        * buffer_real_t[ix]
                                        / square(buffer_real_t[ix]+1.) )
                                     : (0);
        }
        else
        {
            for (int_t ix = 0; ix < pbin; ix++)
                err_bin += square(1./(1.+buffer_real_t[ix]) - u_bin_vec[ix]);
            for (int_t ix = 0; ix < pbin; ix++)
                buffer_real_t[ix] = (1./(1.+buffer_real_t[ix]) - u_bin_vec[ix])
                                     * buffer_real_t[ix]
                                     / square(buffer_real_t[ix]+1.);
        }
        cblas_tgemv(CblasRowMajor, CblasTrans,
                    pbin, k_user + k,
                    w_user, Cb, k_user + k,
                    buffer_real_t, 1,
                    1, g_A, 1);
        f += (w_user / 2.) * err_bin;
    }
    /* L2 regularization: f += (lam/2)*||a||^2, g += lam*a */
    f += (lam / 2.) * cblas_tdot(k_user+k+k_main, a_vec, 1, a_vec, 1);
    cblas_taxpy(k_user+k+k_main, lam, a_vec, 1, g_A, 1);
    /* the last entry may carry a different regularization (e.g. bias) */
    if (lam_last != lam && k_main) {
        f += (lam_last-lam)/2. * square(a_vec[k_user+k+k_main-1]);
        g_A[k_user+k+k_main-1] += (lam_last-lam) * a_vec[k_user+k+k_main-1];
    }
    return (real_t) f;
}
/* These functions find the optimal values for a single row of A using the
gradient function above, passing it to the L-BFGS solver */
/* Adapter with the callback signature expected by the L-BFGS solver:
   unpacks the bundle passed through 'instance' and forwards everything
   to 'collective_fun_grad_single'. The 'n' and 'step' arguments are part
   of the solver's callback contract but are not needed here. */
real_t wrapper_factors_fun_grad
(
    void *instance,
    real_t *x,
    real_t *g,
    const size_t n,
    const real_t step
)
{
    (void)n;
    (void)step;
    data_factors_fun_grad *dat = (data_factors_fun_grad*)instance;
    return collective_fun_grad_single(
        x, g,
        dat->k, dat->k_user, dat->k_item, dat->k_main,
        dat->u_vec, dat->p,
        dat->u_vec_ixB, dat->u_vec_sp, dat->nnz_u_vec,
        dat->u_bin_vec, dat->pbin,
        dat->u_vec_has_NA, dat->u_bin_vec_has_NA,
        dat->B, dat->n,
        dat->C, dat->Cb,
        dat->Xa, dat->ixB, dat->nnz,
        dat->Xa_dense,
        dat->weight,
        dat->buffer_real_t,
        dat->lam, dat->w_main, dat->w_user, dat->lam_last
    );
}
/* Finds the optimal factors for a single row of A by minimizing
   'collective_fun_grad_single' with the L-BFGS solver. Writes the result
   into 'a_vec'. Returns 0 on success, 1 on out-of-memory (other solver
   statuses are treated as success, as the result is still usable). */
int_t collective_factors_lbfgs
(
    real_t *restrict a_vec,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t *restrict u_vec, int_t p,
    int_t u_vec_ixB[], real_t *restrict u_vec_sp, size_t nnz_u_vec,
    real_t *restrict u_bin_vec, int_t pbin,
    bool u_vec_has_NA, bool u_bin_vec_has_NA,
    real_t *restrict B, int_t n,
    real_t *restrict C, real_t *restrict Cb,
    real_t *restrict Xa, int_t ixB[], real_t *restrict weight, size_t nnz,
    real_t *restrict Xa_dense,
    real_t *restrict buffer_real_t,
    real_t lam, real_t w_main, real_t w_user, real_t lam_last
)
{
    /* bundle the data for the solver callback (wrapper_factors_fun_grad);
       field order must match the 'data_factors_fun_grad' declaration */
    data_factors_fun_grad data = {
        k, k_user, k_item, k_main,
        u_vec, p,
        u_vec_ixB, u_vec_sp, nnz_u_vec,
        u_bin_vec, pbin,
        u_vec_has_NA, u_bin_vec_has_NA,
        B, n,
        C, Cb,
        Xa, ixB, weight, nnz,
        Xa_dense,
        buffer_real_t,
        lam, w_main, w_user, lam_last
    };
    /* positional initializer for lbfgs_parameter_t: m=5, epsilon=1e-5,
       past=0, delta=1e-5, max_iterations=250, More-Thuente linesearch with
       up to 20 trials, plus step-size bounds and Wolfe-condition constants */
    lbfgs_parameter_t lbfgs_params = {
        5, 1e-5, 0, 1e-5,
        250, LBFGS_LINESEARCH_MORETHUENTE, 20,
        1e-20, 1e20, 1e-4, 0.9, 0.9, EPSILON_T,
        0.0, 0, -1,
    };
    lbfgs_progress_t callback = (lbfgs_progress_t)NULL;
    size_t nvars = k_user + k + k_main;
    /* Starting point can be set to zero, since the other matrices
       are already at their local optima. */
    set_to_zero(a_vec, nvars);
    int_t retval = lbfgs(
        nvars,
        a_vec,
        (real_t*)NULL,
        wrapper_factors_fun_grad,
        callback,
        (void*) &data,
        &lbfgs_params,
        (real_t*)NULL,
        (iteration_data_t*)NULL
    );
    /* only out-of-memory is treated as a hard failure */
    if (retval == LBFGSERR_OUTOFMEMORY)
        return 1;
    else
        return 0;
}
#ifdef AVOID_BLAS_SYR
#undef cblas_tsyr
#define cblas_tsyr(order, Uplo, N, alpha, X, incX, A, lda) \
custom_syr(N, alpha, X, A, lda)
#endif
/* TODO: for better numerical precision in the Cholesky method, could keep
some temporary arrays matching to t(Be)*Be and 'a_vec', zero them out,
sum the parts from X and U separately, then add them to the other arrays.
That way could also take the 'w_user' scaling after having already summed.
Maybe add it as an extra parameter 'extra_precision' or so. */
/* TODO: for an even more efficient version with dense inputs, could have
an array that keeps track of which values are missing, set the missing ones
to zero in the original array, then use 'gemv' at once and subtract the
missing ones later. */
void collective_closed_form_block
(
real_t *restrict a_vec,
int_t k, int_t k_user, int_t k_item, int_t k_main,
real_t *restrict Xa_dense,
real_t *restrict Xa, int_t ixB[], size_t nnz,
int_t u_vec_ixB[], real_t *restrict u_vec_sp, size_t nnz_u_vec,
real_t *restrict u_vec,
bool NA_as_zero_X, bool NA_as_zero_U,
real_t *restrict B, int_t n, int_t ldb,
real_t *restrict C, int_t p,
real_t *restrict Bi, int_t k_main_i, bool add_implicit_features,
real_t *restrict Xones, int_t incXones,
real_t *restrict weight,
real_t lam, real_t w_user, real_t w_implicit, real_t lam_last,
real_t l1_lam, real_t l1_lam_bias,
bool scale_lam, bool scale_lam_sideinfo, bool scale_bias_const, real_t wsum,
real_t *restrict precomputedBtB, int_t cnt_NA_x,
real_t *restrict precomputedCtCw, int_t cnt_NA_u,
real_t *restrict precomputedBeTBeChol, int_t n_BtB,
real_t *restrict precomputedBiTBi,
bool add_X, bool add_U,
bool use_cg, int_t max_cg_steps,/* <- 'cg' should not be used for new data*/
bool nonneg, int_t max_cd_steps,
real_t *restrict bias_BtX, real_t *restrict bias_X, real_t bias_X_glob,
real_t *restrict bias_CtU,
real_t *restrict buffer_real_t
)
{
/* Note: for CG, CtC should not be scaled by 'w_user', but for Cholesky
it should be. In neither case should it have lambda added.
BtB should also never have lambda added. */
/* Potential bad inputs - should not reach this point_t */
if ( ( (Xa_dense != NULL && cnt_NA_x == n) ||
( Xa_dense == NULL && nnz == 0
&& !(NA_as_zero_X && bias_BtX != NULL)) )
&&
( (u_vec != NULL && cnt_NA_u == p) ||
(u_vec == NULL && nnz_u_vec == 0) ) )
{
zero_out:
set_to_zero(a_vec, k_user + k + k_main);
return;
}
if (Xa_dense != NULL && cnt_NA_x == n) {
Xa_dense = NULL;
nnz = 0;
NA_as_zero_X = false;
}
if (u_vec != NULL && cnt_NA_u == p) {
u_vec = NULL;
nnz_u_vec = 0;
NA_as_zero_U = false;
}
if (cnt_NA_x || cnt_NA_u) {
add_X = true;
add_U = true;
}
if (scale_lam || scale_lam_sideinfo)
{
real_t multiplier_lam = 0.;
if (wsum > 0)
{
multiplier_lam = wsum;
}
else
{
if (weight == NULL)
{
if (Xa_dense != NULL)
multiplier_lam = (real_t)(n - cnt_NA_x);
else if (NA_as_zero_X)
multiplier_lam = (real_t)n;
else
multiplier_lam = (real_t)nnz;
}
else
{
wsum = 0.;
if (Xa_dense != NULL) {
for (int_t ix = 0; ix < n; ix++)
wsum += isnan(Xa_dense[ix])? 0. : weight[ix];
}
else {
for (size_t ix = 0; ix < nnz; ix++)
wsum += weight[ix];
}
if (NA_as_zero_X && Xa_dense == NULL)
wsum += (real_t)((size_t)n - nnz);
multiplier_lam = wsum;
if (fabs_t(wsum) < EPSILON_T && bias_BtX == NULL &&
((u_vec != NULL && cnt_NA_u == p) ||
(u_vec == NULL && !nnz_u_vec && bias_CtU == NULL)))
{
goto zero_out;
}
}
if ((Xa_dense != NULL && cnt_NA_x == n) ||
(Xa_dense == NULL && !nnz && !NA_as_zero_X))
{
multiplier_lam = 1;
}
if (scale_lam_sideinfo)
{
if (u_vec != NULL)
multiplier_lam += (real_t)(p - cnt_NA_u);
else if (NA_as_zero_U)
multiplier_lam += (real_t)p;
else
multiplier_lam += (real_t)nnz_u_vec;
}
}
lam *= multiplier_lam;
lam_last *= multiplier_lam;
if (!scale_bias_const) {
l1_lam *= multiplier_lam;
l1_lam_bias *= multiplier_lam;
}
}
int_t k_totA = k_user + k + k_main;
size_t k_totC = k_user + k;
size_t offset_square = (size_t)k_user + (size_t)k_user*(size_t)k_totA;
int_t ld_BtB = k + k_main;
char lo = 'L';
int_t one = 1;
int_t ignore;
if (n_BtB == 0) n_BtB = n;
if (precomputedBeTBeChol != NULL && !nonneg && !l1_lam & !l1_lam_bias &&
( (Xa_dense != NULL && cnt_NA_x == 0 &&
weight == NULL && n_BtB == n) ||
(Xa_dense == NULL && NA_as_zero_X &&
(weight == NULL || nnz == 0)) ) &&
( (u_vec != NULL && cnt_NA_u == 0) ||
(u_vec == NULL && NA_as_zero_U) ))
{
if (add_U && (p || (NA_as_zero_U && bias_CtU != NULL)))
{
if (u_vec != NULL) {
cblas_tgemv(CblasRowMajor, CblasTrans,
p, k_user+k,
add_X? (1.) : (w_user), C, k_user+k,
u_vec, 1,
add_X? 0. : 1., a_vec, 1);
if (k_main && add_X)
set_to_zero(a_vec + k_user+k, k_main);
}
else {
if (add_X)
set_to_zero(a_vec, k_user+k+k_main);
else if (k_user)
set_to_zero(a_vec, k_user);
if (p)
tgemv_dense_sp(
p, k_user+k,
add_X? (1.) : (w_user), C, k_user+k,
u_vec_ixB, u_vec_sp, nnz_u_vec,
a_vec
);
if (bias_CtU != NULL && NA_as_zero_U)
cblas_taxpy(k_user+k, 1., bias_CtU, 1, a_vec, 1);
}
if (w_user != 1. && add_X && (u_vec != NULL || nnz_u_vec))
cblas_tscal(k_user+k, w_user, a_vec, 1);
}
if (add_X)
{
if (!add_U || (!p && !nnz_u_vec &&
!(NA_as_zero_U && bias_CtU != NULL)))
{
set_to_zero(a_vec + k_user+k, k_main);
}
if (Xa_dense != NULL)
cblas_tgemv(CblasRowMajor, CblasTrans,
n, k+k_main,
1., B + k_item, ldb,
Xa_dense, 1,
1., a_vec + k_user, 1);
else
tgemv_dense_sp(
n, k+k_main,
1., B + k_item, (size_t)ldb,
ixB, Xa, nnz,
a_vec + k_user
);
if (add_implicit_features)
{
if (Xa_dense != NULL)
cblas_tgemv(CblasRowMajor, CblasTrans,
n, k+k_main_i,
w_implicit, Bi, k+k_main_i,
Xones, incXones,
1., a_vec + k_user, 1);
else
tgemv_dense_sp(
n, k+k_main_i,
w_implicit, Bi, k+k_main_i,
ixB, Xones, nnz,
a_vec + k_user
);
}
if (bias_BtX != NULL && NA_as_zero_X && Xa_dense == NULL)
cblas_taxpy(k+k_main, 1., bias_BtX, 1, a_vec + k_user, 1);
}
tpotrs_(&lo, &k_totA, &one,
precomputedBeTBeChol, &k_totA,
a_vec, &k_totA,
&ignore);
return;
}
if (!p && !nnz_u_vec && !NA_as_zero_U)
precomputedCtCw = NULL;
#ifdef TEST_CG
if (!nonneg && !l1_lam && !l1_lam_bias)
{
use_cg = true;
max_cg_steps = 10000;
if (add_implicit_features)
{
precomputedBiTBi = (real_t*)malloc((size_t)square(k+k_main_i)
*sizeof(real_t));
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k+k_main_i, n,
1.,
Bi, k+k_main_i,
0., precomputedBiTBi, k+k_main_i);
}
if (precomputedCtCw != NULL)
{
precomputedCtCw = (real_t*)malloc((size_t)square(k_user+k)
*sizeof(real_t));
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k_user+k, p,
1., C, k_user+k,
0., precomputedCtCw, k_user+k);
}
}
#endif
if (add_implicit_features && precomputedBiTBi == NULL)
{
precomputedBiTBi = buffer_real_t;
buffer_real_t += square(k+k_main_i);
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k+k_main_i, n,
use_cg? (1.) : (w_implicit),
Bi, k+k_main_i,
0., precomputedBiTBi, k+k_main_i);
}
if (use_cg && add_X && add_U)
{
collective_block_cg(
a_vec,
k, k_user, k_item, k_main,
Xa_dense,
Xa, ixB, nnz,
u_vec_ixB, u_vec_sp, nnz_u_vec,
u_vec,
NA_as_zero_X, NA_as_zero_U,
B, n, ldb,
C, p,
add_implicit_features,
Xones, incXones,
Bi, precomputedBiTBi, k_main_i,
weight,
lam, w_user, w_implicit, lam_last,
cnt_NA_x, cnt_NA_u,
precomputedBtB, precomputedCtCw,
max_cg_steps,
bias_BtX, bias_X, bias_X_glob,
bias_CtU,
buffer_real_t
);
#ifdef TEST_CG
if (!nonneg && !l1_lam && !l1_lam_bias)
{
free(precomputedCtCw);
if (precomputedBiTBi != NULL && add_implicit_features)
free(precomputedBiTBi);
}
#endif
return;
}
real_t *restrict bufferBeTBe = buffer_real_t;
buffer_real_t += square(k_totA);
set_to_zero(bufferBeTBe, square(k_totA));
if (add_X && add_U)
set_to_zero(a_vec, k_totA);
else if (add_U)
set_to_zero(a_vec, k_user);
else if (add_X)
set_to_zero(a_vec + k_user+k, k_main);
bool prefer_BtB = (cnt_NA_x + (n_BtB-n) < 2*(k+k_main+1)) ||
(nnz > (size_t)(2*(k+k_main+1))) ||
(NA_as_zero_X);
bool prefer_CtC = (cnt_NA_u < 2*(k+k_user)) ||
(nnz_u_vec > (size_t)(2*(k+k_user))) ||
(NA_as_zero_U);
if (precomputedBtB == NULL)
prefer_BtB = false;
if (precomputedCtCw == NULL)
prefer_CtC = false;
/* TODO: for a more cache-friendly version, should move pt1 and pt2
after pt3 and pt4. */
/* =================== Part 1 =====================
Constructing t(Be)*Be, upper-left square (from C) */
if (u_vec != NULL || NA_as_zero_U) /* Dense u_vec */
{
/* If it's full or near full, can use the precomputed matrix
and subtract missing entries from it if necessary */
if (prefer_CtC || NA_as_zero_U)
{
if (precomputedCtCw != NULL) {
copy_mat(k_user+k, k_user+k,
precomputedCtCw, k_user+k,
bufferBeTBe, k_totA);
} else {
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k_user+k, p,
(cnt_NA_u > 0 && u_vec != NULL)? (1.) : (w_user),
C, k_user+k,
0., bufferBeTBe, k_totA);
}
if (cnt_NA_u && u_vec != NULL)
{
for (size_t ix = 0; ix < (size_t)p; ix++)
if (isnan(u_vec[ix]))
{
cblas_tsyr(CblasRowMajor, CblasUpper,
k_user+k,
(precomputedCtCw != NULL)? (-w_user) : (-1.),
C + ix*k_totC, 1,
bufferBeTBe, k_totA);
}
}
if (precomputedCtCw == NULL && w_user != 1. &&
(cnt_NA_u && u_vec != NULL) && p)
{
cblas_tscal(square(k_totA) - k_main*k_totA - k_main,
w_user, bufferBeTBe, 1);
}
}
/* Otherwise, will need to construct it one-by-one */
else
{
for (size_t ix = 0; ix < (size_t)p; ix++)
if (!isnan(u_vec[ix]))
cblas_tsyr(CblasRowMajor, CblasUpper,
k_user+k, 1.,
C + ix*k_totC, 1,
bufferBeTBe, k_totA);
if (w_user != 1.)
cblas_tscal(square(k_totA) - k_main*k_totA - k_main,
w_user, bufferBeTBe, 1);
}
}
else if (nnz_u_vec) /* Sparse u_vec */
{
for (size_t ix = 0; ix < nnz_u_vec; ix++)
cblas_tsyr(CblasRowMajor, CblasUpper,
k_user+k, 1.,
C + (size_t)u_vec_ixB[ix]*k_totC, 1,
bufferBeTBe, k_totA);
if (w_user != 1.)
cblas_tscal(square(k_totA) - k_main*k_totA - k_main,
w_user, bufferBeTBe, 1);
}
/* =================== Part 2 ======================
Constructing t(Be)*Be, lower-right square (from B) */
if ((Xa_dense != NULL && weight == NULL && prefer_BtB) ||
(Xa_dense == NULL && NA_as_zero_X))
{
if (precomputedBtB != NULL) {
sum_mat(k+k_main, k+k_main,
precomputedBtB, ld_BtB,
bufferBeTBe + offset_square, k_totA);
} else {
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k+k_main, n,
1., B + k_item, ldb,
1., bufferBeTBe + offset_square, k_totA);
}
if ((cnt_NA_x || n_BtB > n) && Xa_dense != NULL) {
for (size_t ix = 0; ix < (size_t)n; ix++)
if (isnan(Xa_dense[ix]))
cblas_tsyr(CblasRowMajor, CblasUpper,
k+k_main, -1.,
B + (size_t)k_item + ix*(size_t)ldb, 1,
bufferBeTBe + offset_square, k_totA);
for (size_t ix = (size_t)n; ix < (size_t)n_BtB; ix++)
cblas_tsyr(CblasRowMajor, CblasUpper,
k+k_main, -1.,
B + (size_t)k_item + ix*(size_t)ldb, 1,
bufferBeTBe + offset_square, k_totA);
}
else if (Xa_dense == NULL && NA_as_zero_X && weight != NULL) {
for (size_t ix = 0; ix < nnz; ix++)
cblas_tsyr(CblasRowMajor, CblasUpper,
k+k_main, weight[ix]-1.,
B + (size_t)k_item + (size_t)ixB[ix]*(size_t)ldb, 1,
bufferBeTBe + offset_square, k_totA);
}
/* Note: nothing extra is required when having 'NA_as_zero_X' without
weights, hence the if-else chain stops without checking for it. */
}
else if (Xa_dense != NULL)
{
if (weight == NULL) {
for (size_t ix = 0; ix < (size_t)n; ix++)
if (!isnan(Xa_dense[ix]))
cblas_tsyr(CblasRowMajor, CblasUpper,
k+k_main, 1.,
B + (size_t)k_item + ix*(size_t)ldb, 1,
bufferBeTBe + offset_square, k_totA);
}
else {
for (size_t ix = 0; ix < (size_t)n; ix++)
if (!isnan(Xa_dense[ix]))
cblas_tsyr(CblasRowMajor, CblasUpper,
k+k_main, weight[ix],
B + (size_t)k_item + ix*(size_t)ldb, 1,
bufferBeTBe + offset_square, k_totA);
}
}
else /* Sparse Xa - this is the expected scenario for most use-cases */
{
for (size_t ix = 0; ix < nnz; ix++)
cblas_tsyr(CblasRowMajor, CblasUpper,
k+k_main,
(weight == NULL)? (1.) : (weight[ix]),
B + (size_t)k_item + (size_t)ixB[ix]*(size_t)ldb, 1,
bufferBeTBe + offset_square, k_totA);
}
/* TODO: could add BiTBi to BtB beforehand and save one operation that way,
along with decreasing memory usage. */
if (add_implicit_features)
sum_mat(k+k_main_i, k+k_main_i,
precomputedBiTBi, k+k_main_i,
bufferBeTBe + offset_square, k_totA);
/* ================ Part 3 =================
Constructing Be*t(Xe), upper part (from X) */
if (add_X)
{
if (Xa_dense != NULL)
{
if (weight == NULL && cnt_NA_x == 0)
{
cblas_tgemv(CblasRowMajor, CblasTrans,
n, k+k_main,
1., B + k_item, ldb, Xa_dense, 1,
add_U? 0. : 1., a_vec + k_user, 1);
}
else
{
for (size_t ix = 0; ix < (size_t)n; ix++)
if (!isnan(Xa_dense[ix]))
cblas_taxpy(k+k_main,
((weight == NULL)?
1. : weight[ix]) * Xa_dense[ix],
B + (size_t)k_item + ix*(size_t)ldb, 1,
a_vec + k_user, 1);
}
}
else
{
if (weight == NULL)
tgemv_dense_sp(n, k+k_main,
1., B + k_item, (size_t)ldb,
ixB, Xa, nnz,
a_vec + k_user);
else
for (size_t ix = 0; ix < nnz; ix++)
cblas_taxpy(k+k_main,
(weight[ix]*Xa[ix])
-
(weight[ix]-1.)
*
(bias_X_glob + ((bias_X == NULL)?
0. : bias_X[ixB[ix]])),
B+(size_t)k_item+(size_t)ixB[ix]*(size_t)ldb, 1,
a_vec + k_user, 1);
}
if (add_implicit_features)
{
if (Xa_dense != NULL)
cblas_tgemv(CblasRowMajor, CblasTrans,
n, k+k_main_i,
w_implicit, Bi, k+k_main_i,
Xones, incXones,
1., a_vec + k_user, 1);
else
tgemv_dense_sp(
n, k+k_main_i,
w_implicit, Bi, k+k_main_i,
ixB, Xones, nnz,
a_vec + k_user
);
}
if (Xa_dense == NULL && bias_BtX != NULL && NA_as_zero_X)
cblas_taxpy(k+k_main, 1., bias_BtX, 1, a_vec + k_user, 1);
}
/* TODO: maybe this part should be moved before Part 3, so that it can
scale by 'w_user' in a post-hoc pass for more numerical precision. */
/* ================ Part 4 =================
Constructing Be*t(Xe), lower part (from U) */
if (add_U && (p || (NA_as_zero_U && bias_CtU != NULL)))
{
if (u_vec != NULL && cnt_NA_u == 0)
{
cblas_tgemv(CblasRowMajor, CblasTrans,
p, k_user+k,
w_user, C, k_user+k, u_vec, 1,
1., a_vec, 1);
}
else if (u_vec != NULL)
{
for (size_t ix = 0; ix < (size_t)p; ix++)
if (!isnan(u_vec[ix]))
cblas_taxpy(k_user+k, w_user * u_vec[ix],
C + ix*k_totC, 1,
a_vec, 1);
}
else
{
if (p && nnz_u_vec)
tgemv_dense_sp(p, k_user+k,
w_user, C, k_totC,
u_vec_ixB, u_vec_sp, nnz_u_vec,
a_vec);
if (NA_as_zero_U && bias_CtU != NULL)
cblas_taxpy(k_user+k, 1., bias_CtU, 1, a_vec, 1);
}
}
/* =================== Part 5 ======================
Solving A = inv(t(Be)*Be + diag(lam)) * (Be*t(Xe)) */
add_to_diag(bufferBeTBe, lam, k_totA);
if (lam_last != lam) bufferBeTBe[square(k_totA)-1] += (lam_last - lam);
if (!nonneg && !l1_lam && !l1_lam_bias)
tposv_(&lo, &k_totA, &one,
bufferBeTBe, &k_totA,
a_vec, &k_totA,
&ignore);
else if (!nonneg)
solve_elasticnet(
bufferBeTBe,
a_vec,
buffer_real_t,
k_totA,
l1_lam, l1_lam_bias,
max_cd_steps,
true
);
else
solve_nonneg(
bufferBeTBe,
a_vec,
buffer_real_t,
k_totA,
l1_lam, l1_lam_bias,
max_cd_steps,
true
);
}
/* Solves the closed-form least-squares system for one row of 'A' in the
   implicit-feedback collective model:
       a_vec = inv(t(Be)*Be + diag(lam)) * Be*t(Xe)
   where 'Be' stacks the item factors 'B' (entries weighted by the implicit
   confidences Xa+1) and the side-info factors 'C' (weighted by 'w_user').

   a_vec       : (output) vector of length k_user+k+k_main to solve for.
   B, n        : item-factor matrix (row stride k_item+k+k_main) and #items.
   C, p        : side-info factor matrix (row stride k_user+k) and #attributes.
   Xa, ixB, nnz: sparse interaction values/indices for this row.
   u_vec / u_vec_sp, u_vec_ixB, nnz_u_vec:
                 dense (NAN = missing) or sparse side-info vector.
   precomputed*: optional precomputed cross-products; when present they
                 replace the corresponding syrk/gemv reconstructions below.
   buffer_real_t: scratch; must hold at least square(k_user+k+k_main) values
                 for 'BtB' plus whatever the fallback solvers require.
   Falls back to CG ('collective_block_cg_implicit'), a Cholesky shortcut
   ('tpotrs_' on 'precomputedBeTBeChol'), or coordinate-descent solvers for
   the non-negative / L1-regularized cases. */
void collective_closed_form_block_implicit
(
    real_t *restrict a_vec,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t *restrict B, int_t n, real_t *restrict C, int_t p,
    real_t *restrict Xa, int_t ixB[], size_t nnz,
    real_t *restrict u_vec, int_t cnt_NA_u,
    real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec,
    bool NA_as_zero_U,
    real_t lam, real_t l1_lam, real_t w_user,
    real_t *restrict bias_CtU,
    real_t *restrict precomputedBeTBe,
    real_t *restrict precomputedBtB, /* for cg, should NOT have lambda added */
    real_t *restrict precomputedBeTBeChol,
    real_t *restrict precomputedCtCw,
    bool add_U, bool shapes_match,
    bool use_cg, int_t max_cg_steps,/* <- 'cg' should not be used for new data*/
    bool nonneg, int_t max_cd_steps,
    real_t *restrict buffer_real_t
)
{
    /* Potential bad inputs - should not reach this point_t */
    if (u_vec != NULL && cnt_NA_u == p) {
        /* Dense side-info vector is all-missing: treat as absent */
        u_vec = NULL;
        nnz_u_vec = 0;
    }
    if ( (nnz == 0)
            &&
         ((u_vec != NULL && cnt_NA_u == p) ||
          (u_vec == NULL && nnz_u_vec == 0))
        )
    {
        /* No data at all for this row: the regularized solution is zero */
        set_to_zero(a_vec, k_user + k + k_main);
        return;
    }

    char lo = 'L';
    int_t one = 1;
    int_t ignore;

    int_t k_totA = k_user + k + k_main;      /* total factors for 'A' rows  */
    size_t k_totB = k_item + k + k_main;     /* row stride of 'B'           */
    int_t k_totC = k_user + k;               /* row stride of 'C'           */
    /* top-left corner of the (k+k_main) block inside the k_totA system;
       size_t arithmetic throughout to match the sibling explicit-feedback
       routine and avoid int overflow on large factorizations */
    size_t offset_square = (size_t)k_user + (size_t)k_user*(size_t)k_totA;
    int_t ld_BtB = k + k_main;
    bool few_NAs = (u_vec != NULL && cnt_NA_u < k_user+k);
    if (cnt_NA_u)
        add_U = true;
    if ((add_U || cnt_NA_u) && !use_cg)
        set_to_zero(a_vec, k_totA);

    real_t *restrict BtB = buffer_real_t;
    bool add_C = false;

    /* Shortcut: no interactions and clean side info -> solve directly with
       the precomputed Cholesky factor of t(Be)*Be + diag(lam) */
    if (nnz == 0 && ((u_vec != NULL && cnt_NA_u == 0) || NA_as_zero_U) &&
        precomputedBeTBeChol != NULL && (!use_cg || p < k_totA) &&
        !nonneg && !l1_lam)
    {
        if (use_cg || add_U)
        {
            if (u_vec != NULL) {
                cblas_tgemv(CblasRowMajor, CblasTrans,
                            p, k_user+k,
                            1., C, k_user+k,
                            u_vec, 1,
                            0., a_vec, 1);
                if (k_main)
                    set_to_zero(a_vec + k_user+k, k_main);
            }

            else {
                set_to_zero(a_vec, k_totA);
                tgemv_dense_sp(
                    p, k_user+k,
                    1., C, k_user+k,
                    u_vec_ixB, u_vec_sp, nnz_u_vec,
                    a_vec
                );
            }

            if (w_user != 1.)
                cblas_tscal(k_user+k, w_user, a_vec, 1);
            if (bias_CtU != NULL)
                cblas_taxpy(k_user+k, 1., bias_CtU, 1, a_vec, 1);
        }

        tpotrs_(&lo, &k_totA, &one,
                precomputedBeTBeChol, &k_totA,
                a_vec, &k_totA,
                &ignore);
        return;
    }

    else if (use_cg)
    {
        collective_block_cg_implicit(
            a_vec,
            k, k_user, k_item, k_main,
            Xa, ixB, nnz,
            u_vec_ixB, u_vec_sp, nnz_u_vec,
            u_vec,
            NA_as_zero_U,
            B, n,
            C, p,
            lam, w_user,
            cnt_NA_u,
            max_cg_steps,
            bias_CtU,
            precomputedBtB,
            precomputedCtCw,
            buffer_real_t
        );
        return;
    }

    /* Reserve the full k_totA x k_totA block occupied by 'BtB' before the
       remainder of the scratch is handed to the fallback solvers below.
       (Bug fix: this previously advanced by square(k) only, so the scratch
       passed to 'solve_elasticnet'/'solve_nonneg' overlapped the tail of
       'BtB' whenever k_user+k_main > 0, corrupting the system matrix.) */
    buffer_real_t += square(k_totA);

    /* Build the full system matrix t(Be)*Be + diag(lam) into 'BtB' */
    if ((u_vec != NULL && few_NAs) || (u_vec == NULL && NA_as_zero_U))
    {
        /* Start from the full precomputed matrix (or reconstruct it),
           missing entries are subtracted afterwards */
        if (precomputedBeTBe != NULL)
            memcpy(BtB, precomputedBeTBe,(size_t)square(k_totA)*sizeof(real_t));
        else {
            set_to_zero(BtB, square(k_totA));
            if (precomputedCtCw == NULL)
                cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                            k_user+k, p,
                            w_user, C, k_totC,
                            0., BtB, k_totA);
            else
                copy_mat(k_user+k, k_user+k,
                         precomputedCtCw, k_totC,
                         BtB, k_totA);
            if (precomputedBtB == NULL) {
                cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                            k+k_main, n,
                            1., B + k_item, k_totB,
                            1., BtB + offset_square, k_totA);
                add_to_diag(BtB, lam, k_totA);
            } else {
                /* precomputedBtB already carries lambda on its diagonal;
                   only the leading k_user entries still need it */
                sum_mat(k+k_main, k+k_main,
                        precomputedBtB, k+k_main,
                        BtB + offset_square, k_totA);
                for (size_t ix = 0; ix < (size_t)k_user; ix++)
                    BtB[ix + ix*k_totA] += lam;
            }
        }
    }

    else
    {
        /* Too many missing side-info entries: rebuild the C part entry-by-
           entry later ('add_C'), start from the B part only */
        add_C = true;
        if (ld_BtB != k_totA)
            set_to_zero(BtB, square(k_totA));
        if (precomputedBtB != NULL) {
            copy_mat(ld_BtB, ld_BtB,
                     precomputedBtB, ld_BtB,
                     BtB + offset_square, k_totA);
            if (ld_BtB != k_totA)
                for (int_t ix = 0; ix < k_user; ix++)
                    BtB[ix + ix*k_totA] += lam;
        }
        else {
            cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                        k+k_main, n,
                        1., B + k_item, k_totB,
                        0., BtB + offset_square, k_totA);
            add_to_diag(BtB, lam, k_totA);
        }
    }

    /* t(Be)*Be, upper-left square (from C)
       AND
       Be*t(Xe), lower part (from U) */
    if (u_vec == NULL)
    {
        if (add_C)
        {
            for (size_t ix = 0; ix < nnz_u_vec; ix++)
                cblas_tsyr(CblasRowMajor, CblasUpper,
                           k_user+k, w_user,
                           C + (size_t)u_vec_ixB[ix]*(size_t)k_totC, 1,
                           BtB, k_totA);
        }

        if (add_U)
        {
            tgemv_dense_sp(p, k_user+k,
                           w_user, C, (size_t)k_totC,
                           u_vec_ixB, u_vec_sp, nnz_u_vec,
                           a_vec);
            if (bias_CtU != NULL)
                cblas_taxpy(k_user+k, 1., bias_CtU, 1, a_vec, 1);
        }
    }

    else
    {
        if (few_NAs && cnt_NA_u)
        {
            /* Started from the full matrix: subtract the missing rows */
            for (size_t ix = 0; ix < (size_t)p; ix++) {
                if (isnan(u_vec[ix])) {
                    cblas_tsyr(CblasRowMajor, CblasUpper,
                               k_totC, -w_user,
                               C + ix*(size_t)k_totC, 1,
                               BtB, k_totA);
                }
            }
        }

        else if (add_C)
        {
            /* Started from zero: add the observed rows */
            for (size_t ix = 0; ix < (size_t)p; ix++) {
                if (!isnan(u_vec[ix]))
                    cblas_tsyr(CblasRowMajor, CblasUpper,
                               k_totC, w_user,
                               C + ix*(size_t)k_totC, 1,
                               BtB, k_totA);
            }
        }

        if (add_U || cnt_NA_u)
        {
            if (cnt_NA_u == 0)
            {
                cblas_tgemv(CblasRowMajor, CblasTrans,
                            p, k_user+k,
                            w_user, C, k_user+k, u_vec, 1,
                            0., a_vec, 1);
            }

            else
            {
                set_to_zero(a_vec, k_user+k);
                for (size_t ix = 0; ix < (size_t)p; ix++)
                    if (!isnan(u_vec[ix]))
                        cblas_taxpy(k_user+k, u_vec[ix],
                                    C + ix*(size_t)k_totC, 1,
                                    a_vec, 1);
                if (w_user != 1.)
                    cblas_tscal(k_user+k, w_user, a_vec, 1);
            }
        }
    }

    /* t(Be)*Be, lower-right square (from B)
       AND
       Be*t(Xe), upper part (from X) */
    /* RHS: confidence for an observed entry is (Xa+1), hence the +1 */
    for (size_t ix = 0; ix < nnz; ix++) {
        cblas_taxpy(k + k_main, Xa[ix] + 1.,
                    B + (size_t)k_item + (size_t)ixB[ix]*k_totB, 1,
                    a_vec + k_user, 1);
    }
    /* System matrix: observed entries add (confidence-1) = Xa on top of
       the base t(B)*B already present in 'BtB' */
    for (size_t ix = 0; ix < nnz; ix++) {
        cblas_tsyr(CblasRowMajor, CblasUpper, k+k_main,
                   Xa[ix],
                   B + (size_t)k_item + (size_t)ixB[ix]*k_totB, 1,
                   BtB + offset_square, k_totA);
    }

    if (!nonneg && !l1_lam)
        tposv_(&lo, &k_totA, &one,
               BtB, &k_totA,
               a_vec, &k_totA,
               &ignore);
    else if (!nonneg)
        solve_elasticnet(
            BtB,
            a_vec,
            buffer_real_t,
            k_totA,
            l1_lam, l1_lam,
            max_cd_steps,
            true
        );
    else
        solve_nonneg(
            BtB,
            a_vec,
            buffer_real_t,
            k_totA,
            l1_lam, l1_lam,
            max_cd_steps,
            true
        );
}
/* Conjugate-gradient solver for one row of 'A' in the explicit-feedback
   collective model. Minimizes the regularized least-squares objective over
   the stacked system Be = [B (interactions); C (side info, scaled by
   'w_user'); Bi (implicit features, scaled by 'w_implicit')] without ever
   forming the full t(Be)*Be matrix, refining 'a_vec' in place for up to
   'max_cg_steps' iterations.

   a_vec        : (in/out) warm-start and solution, length k_user+k+k_main.
   Xa_dense     : dense interactions for this row (NAN = missing), or NULL.
   Xa, ixB, nnz : sparse interactions, used when Xa_dense is NULL.
   u_vec / u_vec_sp, u_vec_ixB, nnz_u_vec:
                  dense (NAN = missing) or sparse side-info vector.
   weight       : optional per-entry observation weights.
   precomputedBtB / precomputedCtC:
                  optional cross-products used to shortcut the matrix-vector
                  products; neither should have lambda added, and CtC must
                  NOT be pre-scaled by 'w_user' (see parameter comment).
   bias_BtX, bias_X, bias_X_glob, bias_CtU:
                  centering terms used under the NA-as-zero modes.
   buffer_real_t: scratch for the CG vectors Ap, p, r (3*k_totA) plus an
                  extra length-n buffer when weights are used with
                  NA_as_zero_X. */
void collective_block_cg
(
    real_t *restrict a_vec,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t *restrict Xa_dense,
    real_t *restrict Xa, int_t ixB[], size_t nnz,
    int_t u_vec_ixB[], real_t *restrict u_vec_sp, size_t nnz_u_vec,
    real_t *restrict u_vec,
    bool NA_as_zero_X, bool NA_as_zero_U,
    real_t *restrict B, int_t n, int_t ldb,
    real_t *restrict C, int_t p,
    bool add_implicit_features,
    real_t *restrict Xones, int_t incXones,
    real_t * restrict Bi, real_t *restrict precomputedBiTBi, int_t k_main_i,
    real_t *restrict weight,
    real_t lam, real_t w_user, real_t w_implicit, real_t lam_last,
    int_t cnt_NA_x, int_t cnt_NA_u,
    real_t *restrict precomputedBtB,
    real_t *restrict precomputedCtC, /* should NOT be multiplied by 'w_user' */
    int_t max_cg_steps,
    real_t *restrict bias_BtX, real_t *restrict bias_X, real_t bias_X_glob,
    real_t *restrict bias_CtU,
    real_t *restrict buffer_real_t
)
{
    /* TODO: when using BtB or CtC, should sum from the loop first, then
       call symv, for higher numeric precision */

    int_t k_totA = k_user + k + k_main;
    int_t k_totC = k_user + k;
    int_t ld_BtB = k + k_main;

    /* Carve the CG work vectors out of the caller-provided scratch */
    real_t *restrict Ap = buffer_real_t;
    real_t *restrict pp = Ap + k_totA;
    real_t *restrict r  = pp + k_totA;
    real_t *restrict wr = NULL;
    if (Xa_dense == NULL && NA_as_zero_X && weight != NULL)
        wr = r + k_totA; /* has length 'n' */
    set_to_zero(r, k_totA);
    real_t r_new, r_old;
    real_t a, coef;

    /* Normalize degenerate inputs: an all-missing vector behaves as absent */
    if (u_vec != NULL && cnt_NA_u == p) {
        u_vec = NULL;
        nnz_u_vec = 0;
        NA_as_zero_U = false;
    }
    if (Xa_dense != NULL && cnt_NA_x == n) {
        Xa_dense = NULL;
        nnz = 0;
        NA_as_zero_X = false;
    }

    /* Heuristics: use the precomputed cross-products when reconstructing
       the product row-by-row would cost more than correcting for the few
       missing/present entries */
    bool prefer_BtB = (cnt_NA_x < 2*(k+k_main)) ||
                      (nnz > (size_t)(2*(k+k_main))) ||
                      (NA_as_zero_X && (k+k_main) < n);
    bool prefer_CtC = (cnt_NA_u < 2*(k+k_user)) ||
                      (nnz_u_vec > (size_t)(2*(k+k_user))) ||
                      (NA_as_zero_U && (k_user+k) < p);
    if (precomputedBtB == NULL)
        prefer_BtB = false;
    if (precomputedCtC == NULL || (!p && !nnz_u_vec))
        prefer_CtC = false;

    /* TODO: this function can be simplified. Many of the code paths are
       redundant and/or do not provide a speed up - these are a result of
       earlier experimentation which was not porperly cleaned up later. */

    /* TODO: here should do the parts from C first as then they can be rescaled
       after being summed. */

    /* Initial residual r = RHS - (t(Be)*Be + diag(lam)) * a_vec,
       accumulated piecewise from the B, C, Bi, and lambda contributions. */

    /* t(B)*t(X) - t(B)*B*a */
    if (Xa_dense != NULL && cnt_NA_x == 0 && weight == NULL)
    {
        cblas_tgemv(CblasRowMajor, CblasTrans,
                    n, k+k_main,
                    1., B + k_item, ldb,
                    Xa_dense, 1,
                    0., r + k_user, 1);
        if (prefer_BtB)
            cblas_tsymv(CblasRowMajor, CblasUpper, k+k_main,
                        -1., precomputedBtB, ld_BtB,
                        a_vec + k_user, 1,
                        1., r + k_user, 1);
        else
            for (size_t ix = 0; ix < (size_t)n; ix++) {
                coef = cblas_tdot(k + k_main,
                                  B + (size_t)k_item + ix*(size_t)ldb, 1,
                                  a_vec + k_user, 1);
                cblas_taxpy(k + k_main, -coef,
                            B + (size_t)k_item + ix*(size_t)ldb, 1,
                            r + k_user, 1);
            }
    }

    else if (Xa_dense != NULL)
    {
        /* Dense X with missing entries and/or weights */
        if (weight == NULL && prefer_BtB)
        {
            /* subtract via full BtB, then add back the NAN rows */
            for (size_t ix = 0; ix < (size_t)n; ix++)
            {
                if (isnan(Xa_dense[ix])) {
                    coef = cblas_tdot(k + k_main,
                                      B + (size_t)k_item + ix*(size_t)ldb, 1,
                                      a_vec + k_user, 1);
                    cblas_taxpy(k + k_main, coef,
                                B + (size_t)k_item + ix*(size_t)ldb, 1,
                                r + k_user, 1);
                }

                else {
                    cblas_taxpy(k + k_main,
                                Xa_dense[ix],
                                B + (size_t)k_item + ix*(size_t)ldb, 1,
                                r + k_user, 1);
                }
            }
            cblas_tsymv(CblasRowMajor, CblasUpper, k+k_main,
                        -1., precomputedBtB, ld_BtB,
                        a_vec + k_user, 1,
                        1., r + k_user, 1);
        }

        else
            for (size_t ix = 0; ix < (size_t)n; ix++)
                if (!isnan(Xa_dense[ix])) {
                    coef = cblas_tdot(k + k_main,
                                      B + (size_t)k_item + ix*(size_t)ldb, 1,
                                      a_vec + k_user, 1);
                    cblas_taxpy(k + k_main,
                                (-coef + Xa_dense[ix])
                                    *
                                ((weight == NULL)? 1. : weight[ix]),
                                B + (size_t)k_item + ix*(size_t)ldb, 1,
                                r + k_user, 1);
                }
    }

    else if (NA_as_zero_X)
    {
        /* Sparse X, absent entries treated as zeros */
        if (weight == NULL)
        {
            tgemv_dense_sp(
                n, k+k_main,
                1., B + k_item, (size_t)ldb,
                ixB, Xa, nnz,
                r + k_user
            );
            if (prefer_BtB)
                cblas_tsymv(CblasRowMajor, CblasUpper, k+k_main,
                            -1., precomputedBtB, ld_BtB,
                            a_vec + k_user, 1,
                            1., r + k_user, 1);
            else
                for (size_t ix = 0; ix < (size_t)n; ix++) {
                    coef = cblas_tdot(k + k_main,
                                      B + (size_t)k_item + ix*(size_t)ldb, 1,
                                      a_vec + k_user, 1);
                    cblas_taxpy(k + k_main, -coef,
                                B + (size_t)k_item + ix*(size_t)ldb, 1,
                                r + k_user, 1);
                }
        }

        else
        {
            /* Weighted NA-as-zero: present entries carry weight[ix], the
               implicit zeros carry weight 1; biases undo the centering
               for the weighted entries */
            if (prefer_BtB)
            {
                cblas_tsymv(CblasRowMajor, CblasUpper, k+k_main,
                            -1., precomputedBtB, ld_BtB,
                            a_vec + k_user, 1,
                            0., r + k_user, 1);
                for (size_t ix = 0; ix < nnz; ix++)
                {
                    coef = cblas_tdot(k + k_main,
                                      B
                                        + (size_t)k_item
                                        + (size_t)ixB[ix]*(size_t)ldb, 1,
                                      a_vec + k_user, 1);
                    cblas_taxpy(k + k_main,
                                -(weight[ix]-1.)
                                    *
                                (coef + bias_X_glob + ((bias_X == NULL)?
                                                        0 : bias_X[ixB[ix]]))
                                    +
                                (weight[ix] * Xa[ix]),
                                B
                                    + (size_t)k_item
                                    + (size_t)ixB[ix]*(size_t)ldb, 1,
                                r + k_user, 1);
                }
            }

            else
            {
                /* materialize the length-n weighted residual in 'wr' */
                cblas_tgemv(CblasRowMajor, CblasNoTrans,
                            n, k+k_main,
                            -1., B + k_item, ldb,
                            a_vec + k_user, 1,
                            0., wr, 1);
                for (size_t ix = 0; ix < nnz; ix++)
                    wr[ixB[ix]] = weight[ix] * (wr[ixB[ix]] + Xa[ix]);
                if (bias_X != NULL) {
                    for (size_t ix = 0; ix < nnz; ix++)
                        wr[ixB[ix]] -= (weight[ix] - 1.)
                                        *
                                       (bias_X[ixB[ix]] + bias_X_glob);
                }
                else if (bias_X_glob != 0.) {
                    for (size_t ix = 0; ix < nnz; ix++)
                        wr[ixB[ix]] -= (weight[ix] - 1.) * bias_X_glob;
                }
                cblas_tgemv(CblasRowMajor, CblasTrans,
                            n, k+k_main,
                            1., B + k_item, ldb,
                            wr, 1,
                            1., r + k_user, 1);
            }
        }

        if (bias_BtX != NULL)
        {
            cblas_taxpy(k+k_main, 1., bias_BtX, 1, r + k_user, 1);
        }
    }

    else
    {
        /* Plain sparse X: only the observed entries enter the system */
        for (size_t ix = 0; ix < nnz; ix++)
        {
            coef = cblas_tdot(k + k_main,
                              B
                                + (size_t)k_item
                                + (size_t)ixB[ix]*(size_t)ldb, 1,
                              a_vec + k_user, 1);
            cblas_taxpy(k + k_main,
                        (-coef + Xa[ix]) * ((weight == NULL)? 1. : weight[ix]),
                        B + (size_t)k_item + (size_t)ixB[ix]*(size_t)ldb, 1,
                        r + k_user, 1);
        }
    }

    /* t(C)*t(U) - t(C)*C*a */
    if (u_vec != NULL && cnt_NA_u == 0)
    {
        cblas_tgemv(CblasRowMajor, CblasTrans,
                    p, k_user+k,
                    w_user, C, k_totC,
                    u_vec, 1,
                    1., r, 1);
        if (prefer_CtC)
            cblas_tsymv(CblasRowMajor, CblasUpper, k_totC,
                        -w_user, precomputedCtC, k_totC,
                        a_vec, 1,
                        1., r, 1);
        else
            for (size_t ix = 0; ix < (size_t)p; ix++) {
                coef = cblas_tdot(k_user+k,
                                  C + ix*(size_t)k_totC, 1,
                                  a_vec, 1);
                cblas_taxpy(k_user+k, -w_user * coef,
                            C + ix*(size_t)k_totC, 1,
                            r, 1);
            }
    }

    else if (u_vec != NULL)
    {
        /* Dense side info with missing entries */
        if (prefer_CtC)
        {
            cblas_tsymv(CblasRowMajor, CblasUpper, k_totC,
                        -w_user, precomputedCtC, k_totC,
                        a_vec, 1,
                        1., r, 1);
            for (size_t ix = 0; ix < (size_t)p; ix++)
            {
                if (isnan(u_vec[ix])) {
                    /* add back the contribution of the missing row */
                    coef = cblas_tdot(k_user+k,
                                      C + ix*(size_t)k_totC, 1,
                                      a_vec, 1);
                    cblas_taxpy(k_user+k, w_user * coef,
                                C + ix*(size_t)k_totC, 1,
                                r, 1);
                }

                else {
                    cblas_taxpy(k_user+k,
                                w_user * u_vec[ix],
                                C + ix*(size_t)k_totC, 1,
                                r, 1);
                }
            }
        }

        else
            for (size_t ix = 0; ix < (size_t)p; ix++)
                if (!isnan(u_vec[ix])) {
                    coef = cblas_tdot(k_user+k,
                                      C + ix*(size_t)k_totC, 1,
                                      a_vec, 1);
                    cblas_taxpy(k_user+k, w_user * (-coef + u_vec[ix]),
                                C + ix*(size_t)k_totC, 1,
                                r, 1);
                }
    }

    else if (u_vec == NULL && NA_as_zero_U)
    {
        /* Sparse side info, absent entries treated as zeros */
        tgemv_dense_sp(
            p, k_user+k,
            w_user, C, k_totC,
            u_vec_ixB, u_vec_sp, nnz_u_vec,
            r
        );
        if (prefer_CtC)
            cblas_tsymv(CblasRowMajor, CblasUpper, k_totC,
                        -w_user, precomputedCtC, k_totC,
                        a_vec, 1,
                        1., r, 1);
        else
            for (size_t ix = 0; ix < (size_t)p; ix++) {
                coef = cblas_tdot(k_user+k,
                                  C + ix*(size_t)k_totC, 1,
                                  a_vec, 1);
                cblas_taxpy(k_user+k, -w_user * coef,
                            C + ix*(size_t)k_totC, 1,
                            r, 1);
            }
        if (bias_CtU != NULL)
            cblas_taxpy(k_user+k, 1., bias_CtU, 1, r, 1);
    }

    else if (u_vec_sp != NULL)
    {
        /* Plain sparse side info: only the present attributes */
        for (size_t ix = 0; ix < nnz_u_vec; ix++)
        {
            coef = cblas_tdot(k_user+k,
                              C + (size_t)u_vec_ixB[ix]*(size_t)k_totC, 1,
                              a_vec, 1);
            cblas_taxpy(k_user+k, w_user * (-coef + u_vec_sp[ix]),
                        C + (size_t)u_vec_ixB[ix]*(size_t)k_totC, 1,
                        r, 1);
        }
    }

    /* t(Bi)*t(Xi) - t(Bi)*Bi*a */
    if (add_implicit_features)
    {
        cblas_tsymv(CblasRowMajor, CblasUpper, k+k_main_i,
                    -w_implicit, precomputedBiTBi, k+k_main_i,
                    a_vec + k_user, 1,
                    1., r + k_user, 1);

        if (Xa_dense != NULL)
            cblas_tgemv(CblasRowMajor, CblasTrans,
                        n, k+k_main_i,
                        w_implicit, Bi, k+k_main_i,
                        Xones, incXones,
                        1., r + k_user, 1);
        else
            tgemv_dense_sp(
                n, k+k_main_i,
                w_implicit, Bi, k+k_main_i,
                ixB, Xones, nnz,
                r + k_user
            );
    }

    /* diag(lam) */
    cblas_taxpy(k_totA, -lam, a_vec, 1, r, 1);
    if (lam != lam_last)
        r[k_totA-1] -= (lam_last-lam) * a_vec[k_totA-1];

    /* p := r */
    copy_arr(r, pp, k_totA);
    r_old = cblas_tdot(k_totA, r, 1, r, 1);

    #ifdef TEST_CG
    if (r_old <= 1e-15)
        return;
    #else
    if (r_old <= 1e-12)
        return;
    #endif

    /* Standard CG iterations: each step computes Ap = (t(Be)*Be+diag(lam))*p
       through the same piecewise branches as the residual above */
    for (int_t cg_step = 0; cg_step < max_cg_steps; cg_step++)
    {
        set_to_zero(Ap, k_totA);

        /* t(B)*B*p */
        if ((Xa_dense != NULL && cnt_NA_x == 0) ||
            (Xa_dense == NULL && NA_as_zero_X && weight == NULL))
        {
            if (weight == NULL && prefer_BtB)
                cblas_tsymv(CblasRowMajor, CblasUpper, k+k_main,
                            1., precomputedBtB, ld_BtB,
                            pp + k_user, 1,
                            0., Ap + k_user, 1);
            else
                for (size_t ix = 0; ix < (size_t)n; ix++)
                {
                    coef = cblas_tdot(k+k_main,
                                      B + (size_t)k_item + ix*(size_t)ldb, 1,
                                      pp + k_user, 1);
                    cblas_taxpy(k+k_main,
                                coef * ((weight == NULL)? 1. : weight[ix]),
                                B + (size_t)k_item + ix*(size_t)ldb, 1,
                                Ap + k_user, 1);
                }
        }

        else if (Xa_dense == NULL && NA_as_zero_X && weight != NULL)
        {
            if (prefer_BtB)
            {
                cblas_tsymv(CblasRowMajor, CblasUpper, k+k_main,
                            1., precomputedBtB, ld_BtB,
                            pp + k_user, 1,
                            0., Ap + k_user, 1);
                for (size_t ix = 0; ix < nnz; ix++) {
                    coef = cblas_tdot(k+k_main,
                                      B
                                        + (size_t)k_item
                                        + (size_t)ixB[ix]*(size_t)ldb,
                                      1,
                                      pp + k_user, 1);
                    cblas_taxpy(k+k_main, (weight[ix] - 1.) * coef,
                                B
                                  + (size_t)k_item
                                  + (size_t)ixB[ix]*(size_t)ldb, 1,
                                Ap + k_user, 1);
                }
            }

            else
            {
                cblas_tgemv(CblasRowMajor, CblasNoTrans,
                            n, k+k_main,
                            1., B + k_item, ldb,
                            pp + k_user, 1,
                            0., wr, 1);
                for (size_t ix = 0; ix < nnz; ix++)
                    wr[ixB[ix]] *= weight[ix];
                cblas_tgemv(CblasRowMajor, CblasTrans,
                            n, k+k_main,
                            1., B + k_item, ldb,
                            wr, 1,
                            0., Ap + k_user, 1);
            }
        }

        else if (Xa_dense != NULL)
        {
            if (weight == NULL && prefer_BtB)
            {
                cblas_tsymv(CblasRowMajor, CblasUpper, k+k_main,
                            1., precomputedBtB, ld_BtB,
                            pp + k_user, 1,
                            0., Ap + k_user, 1);
                for (size_t ix = 0; ix < (size_t)n; ix++)
                    if (isnan(Xa_dense[ix])) {
                        coef = cblas_tdot(k+k_main,
                                          B
                                            + (size_t)k_item
                                            + ix*(size_t)ldb, 1,
                                          pp + k_user, 1);
                        cblas_taxpy(k+k_main, -coef,
                                    B + (size_t)k_item + ix*(size_t)ldb, 1,
                                    Ap + k_user, 1);
                    }
            }

            else
                for (size_t ix = 0; ix < (size_t)n; ix++)
                {
                    if (!isnan(Xa_dense[ix])) {
                        coef = cblas_tdot(k+k_main,
                                          B
                                            + (size_t)k_item
                                            + ix*(size_t)ldb, 1,
                                          pp + k_user, 1);
                        cblas_taxpy(k+k_main,
                                    coef * ((weight == NULL)? 1. : weight[ix]),
                                    B + (size_t)k_item + ix*(size_t)ldb, 1,
                                    Ap + k_user, 1);
                    }
                }
        }

        else
        {
            for (size_t ix = 0; ix < nnz; ix++)
            {
                coef = cblas_tdot(k+k_main,
                                  B
                                    + (size_t)k_item
                                    + (size_t)ixB[ix]*(size_t)ldb, 1,
                                  pp + k_user, 1);
                cblas_taxpy(k+k_main, coef * ((weight == NULL)? 1. : weight[ix]),
                            B
                              + (size_t)k_item
                              + (size_t)ixB[ix]*(size_t)ldb, 1,
                            Ap + k_user, 1);
            }
        }

        /* t(C)*C*p */
        if ((u_vec != NULL && cnt_NA_u == 0) ||
            (u_vec == NULL && NA_as_zero_U))
        {
            if (prefer_CtC)
                cblas_tsymv(CblasRowMajor, CblasUpper, k_totC,
                            w_user, precomputedCtC, k_totC,
                            pp, 1,
                            1., Ap, 1);
            else
                for (size_t ix = 0; ix < (size_t)p; ix++) {
                    coef = cblas_tdot(k_user+k,
                                      C + ix*(size_t)k_totC, 1,
                                      pp, 1);
                    cblas_taxpy(k_user+k, w_user * coef,
                                C + ix*(size_t)k_totC, 1,
                                Ap, 1);
                }
        }

        else if (u_vec != NULL)
        {
            if (prefer_CtC)
            {
                cblas_tsymv(CblasRowMajor, CblasUpper, k_user+k,
                            w_user, precomputedCtC, k_user+k,
                            pp, 1,
                            1., Ap, 1);
                for (size_t ix = 0; ix < (size_t)p; ix++)
                    if (isnan(u_vec[ix])) {
                        coef = cblas_tdot(k_user+k,
                                          C + ix*(size_t)k_totC, 1,
                                          pp, 1);
                        cblas_taxpy(k_user+k, -w_user * coef,
                                    C + ix*(size_t)k_totC, 1,
                                    Ap, 1);
                    }
            }

            else
                for (size_t ix = 0; ix < (size_t)p; ix++)
                {
                    if (!isnan(u_vec[ix])) {
                        coef = cblas_tdot(k_user+k,
                                          C + ix*(size_t)k_totC, 1,
                                          pp, 1);
                        cblas_taxpy(k_user+k, w_user * coef,
                                    C + ix*(size_t)k_totC, 1,
                                    Ap, 1);
                    }
                }
        }

        else if (u_vec_sp != NULL)
        {
            for (size_t ix = 0; ix < nnz_u_vec; ix++)
            {
                coef = cblas_tdot(k_user+k,
                                  C + (size_t)u_vec_ixB[ix]*(size_t)k_totC, 1,
                                  pp, 1);
                cblas_taxpy(k_user+k, w_user * coef,
                            C + (size_t)u_vec_ixB[ix]*(size_t)k_totC, 1,
                            Ap, 1);
            }
        }

        /* t(Bi)*Bi*p */
        if (add_implicit_features)
        {
            cblas_tsymv(CblasRowMajor, CblasUpper, k+k_main_i,
                        w_implicit, precomputedBiTBi, k+k_main_i,
                        pp + k_user, 1,
                        1., Ap + k_user, 1);
        }

        /* diag(lam) */
        cblas_taxpy(k_totA, lam, pp, 1, Ap, 1);
        if (lam != lam_last)
            Ap[k_totA-1] += (lam_last-lam) * pp[k_totA-1];

        /* rest of the procedure */
        /* alpha = r'r / p'Ap; update solution and residual */
        a = r_old / cblas_tdot(k_totA, pp, 1, Ap, 1);
        cblas_taxpy(k_totA,  a, pp, 1, a_vec, 1);
        cblas_taxpy(k_totA, -a, Ap, 1, r, 1);
        r_new = cblas_tdot(k_totA, r, 1, r, 1);
        #ifdef TEST_CG
        if (r_new <= 1e-15)
            break;
        #else
        if (r_new <= 1e-8)
            break;
        #endif
        /* p := r + (r_new/r_old) * p */
        cblas_tscal(k_totA, r_new / r_old, pp, 1);
        cblas_taxpy(k_totA, 1., r, 1, pp, 1);
        r_old = r_new;
    }
}
void collective_block_cg_implicit
(
real_t *restrict a_vec,
int_t k, int_t k_user, int_t k_item, int_t k_main,
real_t *restrict Xa, int_t ixB[], size_t nnz,
int_t u_vec_ixB[], real_t *restrict u_vec_sp, size_t nnz_u_vec,
real_t *restrict u_vec,
bool NA_as_zero_U,
real_t *restrict B, int_t n,
real_t *restrict C, int_t p,
real_t lam, real_t w_user,
int_t cnt_NA_u,
int_t max_cg_steps,
real_t *restrict bias_CtU,
real_t *restrict precomputedBtB,
real_t *restrict precomputedCtC, /* should NOT be multiplied by weight */
real_t *restrict buffer_real_t
)
{
int_t k_totA = k_user + k + k_main;
int_t k_totC = k_user + k;
size_t ldb = k_item + k + k_main;
int_t ld_BtB = k + k_main;
real_t *restrict Ap = buffer_real_t;
real_t *restrict pp = Ap + k_totA;
real_t *restrict r = pp + k_totA;
set_to_zero(r, k_user);
real_t r_new, r_old;
real_t a, coef;
bool prefer_CtC = (cnt_NA_u < 2*(k+k_user)) ||
(nnz_u_vec > (size_t)(2*(k+k_user)));
if (NA_as_zero_U)
prefer_CtC = true;
if (precomputedCtC == NULL)
prefer_CtC = false;
if (u_vec != NULL && cnt_NA_u == p) {
u_vec = NULL;
nnz_u_vec = 0;
NA_as_zero_U = false;
}
/* t(B)*t(X) - t(B)*B*a */
cblas_tsymv(CblasRowMajor, CblasUpper, k+k_main,
-1., precomputedBtB, ld_BtB,
a_vec + k_user, 1,
0., r + k_user, 1);
for (size_t ix = 0; ix < nnz; ix++)
{
coef = cblas_tdot(k+k_main,
B + (size_t)k_item + (size_t)ixB[ix]*ldb, 1,
a_vec + k_user, 1);
cblas_taxpy(k+k_main,
-(coef - 1.) * Xa[ix] - coef,
B + (size_t)k_item + (size_t)ixB[ix]*ldb, 1,
r + k_user, 1);
}
/* t(C)*t(U) - t(C)*C*a */
if (u_vec != NULL && cnt_NA_u == 0)
{
cblas_tgemv(CblasRowMajor, CblasTrans,
p, k_user+k,
w_user, C, k_totC,
u_vec, 1,
1., r, 1);
if (prefer_CtC)
cblas_tsymv(CblasRowMajor, CblasUpper, k_totC,
-w_user, precomputedCtC, k_totC,
a_vec, 1,
1., r, 1);
else
for (size_t ix = 0; ix < (size_t)p; ix++) {
coef = cblas_tdot(k_user+k,
C + ix*(size_t)k_totC, 1,
a_vec, 1);
cblas_taxpy(k_user+k, -w_user * coef,
C + ix*(size_t)k_totC, 1,
r, 1);
}
}
else if (u_vec != NULL)
{
if (prefer_CtC)
{
cblas_tsymv(CblasRowMajor, CblasUpper, k_totC,
-w_user, precomputedCtC, k_totC,
a_vec, 1,
1., r, 1);
for (size_t ix = 0; ix < (size_t)p; ix++)
{
if (isnan(u_vec[ix])) {
coef = cblas_tdot(k_user+k,
C + ix*(size_t)k_totC, 1,
a_vec, 1);
cblas_taxpy(k_user+k, w_user * coef,
C + ix*(size_t)k_totC, 1,
r, 1);
}
else {
cblas_taxpy(k_user+k,
w_user * u_vec[ix],
C + ix*(size_t)k_totC, 1,
r, 1);
}
}
}
else
for (size_t ix = 0; ix < (size_t)p; ix++)
if (!isnan(u_vec[ix])) {
coef = cblas_tdot(k_user+k,
C + ix*(size_t)k_totC, 1,
a_vec, 1);
cblas_taxpy(k_user+k, w_user * (-coef + u_vec[ix]),
C + ix*(size_t)k_totC, 1,
r, 1);
}
}
else if (u_vec == NULL && NA_as_zero_U)
{
tgemv_dense_sp(
p, k_user+k,
w_user, C, k_totC,
u_vec_ixB, u_vec_sp, nnz_u_vec,
r
);
if (prefer_CtC)
cblas_tsymv(CblasRowMajor, CblasUpper, k_totC,
-w_user, precomputedCtC, k_totC,
a_vec, 1,
1., r, 1);
else
for (size_t ix = 0; ix < (size_t)p; ix++) {
coef = cblas_tdot(k_user+k,
C + ix*(size_t)k_totC, 1,
a_vec, 1);
cblas_taxpy(k_user+k, -w_user * coef,
C + ix*(size_t)k_totC, 1,
r, 1);
}
if (bias_CtU != NULL)
cblas_taxpy(k_user+k, 1., bias_CtU, 1, r, 1);
}
else if (u_vec_sp != NULL)
{
for (size_t ix = 0; ix < nnz_u_vec; ix++)
{
coef = cblas_tdot(k_user+k,
C + (size_t)u_vec_ixB[ix]*(size_t)k_totC, 1,
a_vec, 1);
cblas_taxpy(k_user+k, w_user * (-coef + u_vec_sp[ix]),
C + (size_t)u_vec_ixB[ix]*(size_t)k_totC, 1,
r, 1);
}
}
/* diag(lam) */
cblas_taxpy(k_totA, -lam, a_vec, 1, r, 1);
/* p := r */
copy_arr(r, pp, k_totA);
r_old = cblas_tdot(k_totA, r, 1, r, 1);
#ifdef TEST_CG
if (r_old <= 1e-15)
return;
#else
if (r_old <= 1e-12)
return;
#endif
for (int_t cg_step = 0; cg_step < max_cg_steps; cg_step++)
{
/* t(B)*B*p */
if (k_user)
set_to_zero(Ap, k_user);
cblas_tsymv(CblasRowMajor, CblasUpper, k+k_main,
1., precomputedBtB, ld_BtB,
pp + k_user, 1,
0., Ap + k_user, 1);
for (size_t ix = 0; ix < nnz; ix++) {
coef = cblas_tdot(k+k_main,
B + (size_t)k_item + (size_t)ixB[ix]*ldb, 1,
pp + k_user, 1);
cblas_taxpy(k+k_main,
coef * (Xa[ix] - 1.) + coef,
B + (size_t)k_item + (size_t)ixB[ix]*ldb, 1,
Ap + k_user, 1);
}
/* t(C)*C*p */
if ((u_vec != NULL && cnt_NA_u == 0) ||
(u_vec == NULL && NA_as_zero_U))
{
if (prefer_CtC)
cblas_tsymv(CblasRowMajor, CblasUpper, k_totC,
w_user, precomputedCtC, k_totC,
pp, 1,
1., Ap, 1);
else
for (size_t ix = 0; ix < (size_t)p; ix++) {
coef = cblas_tdot(k_user+k,
C + ix*(size_t)k_totC, 1,
pp, 1);
cblas_taxpy(k_user+k, w_user * coef,
C + ix*(size_t)k_totC, 1,
Ap, 1);
}
}
else if (u_vec != NULL)
{
if (prefer_CtC)
{
cblas_tsymv(CblasRowMajor, CblasUpper, k_user+k,
w_user, precomputedCtC, k_user+k,
pp, 1,
1., Ap, 1);
for (size_t ix = 0; ix < (size_t)p; ix++)
if (isnan(u_vec[ix])) {
coef = cblas_tdot(k_user+k,
C + ix*(size_t)k_totC, 1,
pp, 1);
cblas_taxpy(k_user+k, -w_user * coef,
C + ix*(size_t)k_totC, 1,
Ap, 1);
}
}
else
for (size_t ix = 0; ix < (size_t)p; ix++)
{
if (!isnan(u_vec[ix])) {
coef = cblas_tdot(k_user+k,
C + ix*(size_t)k_totC, 1,
pp, 1);
cblas_taxpy(k_user+k, w_user * coef,
C + ix*(size_t)k_totC, 1,
Ap, 1);
}
}
}
else if (u_vec_sp != NULL)
{
for (size_t ix = 0; ix < nnz_u_vec; ix++)
{
coef = cblas_tdot(k_user+k,
C + (size_t)u_vec_ixB[ix]*(size_t)k_totC, 1,
pp, 1);
cblas_taxpy(k_user+k, w_user * coef,
C + (size_t)u_vec_ixB[ix]*(size_t)k_totC, 1,
Ap, 1);
}
}
/* diag(lam) */
cblas_taxpy(k_totA, lam, pp, 1, Ap, 1);
/* rest of the procedure */
a = r_old / cblas_tdot(k_totA, Ap, 1, pp, 1);
cblas_taxpy(k_totA, a, pp, 1, a_vec, 1);
cblas_taxpy(k_totA, -a, Ap, 1, r, 1);
r_new = cblas_tdot(k_totA, r, 1, r, 1);
#ifdef TEST_CG
if (r_new <= 1e-15)
break;
#else
if (r_new <= 1e-8)
break;
#endif
cblas_tscal(k_totA, r_new / r_old, pp, 1);
cblas_taxpy(k_totA, 1., r, 1, pp, 1);
r_old = r_new;
}
}
/*******************************************************************************
Cold-Start Predictions
----------------------
This function aims at determining the optimal values of a single row of the
A matrix given only information about U and/or Ubin, with no data for X.
The same function works for both implicit and explicit feedback cases
(given that there's no X vector).
The intended purpose is for cold-start recommendations, which are then
obtained by multiplying the obtained vector with the B matrix.
If there are no binary variables, it's possible to use the closed form
solution as explained at the top of this file, otherwise it's possible
to use a gradient-based approach with the function that calculates the
gradient for a single observation.
Note that the values for U passed to this function must already be centered
by columns (if this step was performed when fitting the model).
See documentation of the single-row gradient function for details
about the input parameters.
This function can be sped-up using precomputed multiplications of C:
(a) inv(t(C)*C + diag(lam))*t(C), if passing a full u_vec with no NAs.
(b) t(C)*C+diag(lam), if passing u_vec with <= 10% NAs.
(c) t(C)*C+diag(lam), if passing sparse u_vec with 'NA_as_zero_U=true'
(this is a different model formulation from the others)
Will return 0 (zero) if everything went correctly, one (1) if it
ran out of memory, or two (2) if the parameters were invalid (basically,
cannot have 'NA_as_zero_U=true' if there's u_bin_vec and/or Cb).
The obtained factors will be available in 'a_vec', while the obtained bias
will be available in 'a_bias'.
*******************************************************************************/
/* Computes the latent factors ('a_vec') for a single new row of 'A' using
   only side information ('u_vec' / 'u_vec_sp' / 'u_bin_vec'), with no 'X'
   data - i.e. the cold-start case described in the block comment above.
   Returns 0 on success, 1 on out-of-memory, 2 on an invalid parameter
   combination ('NA_as_zero_U' together with binary side info). */
int_t collective_factors_cold
(
    real_t *restrict a_vec,
    real_t *restrict u_vec, int_t p,
    real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec,
    real_t *restrict u_bin_vec, int_t pbin,
    real_t *restrict C, real_t *restrict Cb,
    real_t *restrict TransCtCinvCt,
    real_t *restrict CtCw,
    real_t *restrict col_means,
    real_t *restrict CtUbias,
    int_t k, int_t k_user, int_t k_main,
    real_t lam, real_t l1_lam, real_t w_main, real_t w_user,
    bool scale_lam_sideinfo,
    bool NA_as_zero_U,
    bool nonneg
)
{
    /* 'NA_as_zero_U' is a different model formulation which is not
       compatible with binary side info. */
    if (NA_as_zero_U && u_bin_vec != NULL) {
        fprintf(stderr, "Cannot use 'NA_as_zero_U' when there is 'u_bin'\n");
        fflush(stderr);
        return 2;
    }
    int_t retval = 0;
    int_t cnt_NA_u_vec = 0;
    int_t cnt_NA_u_bin_vec = 0;
    bool free_u_vec = false;
    bool free_u_sp = false;
    /* Center 'u_vec' by 'col_means' and count missing entries. May replace
       'u_vec'/'u_vec_sp' with owned copies, freed at 'cleanup'. */
    if (u_vec != NULL || (u_vec_sp != NULL && !NA_as_zero_U))
        retval = preprocess_vec(&u_vec, p, u_vec_ixB, &u_vec_sp, nnz_u_vec,
                                0., 0., col_means, (real_t*)NULL,
                                &cnt_NA_u_vec,
                                &free_u_vec, &free_u_sp);
    if (retval != 0) return retval;

    if (u_bin_vec != NULL)
        cnt_NA_u_bin_vec = count_NAs(u_bin_vec, (size_t)pbin, 1);

    /* The 'k_main' components are not informed by side info - zero them. */
    if (k_main > 0)
        set_to_zero(a_vec + k_user+k, k_main);

    real_t *restrict buffer_real_t = NULL;
    size_t size_buffer = 0;

    /* Rescale the hyperparameters so that 'w_main' is implicitly 1. */
    if (w_main != 1.) {
        lam /= w_main;
        l1_lam /= w_main;
        w_user /= w_main;
    }

    /* If there is no data, just return zeros */
    if (    ((u_vec != NULL && cnt_NA_u_vec == p) ||
             (u_vec == NULL && nnz_u_vec == 0 &&
              (CtUbias == NULL || !NA_as_zero_U)))
                &&
            (u_bin_vec == NULL || cnt_NA_u_bin_vec == pbin) )
    {
        set_to_zero(a_vec, k_user+k);
        goto cleanup;
    }

    /* If there are no binary variables, solution can be obtained through
       closed form */
    if (u_bin_vec == NULL)
    {
        size_buffer = square(k_user + k);
        if (nonneg)
            size_buffer += k_user + k;
        else if (l1_lam)
            size_buffer += 3*(k_user+k);
        /* With precomputed inv(t(C)*C+diag(lam))*t(C) and a full 'u_vec'
           (or NA_as_zero_U), the solve degenerates to a matrix-vector
           product and needs no scratch buffer. */
        if (TransCtCinvCt != NULL && !nonneg && !l1_lam &&
            ((cnt_NA_u_vec == 0 && u_vec != NULL) ||
             (u_vec == NULL && NA_as_zero_U)))
        {
            size_buffer = 0;
        }
        if (size_buffer) {
            buffer_real_t = (real_t*)malloc(size_buffer * sizeof(real_t));
            if (buffer_real_t == NULL) goto throw_oom;
        }

        factors_closed_form(a_vec, k_user + k,
                            C, p, k_user + k,
                            u_vec, cnt_NA_u_vec==0,
                            u_vec_sp, u_vec_ixB, nnz_u_vec,
                            (real_t*)NULL,
                            buffer_real_t, lam/w_user, lam/w_user,
                            l1_lam/w_user, l1_lam/w_user, scale_lam_sideinfo,
                            scale_lam_sideinfo, 0.,
                            TransCtCinvCt, CtCw, cnt_NA_u_vec, k_user + k,
                            false, true, w_user, p,
                            (real_t*)NULL, NA_as_zero_U,
                            false, 0,
                            nonneg, max2(k_user+k, (int_t)10*(k_user+k)),
                            NA_as_zero_U? CtUbias : (real_t*)NULL,
                            (real_t*)NULL, 0., w_user, true);
    }

    else
    {
        /* Otherwise, need to take a gradient-based approach with a solver. */
        buffer_real_t = (real_t*)malloc((size_t)max2(p, pbin)*sizeof(real_t));
        if (buffer_real_t == NULL) goto throw_oom;

        retval = collective_factors_lbfgs(
            a_vec,
            k, k_user, 0, 0,
            u_vec, p,
            u_vec_ixB, u_vec_sp, nnz_u_vec,
            u_bin_vec, pbin,
            cnt_NA_u_vec>0, cnt_NA_u_bin_vec>0,
            (real_t*)NULL, 0,
            C, Cb,
            (real_t*)NULL, (int_t*)NULL, (real_t*)NULL, 0,
            (real_t*)NULL,
            buffer_real_t,
            lam, 1., w_user, lam
        );
    }

    cleanup:
        free(buffer_real_t);
        if (free_u_vec)
            free(u_vec);
        if (free_u_sp)
            free(u_vec_sp);
        return retval;
    throw_oom:
    {
        retval = 1;
        goto cleanup;
    }
}
/* Cold-start factors for the implicit-feedback model: solves for a single
   row of 'A' ('a_vec', length k_user+k+k_main) given only the 'U' side info,
   with no 'X' data. Returns 0 on success, 1 on out-of-memory. */
int_t collective_factors_cold_implicit
(
    real_t *restrict a_vec,
    real_t *restrict u_vec, int_t p,
    real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec,
    real_t *restrict B, int_t n,
    real_t *restrict C,
    real_t *restrict BeTBe,
    real_t *restrict BtB,
    real_t *restrict BeTBeChol,
    real_t *restrict col_means,
    real_t *restrict CtUbias,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t lam, real_t l1_lam,
    real_t w_main, real_t w_user, real_t w_main_multiplier,
    bool NA_as_zero_U,
    bool nonneg
)
{
    int_t retval = 0;
    int_t k_totA = k_user + k + k_main;
    int_t cnt_NA_u_vec = 0;
    bool free_u_vec = false;
    bool free_u_sp = false;
    real_t *restrict buffer_real_t = NULL;
    size_t size_buffer = square(k_totA);

    /* Center 'u_vec' by 'col_means' and count missing entries. May replace
       'u_vec'/'u_vec_sp' with owned copies, freed at 'cleanup'. */
    if (u_vec != NULL || (u_vec_sp != NULL && !NA_as_zero_U))
        retval = preprocess_vec(&u_vec, p, u_vec_ixB, &u_vec_sp, nnz_u_vec,
                                0., 0., col_means, (real_t*)NULL,
                                &cnt_NA_u_vec,
                                &free_u_vec, &free_u_sp);
    if (retval != 0) return retval;

    /* No usable side info -> the solution is all-zeros. */
    if ((u_vec != NULL && cnt_NA_u_vec == p)
            ||
        (u_vec_sp != NULL && nnz_u_vec == 0))
    {
        set_to_zero(a_vec, k_totA);
        goto cleanup;
    }

    /* Extra scratch needed by the non-negativity / L1 solvers. */
    if (nonneg)
        size_buffer += k_totA;
    else if (l1_lam)
        size_buffer += 3*k_totA;
    buffer_real_t = (real_t*)malloc(size_buffer*sizeof(real_t));
    if (buffer_real_t == NULL) goto throw_oom;

    /* Rescale the hyperparameters so that 'w_main' is implicitly 1. */
    if (w_main_multiplier != 1.)
        w_main *= w_main_multiplier;
    if (w_main != 1.) {
        w_user /= w_main;
        lam /= w_main;
        l1_lam /= w_main;
    }

    collective_closed_form_block_implicit(
        a_vec,
        k, k_user, 0, k_main,
        B, n, C, p,
        (real_t*)NULL, (int_t*)NULL, (size_t)0, /* <- no 'X' data */
        u_vec, cnt_NA_u_vec,
        u_vec_sp, u_vec_ixB, nnz_u_vec,
        NA_as_zero_U,
        lam, l1_lam, w_user,
        CtUbias,
        BeTBe,
        BtB,
        BeTBeChol,
        (real_t*)NULL,
        true, true, false, 0,
        nonneg, max2(k_totA, (int_t)10*k_totA),
        buffer_real_t
    );

    cleanup:
        free(buffer_real_t);
        if (free_u_vec)
            free(u_vec);
        if (free_u_sp)
            free(u_vec_sp);
        return retval;
    throw_oom:
    {
        retval = 1;
        goto cleanup;
    }
}
/*******************************************************************************
Warm-Start Predictions
----------------------
Note that the values for U passed to this function must already be centered
by columns (if this step was performed when fitting the model).
See documentation of the single-row gradient function for details
about the input parameters.
Additionally:
- If using user bias, must pass a pointer to a single value in which to
output it ('a_bias').
- Must pass the column means for 'U' if it is passed, as it will center
them here.
- The values for X and U will be modified in-place.
Will return 0 (zero) if everything went correctly, one (1) if it ran
out of memory, two (2) if the parameter combinations were invalid
(basically, cannot pass NAs as zero if there is Ub or Cb).
*******************************************************************************/
/* Computes warm-start factors for a single row of 'A' given both 'X' data
   ('Xa'/'Xa_dense') and, optionally, side info ('u_vec'/'u_vec_sp'/
   'u_bin_vec'). Outputs the factors in 'a_vec' and (when applicable) the
   user bias in 'a_bias'. See the block comment above for the contract.
   Returns 0 on success, 1 on out-of-memory, 2 on invalid parameter
   combinations ('NA_as_zero*' or implicit features together with 'u_bin'). */
int_t collective_factors_warm
(
    real_t *restrict a_vec, real_t *restrict a_bias,
    real_t *restrict u_vec, int_t p,
    real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec,
    real_t *restrict u_bin_vec, int_t pbin,
    real_t *restrict C, real_t *restrict Cb,
    real_t glob_mean, real_t *restrict biasB,
    real_t *restrict col_means,
    real_t *restrict Xa, int_t ixB[], size_t nnz,
    real_t *restrict Xa_dense, int_t n,
    real_t *restrict weight,
    real_t *restrict B,
    real_t *restrict Bi, bool add_implicit_features,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t lam, real_t w_main, real_t w_user, real_t w_implicit,real_t lam_bias,
    real_t l1_lam, real_t l1_lam_bias,
    bool scale_lam, bool scale_lam_sideinfo, bool scale_bias_const,
    int_t n_max, bool include_all_X,
    real_t *restrict TransBtBinvBt,
    real_t *restrict BtXbias,
    real_t *restrict BtB,
    real_t *restrict BeTBeChol,
    real_t *restrict BiTBi,
    real_t *restrict CtCw,
    real_t *restrict CtUbias,
    bool NA_as_zero_U, bool NA_as_zero_X,
    bool nonneg,
    real_t *restrict B_plus_bias
)
{
    /* Binary side info is incompatible with the 'NA_as_zero' formulations
       and with implicit features. */
    if (u_bin_vec != NULL && (NA_as_zero_X || NA_as_zero_U)) {
        fprintf(stderr, "Cannot use 'NA_as_zero' when there is 'u_bin'\n");
        fflush(stderr);
        return 2;
    }
    if (u_bin_vec != NULL && add_implicit_features) {
        fprintf(stderr, "Cannot use implicit features when there is 'u_bin'\n");
        fflush(stderr);
        return 2;
    }
    int_t retval = 0;
    bool free_BtX = false;
    real_t *restrict Xones = NULL;      /* indicator of non-missing X, for implicit features */
    real_t *restrict buffer_real_t = NULL;
    size_t size_buffer;
    real_t *restrict a_plus_bias = NULL; /* factors + trailing bias entry */
    int_t cnt_NA_u_vec = 0;
    int_t cnt_NA_u_bin_vec = 0;
    int_t cnt_NA_x = 0;
    bool free_u_vec = false;
    bool free_u_sp = false;
    bool free_xdense = false;
    bool free_xsp = false;
    /* The bias is obtained by appending a constant column to 'B'. */
    bool append_bias = (B_plus_bias != NULL && a_bias != NULL);
    if (u_bin_vec != NULL)
        cnt_NA_u_bin_vec = count_NAs(u_bin_vec, (size_t)pbin, 1);
    /* Center 'u_vec' by 'col_means' (may allocate owned copies). */
    if (u_vec != NULL || (u_vec_sp != NULL && !NA_as_zero_U))
        retval = preprocess_vec(&u_vec, p, u_vec_ixB, &u_vec_sp, nnz_u_vec,
                                0., 0., col_means, (real_t*)NULL,
                                &cnt_NA_u_vec,
                                &free_u_vec, &free_u_sp);
    if (retval != 0) goto throw_oom;
    /* Center 'X' by the global mean and item biases; may also produce a
       starting estimate of the user bias when there is no B_plus_bias. */
    if (!NA_as_zero_X || Xa_dense != NULL)
        retval = preprocess_vec(&Xa_dense, n, ixB, &Xa, nnz,
                                glob_mean, lam_bias, biasB,
                                (B_plus_bias == NULL)? a_bias : (real_t*)NULL,
                                &cnt_NA_x, &free_xdense, &free_xsp);
    if (retval != 0) goto throw_oom;

    scale_lam = scale_lam || scale_lam_sideinfo;
    if (a_bias == NULL) scale_bias_const = false;
    /* 'BtXbias' only applies under the NA-as-zero-X formulation. */
    if (Xa_dense != NULL || !NA_as_zero_X)
        BtXbias = NULL;

    /* If there is no data, can just set it to zero */
    if (
        ((Xa_dense != NULL && cnt_NA_x == n) ||
         (Xa_dense == NULL && nnz == 0 &&
          !(NA_as_zero_X && (BtXbias!= NULL|| glob_mean!= 0.|| biasB != NULL))))
            &&
        (   (u_vec != NULL && cnt_NA_u_vec == p)
                ||
            (u_vec == NULL && nnz_u_vec == 0 &&
             (CtUbias == NULL || !NA_as_zero_U))
        )
            &&
        /* NOTE(review): this checks 'cnt_NA_u_bin_vec == 0' (no missing
           binary values, i.e. full data), whereas the analogous check in
           'collective_factors_cold' uses '== pbin' (all missing) - looks
           like it should be '== pbin' here too; confirm upstream. */
        (u_bin_vec == NULL || cnt_NA_u_bin_vec == 0)
        )
    {
        if (append_bias) *a_bias = 0;
        set_to_zero(a_vec, k_user + k + k_main);
        goto cleanup;
    }

    /* If there is no 'X' data but there is 'U', should call the cold version */
    else if (
        !add_implicit_features &&
        ((Xa_dense != NULL && cnt_NA_x == n) ||
         (Xa_dense == NULL && nnz == 0 && !NA_as_zero_X))
        )
    {
        if (append_bias) *a_bias = 0;
        retval = collective_factors_cold(
            a_vec,
            u_vec, p,
            u_vec_sp, u_vec_ixB, nnz_u_vec,
            u_bin_vec, pbin,
            C, Cb,
            (real_t*)NULL,
            CtCw,
            col_means,
            CtUbias,
            k, k_user, k_main,
            lam, l1_lam, w_main, w_user,
            scale_lam_sideinfo,
            NA_as_zero_U,
            nonneg
        );
        if (retval == 1) goto throw_oom;
        goto cleanup;
    }

    /* Otherwise (expected case), calculate the 'warm' factors */

    /* For implicit features, build a 0/1 indicator of observed X entries. */
    if (add_implicit_features)
    {
        if (Xa_dense != NULL)
            Xones = (real_t*)malloc((size_t)n*sizeof(real_t));
        else
            Xones = (real_t*)malloc(nnz*sizeof(real_t));
        if (Xones == NULL) goto throw_oom;

        if (Xa_dense != NULL)
            for (int_t ix = 0; ix < n; ix++)
                Xones[ix] = isnan(Xa_dense[ix])? 0. : 1.;
        else
            for (size_t ix = 0; ix < nnz; ix++)
                Xones[ix] = 1.;
    }

    if (append_bias) {
        a_plus_bias = (real_t*)malloc((size_t)(k_user+k+k_main+1)
                                      * sizeof(real_t));
        if (a_plus_bias == NULL) goto throw_oom;
    }

    /* Rescale all hyperparameters so that 'w_main' is 1. */
    if (w_main != 1.) {
        w_user /= w_main;
        w_implicit /= w_main;
        lam /= w_main;
        lam_bias /= w_main;
        l1_lam /= w_main;
        l1_lam_bias /= w_main;
        w_main = 1.;
    }

    /* Under NA-as-zero-X with centering/biases, the product t(B)*X gains a
       constant correction term; build it here if not precomputed. */
    if (NA_as_zero_X && BtXbias == NULL && (glob_mean != 0. || biasB != NULL))
    {
        BtXbias = (real_t*)calloc(k+k_main+append_bias, sizeof(real_t));
        if (BtXbias == NULL) goto throw_oom;
        free_BtX = true;
        if (biasB != NULL)
        {
            /* Items beyond 'n' have no bias; they only contribute the
               global mean. */
            if (glob_mean != 0. && n_max > n)
            {
                sum_by_cols((append_bias? B_plus_bias : B)
                                + k_item
                                + (size_t)n*(size_t)
                                    (k_item+k+k_main+append_bias),
                            BtXbias,
                            n_max - n, k+k_main,
                            k_item+k+k_main+append_bias, 1);
                if (append_bias)
                    BtXbias[k+k_main] = (real_t)(n_max - n);
                cblas_tscal(k+k_main+append_bias, -glob_mean, BtXbias, 1);
            }

            for (size_t col = 0; col < (size_t)n; col++)
                cblas_taxpy(k+k_main+append_bias,
                            -(biasB[col] + glob_mean),
                            (append_bias? B_plus_bias : B)
                                + (size_t)k_item
                                + col*(size_t)(k_item+k+k_main+append_bias), 1,
                            BtXbias, 1);
        }

        else if (glob_mean != 0.)
        {
            sum_by_cols((append_bias? B_plus_bias : B) + k_item, BtXbias,
                        n_max, k+k_main,
                        k_item+k+k_main+append_bias, 1);
            if (append_bias)
                BtXbias[k+k_main] = (real_t)n_max;
            cblas_tscal(k+k_main+append_bias, -glob_mean, BtXbias, 1);
        }
    }

    /* If there's no side info, just need to apply the closed-form
       on the X data */
    if (u_vec == NULL && (nnz_u_vec == 0 && !NA_as_zero_U) &&
        u_bin_vec == NULL && !add_implicit_features)
    {
        size_buffer = square(k + k_main + (int)append_bias);
        if (nonneg)
            size_buffer += k+k_main+append_bias;
        else if (l1_lam || l1_lam_bias)
            size_buffer += 3*(k+k_main+append_bias);
        /* Precomputed inv(t(B)*B+diag(lam))*t(B) makes the solve a single
           matrix-vector product, needing no scratch. */
        if (TransBtBinvBt != NULL && weight == NULL &&
            !nonneg && !l1_lam && !l1_lam_bias &&
            ((cnt_NA_x == 0 && Xa_dense != NULL) ||
             (Xa_dense == NULL && NA_as_zero_X && BtXbias == NULL)) )
        {
            size_buffer = 0;
        }
        if (size_buffer) {
            buffer_real_t = (real_t*)malloc(size_buffer*sizeof(real_t));
            if (buffer_real_t == NULL) goto throw_oom;
        }

        /* The 'k_user' components are not informed by X - zero them. */
        if (k_user > 0) {
            if (a_plus_bias == NULL)
                set_to_zero(a_vec, k_user);
            else
                set_to_zero(a_plus_bias, k_user);
        }

        if (!append_bias)
            factors_closed_form(a_vec + k_user, k+k_main,
                                B + k_item, n, k_item+k+k_main,
                                Xa_dense, cnt_NA_x==0,
                                Xa, ixB, nnz,
                                weight,
                                buffer_real_t,
                                lam, lam, l1_lam, l1_lam,
                                scale_lam, scale_lam, 0.,
                                TransBtBinvBt, BtB,
                                cnt_NA_x, k+k_main,
                                false, false, 1., include_all_X? n_max : n,
                                (real_t*)NULL, NA_as_zero_X,
                                false, 0,
                                nonneg, max2(k+k_main, (int_t)10*(k+k_main)),
                                BtXbias, biasB, glob_mean, 1.,
                                true);
        else
            factors_closed_form(a_plus_bias + k_user, k+k_main+1,
                                B_plus_bias + k_item, n, k_item+k+k_main+1,
                                Xa_dense, cnt_NA_x==0,
                                Xa, ixB, nnz,
                                weight,
                                buffer_real_t,
                                lam, lam_bias, l1_lam, l1_lam_bias,
                                scale_lam, scale_bias_const, 0.,
                                TransBtBinvBt, BtB,
                                cnt_NA_x, k+k_main+1,
                                false, false, 1., include_all_X? n_max : n,
                                (real_t*)NULL, NA_as_zero_X,
                                false, 0,
                                nonneg, max2(k+k_main+1,(int_t)10*(k+k_main+1)),
                                BtXbias, biasB, glob_mean, 1.,
                                true);
    }

    /* If there are binary variables, there's no closed form solution,
       so it will follow a gradient-based optimization approach with
       the L-BFGS solver */
    else if (u_bin_vec != NULL)
    {
        size_t size_buffer = max2(p, pbin);
        if (Xa_dense != NULL)
            size_buffer = max2(size_buffer, (size_t)n);
        buffer_real_t = (real_t*)malloc(size_buffer*sizeof(real_t));
        if (buffer_real_t == NULL) goto throw_oom;

        if (!append_bias)
            retval = collective_factors_lbfgs(
                a_vec,
                k, k_user, k_item, k_main,
                u_vec, p,
                u_vec_ixB, u_vec_sp, nnz_u_vec,
                u_bin_vec, pbin,
                cnt_NA_u_vec!=0, cnt_NA_u_bin_vec!=0,
                B, n,
                C, Cb,
                Xa, ixB, weight, nnz,
                Xa_dense,
                buffer_real_t,
                lam, w_main, w_user, lam
            );
        else
            retval = collective_factors_lbfgs(
                a_plus_bias,
                k, k_user, k_item, k_main+1,
                u_vec, p,
                u_vec_ixB, u_vec_sp, nnz_u_vec,
                u_bin_vec, pbin,
                cnt_NA_u_vec!=0, cnt_NA_u_bin_vec!=0,
                B_plus_bias, n,
                C, Cb,
                Xa, ixB, weight, nnz,
                Xa_dense,
                buffer_real_t,
                lam, w_main, w_user, lam_bias
            );
    }

    /* If there's no binary data, can apply the closed form on extended block
       matrices Xe and Be, whose composition differs according to the
       independent components */
    else
    {
        size_buffer = square(k_user+k+k_main+(int)append_bias);
        if (add_implicit_features && BiTBi == NULL)
            size_buffer += square(k+k_main);
        if (nonneg)
            size_buffer += k_user+k+k_main+append_bias;
        else if (l1_lam || l1_lam_bias)
            size_buffer += 3*(k_user+k+k_main+append_bias);
        buffer_real_t = (real_t*)malloc(size_buffer*sizeof(real_t));
        if (buffer_real_t == NULL) goto throw_oom;

        if (!append_bias)
            collective_closed_form_block(
                a_vec,
                k, k_user, k_item, k_main,
                Xa_dense,
                Xa, ixB, nnz,
                u_vec_ixB, u_vec_sp, nnz_u_vec,
                u_vec,
                NA_as_zero_X, NA_as_zero_U,
                B, n, k_item+k+k_main,
                C, p,
                Bi, k_main, add_implicit_features,
                Xones, 1,
                weight,
                lam, w_user, w_implicit, lam,
                l1_lam, l1_lam,
                scale_lam, scale_lam_sideinfo,
                scale_bias_const, 0.,
                BtB, cnt_NA_x,
                CtCw, cnt_NA_u_vec,
                BeTBeChol, include_all_X? n_max : n,
                BiTBi,
                true, true, false, 0,
                nonneg, max2(k_user+k+k_main, (int_t)10*(k_user+k+k_main)),
                BtXbias, biasB, glob_mean,
                CtUbias,
                buffer_real_t
            );
        else
            collective_closed_form_block(
                a_plus_bias,
                k, k_user, k_item, k_main+1,
                Xa_dense,
                Xa, ixB, nnz,
                u_vec_ixB, u_vec_sp, nnz_u_vec,
                u_vec,
                NA_as_zero_X, NA_as_zero_U,
                B_plus_bias, n, k_item+k+k_main+1,
                C, p,
                Bi, k_main, add_implicit_features,
                Xones, 1,
                weight,
                lam, w_user, w_implicit, lam_bias,
                l1_lam, l1_lam_bias,
                scale_lam, scale_lam_sideinfo,
                scale_bias_const, 0.,
                BtB, cnt_NA_x,
                CtCw, cnt_NA_u_vec,
                BeTBeChol, include_all_X? n_max : n,
                BiTBi,
                true, true, false, 0,
                nonneg, max2(k_user+k+k_main+1, (int_t)10*(k_user+k+k_main+1)),
                BtXbias, biasB, glob_mean,
                CtUbias,
                buffer_real_t
            );
        retval = 0;
    }

    /* Split the solved vector back into factors + bias. */
    if (append_bias) {
        memcpy(a_vec, a_plus_bias, (size_t)(k_user+k+k_main)*sizeof(real_t));
        *a_bias = a_plus_bias[k_user+k+k_main];
    }

    cleanup:
        free(buffer_real_t);
        free(a_plus_bias);
        free(Xones);
        if (free_BtX)
            free(BtXbias);
        if (free_u_vec)
            free(u_vec);
        if (free_u_sp)
            free(u_vec_sp);
        if (free_xdense)
            free(Xa_dense);
        if (free_xsp)
            free(Xa);
        return retval;
    throw_oom:
    {
        retval = 1;
        goto cleanup;
    }
}
/* Computes warm-start factors for a single row of 'A' under the implicit-
   feedback model, given the observed interactions ('Xa'/'ixB'/'nnz') and
   optionally 'U' side info. Factors are output in 'a_vec' (length
   k_user+k+k_main). Returns 0 on success, 1 on out-of-memory. */
int_t collective_factors_warm_implicit
(
    real_t *restrict a_vec,
    real_t *restrict u_vec, int_t p,
    real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec,
    bool NA_as_zero_U,
    bool nonneg,
    real_t *restrict col_means,
    real_t *restrict B, int_t n, real_t *restrict C,
    real_t *restrict Xa, int_t ixB[], size_t nnz,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t lam, real_t l1_lam, real_t alpha, real_t w_main, real_t w_user,
    real_t w_main_multiplier,
    real_t *restrict BeTBe,
    real_t *restrict BtB,
    real_t *restrict BeTBeChol,
    real_t *restrict CtUbias
)
{
    int_t retval = 0;
    int_t cnt_NA_u_vec = 0;
    int_t k_totA = k_user + k + k_main;
    real_t *restrict buffer_real_t = NULL;
    size_t size_buffer = square(k_totA);
    bool free_u_vec = false;
    bool free_u_sp = false;
    bool free_xsp = false;

    /* Extra scratch needed by the non-negativity / L1 solvers. */
    if (nonneg)
        size_buffer += k_totA;
    else if (l1_lam)
        size_buffer += 3*k_totA;
    buffer_real_t = (real_t*)malloc(size_buffer*sizeof(real_t));
    if (buffer_real_t == NULL) goto throw_oom;

    /* Rescale the hyperparameters so that 'w_main' is implicitly 1.
       Fix: also rescale 'l1_lam', consistently with
       'collective_factors_cold_implicit' and 'collective_factors_warm',
       which divide the L1 penalty by 'w_main' as well - previously the
       L1 penalty here was left scaled by 'w_main'. */
    w_main *= w_main_multiplier;
    if (w_main != 1.) {
        lam /= w_main;
        l1_lam /= w_main;
        w_user /= w_main;
    }

    /* Apply the confidence multiplier on an owned copy so as not to modify
       the caller's 'Xa'. Skipped when 'nnz' is zero, where there is nothing
       to scale and 'malloc(0)' may legally return NULL, which would have
       been misreported as an out-of-memory error. */
    if (alpha != 1. && nnz)
    {
        real_t *restrict temp = (real_t*)malloc(nnz*sizeof(real_t));
        if (temp == NULL) goto throw_oom;
        copy_arr(Xa, temp, nnz);
        Xa = temp;
        free_xsp = true;
        tscal_large(Xa, alpha, nnz, 1);
    }

    if (u_vec != NULL || nnz_u_vec || NA_as_zero_U) {
        /* Center 'u_vec' by 'col_means' (may allocate owned copies). */
        if (u_vec != NULL || nnz_u_vec)
            retval = preprocess_vec(&u_vec, p, u_vec_ixB, &u_vec_sp,
                                    nnz_u_vec,
                                    0., 0., col_means, (real_t*)NULL,
                                    &cnt_NA_u_vec, &free_u_vec, &free_u_sp);
        if (retval != 0) goto throw_oom;

        collective_closed_form_block_implicit(
            a_vec,
            k, k_user, k_item, k_main,
            B, n, C, p,
            Xa, ixB, nnz,
            u_vec, cnt_NA_u_vec,
            u_vec_sp, u_vec_ixB, nnz_u_vec,
            NA_as_zero_U,
            lam, l1_lam, w_user,
            CtUbias,
            BeTBe,
            BtB,
            BeTBeChol,
            (real_t*)NULL,
            true, true, false, 0,
            nonneg, max2(k_totA, (int_t)10*k_totA),
            buffer_real_t
        );
    }

    else {
        /* No side info at all: solve using only the 'X' data; the 'k_user'
           components are not informed by X and stay at zero. */
        set_to_zero(a_vec, k_user+k+k_main);
        factors_implicit_chol(
            a_vec + k_user, k+k_main,
            B + k_item, (size_t)(k_item+k+k_main),
            Xa, ixB, nnz,
            lam, l1_lam,
            BtB, k+k_main,
            nonneg, max2(k_totA, (int_t)10*k_totA),
            buffer_real_t
        );
    }

    cleanup:
        free(buffer_real_t);
        if (free_u_vec)
            free(u_vec);
        if (free_u_sp)
            free(u_vec_sp);
        if (free_xsp)
            free(Xa);
        return retval;
    throw_oom:
    {
        retval = 1;
        goto cleanup;
    }
}
/* Objective function and gradient for all rows of 'A' at once, combining
   the main factorization (A*t(B) ~ X, dense or CSR) and the side-info
   factorization (A*t(C) ~ U, dense or CSR), plus L2 regularization with a
   potentially different 'lam_last' on the last 'k_main' column. Returns the
   function value and writes the gradient into 'g_A'. 'do_B' swaps the roles
   of A and B in the dense-X case (gradient w.r.t. the transposed side). */
real_t fun_grad_A_collective
(
    real_t *restrict A, real_t *restrict g_A,
    real_t *restrict B, real_t *restrict C,
    int_t m, int_t m_u, int_t n, int_t p,
    int_t k, int_t k_main, int_t k_user, int_t k_item, int_t padding,
    real_t *restrict Xfull, bool full_dense,
    size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr,
    real_t *restrict weight,
    size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr,
    real_t *restrict U, bool full_dense_u,
    real_t lam, real_t w_main, real_t w_user, real_t lam_last,
    bool do_B,
    int nthreads,
    real_t *restrict buffer_real_t
)
{
    /* Old OpenMP (or Windows) requires a signed loop index. */
    #if defined(_OPENMP) && \
                (   (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long ia;
    #endif

    int_t k_totA = k_user + k + k_main + padding;  /* row stride of A */
    int_t k_totB = k_item + k + k_main + padding;  /* row stride of B */
    int_t k_totC = k_user + k;                     /* row stride of C */
    int_t k_totX = k + k_main;  /* columns shared between A and B */
    int_t m_max = max2(m, m_u);
    real_t f = 0.;
    real_t err;
    size_t ib;

    set_to_zero_(g_A, (size_t)m_max*(size_t)k_totA, nthreads);

    /* Main factorization term: (w_main/2) * ||W . (A*t(B) - X)||^2 */
    if (Xfull != NULL)
    {
        if (!do_B)
            f = fun_grad_Adense(
                    g_A + k_user,
                    A + k_user, k_totA,
                    B + k_item, k_totB,
                    m, n, k + k_main,
                    Xfull, weight,
                    0., w_main, 0.,
                    false, true,
                    nthreads,
                    buffer_real_t
                );
        else
            f = fun_grad_Adense(
                    g_A + k_user,
                    B + k_item, k_totB,
                    A + k_user, k_totA,
                    n, m, k + k_main,
                    Xfull, weight,
                    0., w_main, 0.,
                    true, true,
                    nthreads,
                    buffer_real_t
                );
    }

    else
    {
        /* Sparse X (CSR): accumulate weighted squared errors per row and
           the gradient contributions entry-by-entry. */
        real_t *restrict Ax = A + k_user;
        real_t *restrict Bx = B + k_item;
        real_t *restrict g_Ax = g_A + k_user;
        real_t err_row = 0;
        #pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
                shared(Xcsr_p, Xcsr_i, Xcsr, Ax, Bx, g_Ax, \
                       k_totA, k_totB, weight, w_main) \
                private(ib, err) firstprivate(err_row) reduction(+:f)
        for (size_t_for ia = 0; ia < (size_t)m; ia++)
        {
            err_row = 0;
            for (size_t ix = Xcsr_p[ia]; ix < Xcsr_p[ia+(size_t)1]; ix++)
            {
                ib = (size_t)Xcsr_i[ix];
                err = cblas_tdot(k_totX, Ax + ia*(size_t)k_totA, 1,
                                 Bx + ib*(size_t)k_totB, 1)
                       - Xcsr[ix];

                err_row += square(err) * ((weight == NULL)? 1. : weight[ix]);
                err *= w_main * ((weight == NULL)? 1. : weight[ix]);
                cblas_taxpy(k_totX, err, Bx + ib*(size_t)k_totB, 1,
                            g_Ax + ia*(size_t)k_totA,1);
            }
            f += err_row;
        }
        /* 'f' so far holds un-scaled weighted squared errors. */
        f *= w_main / 2.;
    }

    /* Side-info term: (w_user/2) * ||A*t(C) - U||^2 */
    if (U != NULL)
    {
        f += fun_grad_Adense(
                    g_A,
                    A, k_totA,
                    C, k_totC,
                    m_u, p, k_user + k,
                    U, (real_t*)NULL,
                    0., w_user, 0.,
                    false, false,
                    nthreads,
                    buffer_real_t
                );
    }

    else
    {
        /* Sparse U (CSR), analogous to the sparse-X loop above. */
        real_t f_user = 0;
        real_t err_row = 0;
        #pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
                shared(U_csr_p, U_csr_i, U_csr, A, C, \
                       g_A, k_totA, k_totC, w_user) \
                private(ib, err, err_row) reduction(+:f_user)
        for (size_t_for ia = 0; ia < (size_t)m_u; ia++)
        {
            err_row = 0;
            for (size_t ix = U_csr_p[ia]; ix < U_csr_p[ia+(size_t)1]; ix++)
            {
                ib = (size_t)U_csr_i[ix];
                err = cblas_tdot(k_totC, A + ia*(size_t)k_totA, 1,
                                 C + ib*(size_t)k_totC, 1)
                       - U_csr[ix];
                err_row += square(err);
                cblas_taxpy(k_totC, err * w_user,
                            C + ib*(size_t)k_totC, 1,
                            g_A + ia*(size_t)k_totA, 1);
            }
            f_user += err_row;
        }
        f += (w_user / 2.) * f_user;
    }

    /* L2 regularization: (lam/2)*||A||^2, with 'lam_last' replacing 'lam'
       on the very last column when they differ. */
    real_t f_reg = 0;
    add_lam_to_grad_and_fun(&f_reg, g_A, A, m_max, k_totA,
                            k_totA, lam, nthreads);
    if (lam != 0. && lam_last != lam && k_main >= 1) {
        cblas_taxpy(m, lam_last-lam, A + k_user + k + k_main, k_totA,
                    g_A + k_user + k + k_main, k_totA);
        /* NOTE(review): the gradient above implies a function contribution
           of ((lam_last-lam)/2)*sum(A_last^2), but no 1/2 factor is applied
           here - confirm whether this is intentional upstream. */
        f += (lam_last-lam) * cblas_tdot(m, A + k_user + k + k_main, k_totA,
                                         A + k_user + k + k_main, k_totA);
    }
    return f + (f_reg / 2.);
}
/* Adapter matching the callback signature expected by the L-BFGS solver:
   unpacks the opaque 'instance' pointer into the packed argument struct and
   forwards everything to 'fun_grad_A_collective'. Writes the gradient into
   'g' and returns the objective value; 'n' and 'step' are required by the
   callback interface but not used here beyond forwarding conventions. */
real_t wrapper_fun_grad_Adense_col
(
    void *instance,
    real_t *x,
    real_t *g,
    const size_t n,
    const real_t step
)
{
    data_fun_grad_Adense_col *args = (data_fun_grad_Adense_col*)instance;
    real_t fun_val = fun_grad_A_collective(
        x, g,
        args->B, args->C,
        args->m, args->m_u, args->n, args->p,
        args->k, args->k_main, args->k_user, args->k_item, args->padding,
        args->Xfull, args->full_dense,
        args->Xcsr_p, args->Xcsr_i, args->Xcsr,
        args->weight,
        args->U_csr_p, args->U_csr_i, args->U_csr,
        args->U, args->full_dense_u,
        args->lam, args->w_main, args->w_user, args->lam_last,
        args->do_B,
        args->nthreads,
        args->buffer_real_t
    );
    return fun_val;
}
/* Computes the size (in 'real_t' elements) of the scratch buffer needed by
   the collective 'optimizeA' step, mirroring that routine's allocation
   decisions branch-by-branch. The 'pass_allocated_*' flags indicate which
   precomputed matrices the caller already provides (so they need no space
   here). With 'use_cg' and 'finalize_chol', the answer is the max over both
   solver paths since both will run. */
size_t buffer_size_optimizeA_collective
(
    size_t m, size_t m_u, size_t n, size_t p,
    size_t k, size_t k_main, size_t k_user,
    bool full_dense, bool near_dense, bool some_full, bool do_B,
    bool has_dense, bool has_sparse, bool has_weights, bool NA_as_zero_X,
    bool has_dense_U, bool has_sparse_U,
    bool full_dense_u, bool near_dense_u, bool some_full_u, bool NA_as_zero_U,
    bool add_implicit_features, size_t k_main_i,
    size_t nthreads,
    bool use_cg, bool finalize_chol,
    bool nonneg, bool has_l1,
    bool keep_precomputed,
    bool pass_allocated_BtB,
    bool pass_allocated_CtCw,
    bool pass_allocated_BeTBeChol,
    bool pass_allocated_BiTBi
)
{
    /* When CG iterations are followed by a final Cholesky pass, both
       code paths execute, so take the max of both requirements. */
    if (finalize_chol && use_cg)
    {
        return max2(
            buffer_size_optimizeA_collective(
                m, m_u, n, p,
                k, k_main, k_user,
                full_dense, near_dense, some_full, do_B,
                has_dense, has_sparse, has_weights, NA_as_zero_X,
                has_dense_U, has_sparse_U,
                full_dense_u, near_dense_u, some_full_u, NA_as_zero_U,
                add_implicit_features, k_main_i,
                nthreads,
                true, false,
                nonneg, has_l1,
                keep_precomputed,
                pass_allocated_BtB,
                pass_allocated_CtCw,
                pass_allocated_BeTBeChol,
                pass_allocated_BiTBi
            ),
            buffer_size_optimizeA_collective(
                m, m_u, n, p,
                k, k_main, k_user,
                full_dense, near_dense, some_full, do_B,
                has_dense, has_sparse, has_weights, NA_as_zero_X,
                has_dense_U, has_sparse_U,
                full_dense_u, near_dense_u, some_full_u, NA_as_zero_U,
                add_implicit_features, k_main_i,
                nthreads,
                false, false,
                nonneg, has_l1,
                keep_precomputed,
                pass_allocated_BtB,
                pass_allocated_CtCw,
                pass_allocated_BeTBeChol,
                pass_allocated_BiTBi
            )
        );
    }

    size_t m_x = m;
    size_t k_totA = k_user + k + k_main;
    size_t size_optimizeA = 0;   /* space for the rows-without-both-matrices pass */
    size_t buffer_size = 0;      /* space for the main combined pass */
    size_t buffer_thread = 0;    /* per-thread scratch, multiplied by nthreads */
    size_t size_alt = 0;         /* alternative (non-threaded) requirement */
    size_t min_size = 0;         /* precomputed matrices allocated here */
    bool will_use_BtB_here = false;
    bool will_use_CtC_here = false;
    /* The CG solver is not usable with non-negativity or L1 constraints. */
    if (nonneg || has_l1)
        use_cg = false;
    if (!has_dense) do_B = false;

    /* Rows with 'X' data but no 'U' data are handled by a separate pass. */
    if (m_x > m_u && m_u > 0)
    {
        if (
            !has_weights &&
            (   (has_dense && (full_dense || near_dense)) ||
                (!has_dense && has_sparse && NA_as_zero_X) ) &&
            (   (has_dense_U && (full_dense_u || near_dense_u)) ||
                (!has_dense_U && has_sparse_U && NA_as_zero_U) )
            )
        {
            will_use_BtB_here = true;
        }

        else {
            will_use_BtB_here = true;
            if (!has_dense && has_sparse && !NA_as_zero_X)
                will_use_BtB_here = false;
            if (has_dense && has_weights)
                will_use_BtB_here = false;
        }

        if (will_use_BtB_here && !pass_allocated_BtB)
        {
            min_size += square(k+k_main);
            pass_allocated_BtB = true;
        }

        if (add_implicit_features && !pass_allocated_BiTBi) {
            pass_allocated_BiTBi = true;
            min_size += square(k+k_main_i);
        }

        if (!add_implicit_features)
            size_optimizeA = buffer_size_optimizeA(
                n, full_dense, near_dense, some_full, do_B,
                has_dense, has_weights, NA_as_zero_X,
                nonneg, has_l1,
                k+k_main, nthreads,
                false,
                pass_allocated_BtB, keep_precomputed || will_use_BtB_here,
                use_cg, finalize_chol
            );
        else
            /* Recurse with the X-only rows and no 'U' inputs. */
            size_optimizeA = buffer_size_optimizeA_collective(
                m_x - m_u, 0, n, 0,
                k, k_main, k_user,
                full_dense, near_dense, some_full, do_B,
                has_dense, has_sparse, has_weights, NA_as_zero_X,
                false, false,
                false, false, false, false,
                add_implicit_features, k_main_i,
                nthreads,
                use_cg, finalize_chol,
                nonneg, has_l1,
                keep_precomputed,
                pass_allocated_BtB,
                false,
                pass_allocated_BeTBeChol,
                pass_allocated_BiTBi
            );
    }

    /* Rows with 'U' data but no 'X' data, handled analogously. */
    else if (m_u > m_x)
    {
        if (
            !has_weights &&
            (   (has_dense && (full_dense || near_dense)) ||
                (!has_dense && has_sparse && NA_as_zero_X) ) &&
            (   (has_dense_U && (full_dense_u || near_dense_u)) ||
                (!has_dense_U && has_sparse_U && NA_as_zero_U) )
            )
        {
            will_use_CtC_here = true;
        }

        else {
            will_use_CtC_here = true;
            if (!has_dense_U && has_sparse_U && !NA_as_zero_U)
                will_use_CtC_here = false;
        }

        if (will_use_CtC_here && !pass_allocated_CtCw) {
            min_size += square(k_user+k);
            pass_allocated_CtCw = true;
        }

        if (!add_implicit_features)
            size_optimizeA = buffer_size_optimizeA(
                p, full_dense_u, near_dense_u, some_full_u, false,
                has_dense_U, false, NA_as_zero_U,
                nonneg, has_l1,
                k_user+k, nthreads,
                false,
                pass_allocated_CtCw, keep_precomputed || will_use_CtC_here,
                use_cg, finalize_chol
            );
        else {
            if (!pass_allocated_BiTBi) {
                min_size += square(k+k_main_i);
                pass_allocated_BiTBi = true;
            }
            size_t m_diff = m_u - m + 2; /* <- extra padding just in case */
            /* Account for index arrays stored in the 'real_t' buffer when
               'size_t' is wider than 'real_t'. */
            if (sizeof(size_t) > sizeof(real_t))
                m_diff *= (size_t)ceill((long double)(sizeof(size_t))
                                            /
                                        (long double)(sizeof(real_t)));
            size_optimizeA = m_diff;
            size_optimizeA += buffer_size_optimizeA_collective(
                m_diff, m_diff, n, p,
                k, k_main_i, k_user,
                false, false, false, false,
                false, true, false, true,
                has_dense_U, has_sparse_U,
                full_dense_u, near_dense_u, some_full_u, NA_as_zero_U,
                false, 0,
                nthreads,
                use_cg, finalize_chol,
                nonneg, has_l1,
                keep_precomputed,
                true,
                pass_allocated_CtCw,
                pass_allocated_BeTBeChol,
                false
            );
        }
    }

    /* Main pass: "nice" case in which most rows can share precomputed
       matrices (dense/near-dense X and U, or NA-as-zero, no weights). */
    if (
        !has_weights &&
        (   (has_dense && (full_dense || near_dense)) ||
            (!has_dense && has_sparse && NA_as_zero_X) ) &&
        (   (has_dense_U && (full_dense_u || near_dense_u)) ||
            (!has_dense_U && has_sparse_U && NA_as_zero_U) ||
            (!p) )
        )
    {
        /* TODO: here can decrease memory usage by determining when will
           the BtB and CtC matrices be filled from 'optimizeA' */
        // bool filled_BtB = false || will_use_BtB_here;
        // bool filled_CtCw = false || will_use_CtC_here;
        bool filled_BtB = true;
        bool filled_CtCw = true;

        if (add_implicit_features)
        {
            if (!pass_allocated_BiTBi)
                buffer_size += square(k+k_main_i);
        }

        if ( ((has_dense && full_dense) ||
              (!has_dense && NA_as_zero_X)) &&
             ((has_dense_U && full_dense_u) ||
              (!has_dense_U && NA_as_zero_U)) &&
             !(filled_BtB) && !(filled_CtCw) && !keep_precomputed )
        {
            if (pass_allocated_BeTBeChol)
                buffer_size += 0;
            else
                buffer_size += square(k_user+k+k_main);
        }

        else
        {
            if (pass_allocated_BtB)
                buffer_size += 0;
            else {
                buffer_size += square(k+k_main);
            }
            if (pass_allocated_CtCw)
                buffer_size += 0;
            else if (p) {
                buffer_size += square(k_user+k);
            }
            if (pass_allocated_BeTBeChol)
                size_alt += 0;
            else
                size_alt += square(k_user+k+k_main);
        }

        if (nonneg)
            size_alt += k_totA;
        else if (has_l1)
            size_alt += (size_t)3*k_totA*nthreads;

        /* Rows with missing values need per-thread solve buffers. */
        if ((has_dense && !full_dense) || (has_dense_U && !full_dense_u))
        {
            if (do_B)
                buffer_thread += n;
            buffer_thread += use_cg? (3*k_totA) : (square(k_totA));
            if (use_cg && NA_as_zero_X && !has_dense && (k+k_main) >= n)
                buffer_thread += n;
            if (nonneg)
                buffer_thread += k_totA;
            else if (has_l1)
                buffer_thread += 3*k_totA;
            buffer_thread *= nthreads;
        }
        buffer_thread = max2(buffer_thread, size_alt);
        buffer_size += buffer_thread;
    }

    /* General case: decide per matrix whether a precomputed product helps. */
    else
    {
        bool prefer_BtB = true;
        bool prefer_CtC = true;
        if (!has_dense && has_sparse && !NA_as_zero_X)
            prefer_BtB = false;
        if (has_dense && has_weights)
            prefer_BtB = false;
        if (!has_dense_U && has_sparse_U && !NA_as_zero_U)
            prefer_CtC = false;

        if (add_implicit_features)
        {
            if (!pass_allocated_BiTBi)
                buffer_size += square(k+k_main_i);
        }

        if (prefer_BtB)
        {
            if (pass_allocated_BtB)
                buffer_size += 0;
            else {
                buffer_size += square(k+k_main);
            }
            if (!nonneg
                    &&
                prefer_CtC
                    &&
                (NA_as_zero_X || ((has_dense && !has_weights) &&
                                  (near_dense || full_dense)) )
                    &&
                ((has_dense_U && (near_dense_u || full_dense_u)) ||
                 (!has_dense_U && NA_as_zero_U) || (!p)))
            {
                if (pass_allocated_BeTBeChol)
                    buffer_size += 0;
                else {
                    buffer_size += square(k_user+k+k_main);
                }
            }
        }

        if (prefer_CtC)
        {
            if (pass_allocated_CtCw)
                buffer_size += 0;
            else {
                buffer_size += square(k_user+k);
            }
        }

        if (do_B)
            buffer_size += n * nthreads;
        if (do_B && has_weights)
            buffer_size += n * (nthreads+1);

        buffer_thread += use_cg? (3*k_totA) : (square(k_totA));
        if (nonneg)
            buffer_thread += k_totA;
        else if (has_l1)
            buffer_thread += (size_t)3*k_totA;
        if (use_cg && !has_dense && NA_as_zero_X && (k+k_main) >= n)
            buffer_thread += n;
        buffer_size += buffer_thread * nthreads;
    }

    /* The two passes don't overlap in time, but 'min_size' persists. */
    return max2(buffer_size, size_optimizeA) + min_size;
}
/* Computes the scratch-buffer size (in 'real_t' elements) required by
   'optimizeA_collective_implicit' for the given problem dimensions and
   precomputed-matrix availability flags. Mirrors the allocation decisions
   made inside that function: each 'pass_allocated_*' flag indicates the
   caller already provides the corresponding precomputed matrix, so no
   buffer space needs to be reserved for it here. */
size_t buffer_size_optimizeA_collective_implicit
(
    size_t m, size_t m_u, size_t p,
    size_t k, size_t k_main, size_t k_user,
    bool has_sparse_U,
    bool NA_as_zero_U,
    size_t nthreads,
    bool use_cg,
    bool nonneg, bool has_l1,
    bool pass_allocated_BtB,
    bool pass_allocated_BeTBe,
    bool pass_allocated_BeTBeChol,
    bool pass_allocated_CtC,
    bool finalize_chol
)
{
    /* When CG iterations are finished off with a Cholesky pass, the buffer
       must fit whichever of the two code paths needs more space. */
    if (finalize_chol)
    {
        size_t size_for_cg = buffer_size_optimizeA_collective_implicit(
            m, m_u, p,
            k, k_main, k_user,
            has_sparse_U,
            NA_as_zero_U,
            nthreads,
            true,
            nonneg, has_l1,
            pass_allocated_BtB,
            pass_allocated_BeTBe,
            pass_allocated_BeTBeChol,
            pass_allocated_CtC,
            false
        );
        size_t size_for_chol = buffer_size_optimizeA_collective_implicit(
            m, m_u, p,
            k, k_main, k_user,
            has_sparse_U,
            NA_as_zero_U,
            nthreads,
            false,
            nonneg, has_l1,
            pass_allocated_BtB,
            pass_allocated_BeTBe,
            pass_allocated_BeTBeChol,
            pass_allocated_CtC,
            false
        );
        return max2(size_for_cg, size_for_chol);
    }

    size_t k_totA = k_user + k + k_main;

    /* B'B is always needed unless supplied by the caller. */
    size_t size_buffer = pass_allocated_BtB? (size_t)0 : square(k+k_main);

    /* Rows of X beyond the rows of U are solved with the single-matrix
       formula; that path has its own buffer requirement. */
    size_t size_from_single = (m > m_u)?
        buffer_size_optimizeA_implicit(
            k + k_main, nthreads,
            true,
            nonneg, has_l1,
            use_cg, finalize_chol
        )
            : (size_t)0;

    /* BeTBeChol is built only when U has extra rows, U is usable in
       closed form, and either the Cholesky path is taken or CG would
       be more expensive than factorizing (p > 2*k_totA). */
    bool uses_BeTBeChol = (m_u > m) &&
                          !(has_sparse_U && !NA_as_zero_U) &&
                          (!use_cg || p > 2 * k_totA);
    if (uses_BeTBeChol && !pass_allocated_BeTBeChol)
        size_buffer += square(k_totA);

    /* C'C is beneficial under CG whenever U can be handled in aggregate. */
    bool prefer_CtC = !(has_sparse_U && !NA_as_zero_U);
    if (use_cg && prefer_CtC && !pass_allocated_CtC)
        size_buffer += square(k_user+k);

    /* BeTBe is required on the Cholesky path, or when BeTBeChol is used. */
    if ((!use_cg || uses_BeTBeChol) && !pass_allocated_BeTBe)
        size_buffer += square(k_totA);

    /* Per-thread workspace: 3 vectors for CG, or a full square matrix for
       the Cholesky solve, plus extras for non-negativity / L1. */
    size_t size_buffer_thread = use_cg? ((size_t)3 * k_totA) : (square(k_totA));
    if (nonneg)
        size_buffer_thread += k_totA;
    else if (has_l1)
        size_buffer_thread += (size_t)3*k_totA;
    size_buffer += nthreads * size_buffer_thread;

    return max2(size_buffer, size_from_single);
}
/* optimizeA_collective
   --------------------
   Solves for the factor matrix 'A' in the collective (side-information)
   model, holding 'B' (item factors), 'C' (attribute factors) and, when
   'add_implicit_features' is set, 'Bi' (implicit-feedback factors) fixed.
   Each row of 'A' is the regularized least-squares minimizer over its
   observed entries in 'X' (dense 'Xfull' with leading dim 'ldX', or CSR
   'Xcsr_p'/'Xcsr_i'/'Xcsr') and in the side matrix 'U' (dense or CSR).

   Strategy:
     1. If one input has more rows than the other, the surplus rows are
        independent of the other matrix and are solved first through the
        single-matrix 'optimizeA' or a recursive call to this function.
     2. "Case 1": both inputs dense with few/no missing values and no
        weights, or sparse with missing-as-zero -> one closed-form solve
        for all rows at once (LAPACK-style 'tposv_'), then per-row
        corrections only for rows with missing entries.
     3. General case: build whichever precomputed matrices pay off, then
        solve row-by-row through 'collective_closed_form_block' (CG,
        Cholesky, non-negative CD, or elastic-net depending on flags).

   Precomputed matrices (BtB = B'B, CtCw = C'C scaled by w_user,
   BeTBeChol, BiTBi = Bi'Bi, CtUbias) may be passed in by the caller;
   the 'filled_*' output flags report which ones this call populated so
   they can be reused. 'CtC_is_scaled' tracks whether 'precomputedCtCw'
   currently carries the 'w_user' factor. 'buffer_real_t' is scratch
   space sized by the matching 'buffer_size_optimizeA_collective'. */
void optimizeA_collective
(
    real_t *restrict A, int_t lda, real_t *restrict B, int_t ldb,
    real_t *restrict C,
    real_t *restrict Bi,
    int_t m, int_t m_u, int_t n, int_t p,
    int_t k, int_t k_main, int_t k_user, int_t k_item,
    size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr,
    real_t *restrict Xfull, int_t ldX,
    bool full_dense, bool near_dense, bool some_full,
    int_t cnt_NA_x[], real_t *restrict weight, bool NA_as_zero_X,
    real_t *restrict Xones, int_t k_main_i, int_t ldXones,
    bool add_implicit_features,
    size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr,
    real_t *restrict U, int_t cnt_NA_u[], real_t *restrict U_colmeans,
    bool full_dense_u, bool near_dense_u, bool some_full_u, bool NA_as_zero_U,
    real_t lam, real_t w_user, real_t w_implicit, real_t lam_last,
    real_t l1_lam, real_t l1_lam_bias,
    bool scale_lam, bool scale_lam_sideinfo,
    bool scale_bias_const, real_t *restrict wsumA,
    bool do_B,
    int nthreads,
    bool use_cg, int_t max_cg_steps,
    bool nonneg, int_t max_cd_steps,
    real_t *restrict bias_restore,
    real_t *restrict bias_BtX, real_t *restrict bias_X, real_t bias_X_glob,
    bool keep_precomputed,
    real_t *restrict precomputedBtB,
    real_t *restrict precomputedCtCw,
    real_t *restrict precomputedBeTBeChol,
    real_t *restrict precomputedBiTBi,
    real_t *restrict precomputedCtUbias,
    bool *filled_BtB, bool *filled_CtCw,
    bool *filled_BeTBeChol, bool *filled_CtUbias,
    bool *CtC_is_scaled,
    real_t *restrict buffer_real_t
)
{
    /* Pre-3.0 OpenMP (and MSVC's OpenMP on Windows) requires a signed
       loop variable in 'parallel for'; 'size_t_for' presumably maps to
       this 'ix' in that configuration. */
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long ix;
    #endif
    char lo = 'L';
    int_t ignore = 0;
    bool ignore_bool = false;
    bool ignore_bool2 = false;
    bool ignore_bool3 = false;
    bool ignore_bool4 = false;
    /* NOTE(review): under TEST_CG, 'k_totA' is referenced here before its
       declaration further below - this looks like it would not compile
       with TEST_CG defined; verify. */
    #ifdef TEST_CG
    use_cg = true;
    max_cg_steps = 10000;
    set_to_zero_(A, (size_t)max2(m, m_u)*(size_t)lda - (size_t)(lda-k_totA),
                 nthreads);
    #endif
    /* CG does not support non-negativity or L1 regularization - fall back
       to the Cholesky / coordinate-descent paths. */
    if (nonneg || l1_lam || l1_lam_bias)
        use_cg = false;
    *filled_BtB = false;
    *filled_CtCw = false;
    *filled_BeTBeChol = false;
    *filled_CtUbias = false;
    bool filled_BiTBi = false;
    *CtC_is_scaled = false;
    /* Optionally scale the regularization by the number of columns being
       fitted against ('n', or 'n+p' when side info counts too). */
    real_t multiplier_lam = scale_lam_sideinfo? (n+p) : (scale_lam? n : 1);
    real_t scaled_lam = lam;
    real_t scaled_lam_last = lam_last;
    real_t scaled_l1_lam = l1_lam;
    real_t scaled_l1_lam_last = l1_lam_bias;
    if (multiplier_lam != 1.)
    {
        scaled_lam *= multiplier_lam;
        scaled_lam_last *= multiplier_lam;
        if (!scale_bias_const) {
            scaled_l1_lam *= multiplier_lam;
            scaled_l1_lam_last *= multiplier_lam;
        }
    }
    /* TODO: could reduce number of operations and save memory by determining
       when the BiTBi matrix could be added to BtB and when not. */
    int_t k_totA = k_user + k + k_main;
    int_t k_totC = k_user + k;
    int_t k_pred = k_user + k + k_main;
    int_t m_x = m; /* 'm' will be overwritten later */
    /* Offset of the (k_user, k_user) element inside a k_totA x k_totA
       row-major matrix - where the B'B block gets added. */
    size_t offset_square = k_user + k_user*(size_t)k_totA;
    if (Xfull == NULL) do_B = false;
    /* TODO: here should only need to set straight away the lower half,
       and only when there are un-even entries in each matrix */
    bool zeroed_out_A = false;
    if (!use_cg || nonneg || l1_lam || l1_lam_bias)
    {
        set_to_zero_(A, (size_t)max2(m, m_u)*(size_t)lda - (size_t)(lda-k_totA),
                     nthreads);
        zeroed_out_A = true;
    }
    /* If one of the matrices has more rows than the other, the rows
       for the larger matrix will be independent and can be obtained
       from the single-matrix formula instead.
       Note: if using 'NA_as_zero_X', m_x >= m_u,
       whereas if using 'NA_as_zero_U', m_x <= m_u,
       and if using both, then m_x == m_u. */
    /* TODO: refactor this, maybe move it after the end */
    if (m_x > m_u && m_u > 0)
    {
        /* Decide whether the B'B precomputation will pay off for the
           surplus rows (reused later for the overlapping rows too). */
        bool will_use_BtB_here = false;
        if (
            weight == NULL &&
            ( (Xfull != NULL && (full_dense || near_dense)) ||
              (Xfull == NULL && Xcsr_p != NULL && NA_as_zero_X) ) &&
            ( (U != NULL && (full_dense_u || near_dense_u)) ||
              (U == NULL && U_csr_p != NULL && NA_as_zero_U) )
        )
        {
            will_use_BtB_here = true;
        }
        else {
            will_use_BtB_here = true;
            if (Xfull == NULL && Xcsr_p != NULL && !NA_as_zero_X)
                will_use_BtB_here = false;
            if (Xfull != NULL && weight != NULL)
                will_use_BtB_here = false;
        }
        if (will_use_BtB_here && precomputedBtB == NULL)
        {
            precomputedBtB = buffer_real_t;
            buffer_real_t += square(k+k_main);
        }
        if (add_implicit_features && precomputedBiTBi == NULL)
        {
            precomputedBiTBi = buffer_real_t;
            buffer_real_t += square(k+k_main_i);
        }
        int_t m_diff = m - m_u;
        /* Surplus rows of X (no U rows): plain single-matrix solve. */
        if (!add_implicit_features)
            optimizeA(
                A + (size_t)k_user + (size_t)m_u*(size_t)lda, lda,
                B + k_item, ldb,
                m_diff, n, k + k_main,
                (Xfull != NULL)? ((size_t*)NULL) : (Xcsr_p + m_u),
                (Xfull != NULL)? ((int_t*)NULL) : Xcsr_i,
                (Xfull != NULL)? ((real_t*)NULL) : Xcsr,
                (Xfull == NULL)?
                    ((real_t*)NULL)
                        :
                    (do_B? (Xfull + m_u) : (Xfull + (size_t)m_u*(size_t)n)),
                ldX,
                full_dense, near_dense, some_full,
                (Xfull == NULL)? ((int_t*)NULL) : (cnt_NA_x + m_u),
                (weight == NULL)? ((real_t*)NULL)
                                : ( (Xfull == NULL)?
                                        (weight)
                                            :
                                        (do_B?
                                            (weight + m_u)
                                                :
                                            (weight + (size_t)m_u*(size_t)n)) ),
                NA_as_zero_X,
                lam, lam_last,
                l1_lam, l1_lam_bias,
                scale_lam, scale_bias_const,
                (weight == NULL || wsumA == NULL)? (real_t*)NULL : (wsumA+m_u),
                false,
                nthreads,
                use_cg, max_cg_steps,
                nonneg, max_cd_steps,
                (bias_restore == NULL)? (real_t*)NULL : (bias_restore + m_u),
                bias_BtX, bias_X, bias_X_glob, (real_t*)NULL, 1.,
                keep_precomputed || will_use_BtB_here,
                precomputedBtB,
                (keep_precomputed || will_use_BtB_here)?
                    filled_BtB : &ignore_bool,
                buffer_real_t
            );
        /* With implicit features, recurse with p=0 so only X and Xones
           enter the solve for the surplus rows. */
        /* NOTE(review): in this call the dense 'weight' pointer for the
           non-do_B case advances by 'ldXones', while the call above uses
           'n' - weight rows should follow X's layout; confirm which one
           is intended. */
        else
            optimizeA_collective(
                A + (size_t)m_u*(size_t)lda, lda, B, ldb,
                (real_t*)NULL,
                Bi,
                m_diff, 0, n, 0,
                k, k_main, k_user, k_item,
                (Xfull != NULL)? ((size_t*)NULL) : (Xcsr_p + m_u),
                (Xfull != NULL)? ((int_t*)NULL) : Xcsr_i,
                (Xfull != NULL)? ((real_t*)NULL) : Xcsr,
                (Xfull == NULL)?
                    ((real_t*)NULL)
                        :
                    (do_B? (Xfull + m_u) : (Xfull + (size_t)m_u*(size_t)n)),
                ldX,
                full_dense, near_dense, some_full,
                (Xfull == NULL)? ((int_t*)NULL) : (cnt_NA_x + m_u),
                (weight == NULL)? ((real_t*)NULL)
                                : ( (Xfull == NULL)?
                                        (weight)
                                            :
                                        (do_B?
                                            (weight + m_u)
                                                :
                                            (weight+(size_t)m_u*(size_t)ldXones)) ),
                NA_as_zero_X,
                (Xfull == NULL)?
                    (Xones)
                        :
                    (do_B? (Xones + m_u) : (Xones+(size_t)m_u*(size_t)ldXones)),
                k_main_i, ldXones, add_implicit_features,
                (size_t*)NULL, (int_t*)NULL, (real_t*)NULL,
                (real_t*)NULL, (int_t*)NULL, (real_t*)NULL,
                false, false, false, false,
                lam, w_user, w_implicit, lam_last,
                l1_lam, l1_lam_bias,
                scale_lam, false,
                scale_bias_const,
                (weight == NULL || wsumA == NULL)? (real_t*)NULL : (wsumA+m_u),
                do_B,
                nthreads,
                use_cg, max_cg_steps,
                nonneg, max_cd_steps,
                (bias_restore == NULL)? (real_t*)NULL : (bias_restore + m_u),
                bias_BtX, bias_X, bias_X_glob,
                keep_precomputed,
                precomputedBtB,
                (real_t*)NULL,
                precomputedBeTBeChol,
                precomputedBiTBi,
                (real_t*)NULL,
                filled_BtB, &ignore_bool,
                &ignore_bool2, &ignore_bool3,
                &ignore_bool4,
                buffer_real_t
            );
        m_x = m_u;
    }
    else if (m_u > m_x)
    {
        /* Surplus rows of U (no X rows). */
        bool will_use_CtC_here = false;
        if (
            weight == NULL &&
            ( (Xfull != NULL && (full_dense || near_dense)) ||
              (Xfull == NULL && Xcsr_p != NULL && NA_as_zero_X) ) &&
            ( (U != NULL && (full_dense_u || near_dense_u)) ||
              (U == NULL && U_csr_p != NULL && NA_as_zero_U) )
        )
        {
            will_use_CtC_here = true;
        }
        else {
            will_use_CtC_here = true;
            if (U == NULL && U_csr_p != NULL && !NA_as_zero_U)
                will_use_CtC_here = false;
        }
        if (will_use_CtC_here && precomputedCtCw == NULL) {
            precomputedCtCw = buffer_real_t;
            buffer_real_t += square(k_user+k);
        }
        int_t m_diff = m_u - m;
        if (!add_implicit_features)
        {
            /* Single-matrix solve against C; lambda is divided by w_user
               because the U part of the objective is weighted by it. */
            optimizeA(
                A + (size_t)m*(size_t)lda, lda,
                C, k_totC,
                m_diff, p, k_user + k,
                (U != NULL)? ((size_t*)NULL) : (U_csr_p + m),
                (U != NULL)? ((int_t*)NULL) : U_csr_i,
                (U != NULL)? ((real_t*)NULL) : U_csr,
                (U == NULL)? ((real_t*)NULL) : (U + (size_t)m*(size_t)p),
                p,
                full_dense_u, near_dense_u, some_full_u,
                (U == NULL)? ((int_t*)NULL) : (cnt_NA_u + m),
                (real_t*)NULL,
                NA_as_zero_U,
                lam/w_user, lam/w_user,
                l1_lam/w_user, l1_lam/w_user,
                scale_lam, false, (real_t*)NULL,
                false,
                nthreads,
                use_cg, max_cg_steps,
                nonneg, max_cd_steps,
                (real_t*)NULL,
                (U_colmeans == NULL)? (real_t*)NULL : precomputedCtUbias,
                (real_t*)NULL, 0.,
                (real_t*)NULL, w_user,
                keep_precomputed || will_use_CtC_here,
                precomputedCtCw,
                (keep_precomputed || will_use_CtC_here)?
                    filled_CtCw : &ignore_bool,
                buffer_real_t
            );
        }
        else
        {
            if (precomputedBiTBi == NULL)
            {
                precomputedBiTBi = buffer_real_t;
                buffer_real_t += square(k+k_main_i);
            }
            /* TODO: find a faster way of doing this that wouldn't involve
               iterating over 'n' if not required. */
            /* Recurse with an all-empty CSR 'X' so only Xones (implicit
               features) and U participate; objective is rescaled by
               1/w_implicit so the 'X' role is played by Xones. */
            size_t *buffer_empty_csr_p = (size_t*)buffer_real_t;
            memset(buffer_empty_csr_p, 0, (size_t)(m_diff+1)*sizeof(size_t));
            optimizeA_collective(
                A + (size_t)m*(size_t)lda, lda,
                Bi, k+k_main_i,
                C,
                (real_t*)NULL,
                m_diff, m_diff, n, p,
                k, k_main_i, k_user, 0,
                buffer_empty_csr_p, (int_t*)NULL, (real_t*)NULL,
                (real_t*)NULL, 0, false, false, false,
                (int_t*)NULL, (real_t*)NULL, true,
                (real_t*)NULL, 0, 0,
                false,
                (U != NULL)? ((size_t*)NULL) : (U_csr_p + m),
                (U != NULL)? ((int_t*)NULL) : U_csr_i,
                (U != NULL)? ((real_t*)NULL) : U_csr,
                (U == NULL)? ((real_t*)NULL) : (U + (size_t)m*(size_t)p),
                (U == NULL)? ((int_t*)NULL) : (cnt_NA_u + m),
                U_colmeans,
                full_dense_u, near_dense_u, some_full_u, NA_as_zero_U,
                lam/w_implicit, w_user/w_implicit, 1., lam/w_implicit,
                (l1_lam/w_implicit) / (real_t)(scale_lam_sideinfo? n : 1),
                (l1_lam/w_implicit) / (real_t)(scale_lam_sideinfo? n : 1),
                false || scale_lam_sideinfo, scale_lam_sideinfo,
                false, (real_t*)NULL,
                false,
                nthreads,
                use_cg, max_cg_steps,
                nonneg, max_cd_steps,
                (real_t*)NULL,
                (real_t*)NULL, (real_t*)NULL, 0.,
                true,
                precomputedBiTBi,
                precomputedCtCw,
                precomputedBeTBeChol,
                (real_t*)NULL,
                precomputedCtUbias,
                &filled_BiTBi, filled_CtCw, &ignore_bool, filled_CtUbias,
                CtC_is_scaled,
                (real_t*)(((size_t*)buffer_real_t) + (m_diff + 1))
            );
        }
        /* The recursive call worked in units of 1/w_implicit - undo that
           scaling on the matrices it filled. */
        if (add_implicit_features && w_implicit != 1.)
        {
            if (filled_BiTBi)
                cblas_tscal(square(k+k_main_i), w_implicit, precomputedBiTBi,1);
            if (*filled_CtCw && precomputedCtCw != NULL && *CtC_is_scaled)
                cblas_tscal(square(k_user+k), w_implicit, precomputedCtCw, 1);
        }
        if (precomputedCtCw == NULL)
        {
            *filled_CtCw = false;
            *CtC_is_scaled = false;
        }
        if (!(*filled_CtCw))
            *CtC_is_scaled = false;
        m_u = m_x;
    }
    m = max2(m_x, m_u); /* <- should be equal by this point */
    /* With missing-as-zero U and centered columns, the term C'mu is a
       constant vector added to every row - compute it once. */
    if (U == NULL && U_csr_p != NULL && NA_as_zero_U && U_colmeans != NULL &&
        !(*filled_CtUbias))
    {
        cblas_tgemv(CblasRowMajor, CblasTrans, p, k_user+k,
                    w_user, C, k_user+k,
                    U_colmeans, 1,
                    0., precomputedCtUbias, 1);
        *filled_CtUbias = true;
    }
    if (U_colmeans == NULL)
        precomputedCtUbias = NULL;
    /* Case 1: both matrices are either (a) dense with few missing values and
       no weights, or (b) sparse with missing-as-zero.
       Here can use the closed-form solution on all the observations
       at once, and then do corrections one-by-one if there are any
       missing values. */
    if (
        weight == NULL &&
        ( (Xfull != NULL && (full_dense || near_dense)) ||
          (Xfull == NULL && Xcsr_p != NULL && NA_as_zero_X) ) &&
        ( (U != NULL && (full_dense_u || near_dense_u)) ||
          (U == NULL && U_csr_p != NULL && NA_as_zero_U) ||
          (!p && U_csr_p == NULL) )
    )
    {
        real_t *restrict bufferBtB = NULL;
        real_t *restrict bufferCtC = NULL;
        real_t *restrict bufferBeTBeChol = NULL;
        real_t *restrict bufferBiTBi = NULL;
        if (add_implicit_features)
        {
            /* Note: this won't be needed if next condition is met */
            if (precomputedBiTBi == NULL) {
                bufferBiTBi = buffer_real_t;
                buffer_real_t += square(k+k_main_i);
            } else {
                bufferBiTBi = precomputedBiTBi;
            }
        }
        /* Fully dense / fully NA-as-zero and nothing to keep: build only
           the combined system matrix Be'Be directly. */
        if ( ((Xfull != NULL && full_dense) ||
              (Xfull == NULL && NA_as_zero_X)) &&
             ((U != NULL && full_dense_u) ||
              (U == NULL && NA_as_zero_U) || (!p && U_csr_p == NULL)) &&
             !(*filled_BtB) && !(*filled_CtCw) && !filled_BiTBi &&
             !keep_precomputed )
        {
            /* Give back the BiTBi slot if it was carved from the buffer
               just above - it gets folded into Be'Be instead. */
            if (add_implicit_features &&
                bufferBiTBi == buffer_real_t - square(k+k_main_i))
            {
                buffer_real_t -= square(k+k_main_i);
            }
            if (precomputedBeTBeChol != NULL)
                bufferBeTBeChol = precomputedBeTBeChol;
            else
                bufferBeTBeChol = buffer_real_t;
            /* Note: buffer_real_t won't be used any further if reaching here */
            build_BeTBe(
                bufferBeTBeChol,
                B, ldb, C,
                k, k_user, k_main, k_item,
                n, p,
                scaled_lam,
                w_user
            );
            if (add_implicit_features)
                cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                            k+k_main_i, n,
                            w_implicit, Bi, k+k_main_i,
                            1., bufferBeTBeChol + offset_square, k_totA);
            if (lam_last != lam)
                bufferBeTBeChol[square(k_totA)-1]
                    +=
                (scaled_lam_last-scaled_lam);
        }
        else
        {
            /* Build B'B and C'C separately (possibly into the caller's
               precomputed slots), then assemble Be'Be from them. */
            if (precomputedBtB != NULL)
                bufferBtB = precomputedBtB;
            else {
                bufferBtB = buffer_real_t;
                buffer_real_t += square(k+k_main);
            }
            if (precomputedCtCw != NULL)
                bufferCtC = precomputedCtCw;
            else if (p) {
                bufferCtC = buffer_real_t;
                buffer_real_t += square(k_user+k);
            }
            if (precomputedBeTBeChol != NULL)
                bufferBeTBeChol = precomputedBeTBeChol;
            else
                bufferBeTBeChol = buffer_real_t;
            /* Note: the Cholesky won't not be needed any more after this, and
               in such case the memory will get reused */
            if (add_implicit_features && !filled_BiTBi)
                cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                            k+k_main_i, n,
                            w_implicit, Bi, k+k_main_i,
                            0., bufferBiTBi, k+k_main_i);
            build_BtB_CtC(
                (*filled_BtB)? (real_t*)NULL : bufferBtB,
                (*filled_CtCw)? (real_t*)NULL : bufferCtC,
                B, n, ldb,
                C, p,
                k, k_user, k_main, k_item,
                1.,
                (real_t*)NULL
            );
            /* C'C is handled unscaled here; strip w_user if present. */
            if (*filled_CtCw && *CtC_is_scaled) {
                cblas_tscal(square(k_user+k), 1./w_user, bufferCtC, 1);
                *CtC_is_scaled = false;
            }
            else if (!(*filled_CtCw)) {
                *CtC_is_scaled = false;
            }
            if (k_user || k_main || (!p && U_csr_p == NULL))
                set_to_zero(bufferBeTBeChol, square(k_totA));
            if (p || U_csr_p != NULL)
                copy_mat(k_totC, k_totC,
                         bufferCtC, k_totC,
                         bufferBeTBeChol, k_totA);
            /* Scale only the C'C block (top-left, excludes k_main). */
            if (w_user != 1. && p && !(*CtC_is_scaled))
                cblas_tscal(square(k_totA) - k_main - k_main*k_totA, w_user,
                            bufferBeTBeChol, 1);
            sum_mat(k+k_main, k+k_main,
                    bufferBtB, k+k_main,
                    bufferBeTBeChol + offset_square, k_totA);
            if (add_implicit_features)
                sum_mat(k+k_main_i, k+k_main_i,
                        bufferBiTBi, k+k_main_i,
                        bufferBeTBeChol + offset_square, k_totA);
            add_to_diag(bufferBeTBeChol, scaled_lam, k_totA);
            if (lam_last != lam)
                bufferBeTBeChol[square(k_totA)-1]
                    +=
                (scaled_lam_last - scaled_lam);
            /* Leave C'C in the scaled form expected by the per-row
               corrections / by the caller. */
            if (w_user != 1. && !use_cg && p &&
                bufferCtC != NULL && !(*CtC_is_scaled) &&
                (keep_precomputed || ((Xfull != NULL && !full_dense) ||
                                      (U != NULL && !full_dense_u))))
            {
                cblas_tscal(square(k_totC), w_user, bufferCtC, 1);
                *CtC_is_scaled = true;
            }
            *filled_BtB = true;
            *filled_CtCw = true;
            *filled_BeTBeChol = true;
        }
        /* Note: this messes up the current values when there are NAs and using
           conjugate gradient, so it will reset them later. Could alternatively
           keep another matrix with the values before this override to restore
           them later. */
        /* TODO: keep track of older values when using CG method */
        if (!zeroed_out_A && Xfull == NULL)
        {
            set_to_zero_(A,
                         (size_t)max2(m_x, m_u)*(size_t)lda
                            - (size_t)(lda-k_totA),
                         nthreads);
        }
        /* Accumulate the right-hand sides into A: X*B ... */
        if (Xfull != NULL)
        {
            build_XBw(
                A + k_user, lda,
                B + k_item, ldb,
                Xfull, ldX,
                m_x, n, k + k_main,
                1.,
                do_B, true
            );
            #ifdef FORCE_NO_NAN_PROPAGATION
            if (!nonneg && !l1_lam && !l1_lam_bias)
            {
                if (!full_dense)
                #pragma omp parallel for schedule(static) \
                        num_threads(min2(4, nthreads)) \
                        shared(A, m, k_totA, lda)
                for (size_t_for ix = 0;
                     ix < (size_t)m*(size_t)lda - (size_t)(lda - k_totA);
                     ix++)
                    A[ix] = isnan(A[ix])? (0.) : (A[ix]);
            }
            #endif
        }
        else if (Xcsr_p != NULL)
        {
            tgemm_sp_dense(
                m_x, k+k_main, 1.,
                Xcsr_p, Xcsr_i, Xcsr,
                B + k_item, (size_t)ldb,
                A + k_user, (size_t)lda,
                nthreads
            );
        }
        /* ... plus w_user * U*C ... */
        if (U != NULL)
        {
            cblas_tgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                        m_u, k_user + k, p,
                        w_user, U, p, C, k_user + k,
                        1., A, lda);
            #ifdef FORCE_NO_NAN_PROPAGATION
            if (!nonneg && !l1_lam && !l1_lam_bias)
            {
                if (!full_dense_u)
                #pragma omp parallel for schedule(static) \
                        num_threads(min2(4, nthreads)) \
                        shared(A, m, k_totA, lda)
                for (size_t_for ix = 0;
                     ix < (size_t)m*(size_t)lda - (size_t)(lda - k_totA);
                     ix++)
                    A[ix] = isnan(A[ix])? (0.) : (A[ix]);
            }
            #endif
        }
        else if (U_csr_p != NULL)
        {
            tgemm_sp_dense(
                m_u, k_user+k, w_user,
                U_csr_p, U_csr_i, U_csr,
                C, (size_t)k_totC,
                A, (size_t)lda,
                nthreads
            );
        }
        /* ... plus w_implicit * Xones*Bi when using implicit features ... */
        if (add_implicit_features)
        {
            if (Xfull != NULL)
                build_XBw(
                    A + k_user, lda,
                    Bi, k+k_main_i,
                    Xones, ldXones,
                    m_x, n, k + k_main_i,
                    w_implicit,
                    do_B, false
                );
            else
                tgemm_sp_dense(
                    m_x, k+k_main_i, w_implicit,
                    Xcsr_p, Xcsr_i, Xones,
                    Bi, k+k_main_i,
                    A + k_user, (size_t)lda,
                    nthreads
                );
        }
        /* ... plus constant bias terms shared by all rows. */
        if (bias_BtX != NULL && Xfull == NULL && NA_as_zero_X)
        {
            for (size_t row = 0; row < (size_t)m; row++)
                for (size_t ix = 0; ix < (size_t)(k+k_main); ix++)
                    A[(size_t)k_user + row*(size_t)lda + ix] += bias_BtX[ix];
        }
        if (U == NULL && U_csr_p != NULL && NA_as_zero_U && U_colmeans != NULL)
        {
            for (size_t row = 0; row < (size_t)m; row++)
                for (size_t ix = 0; ix < (size_t)(k_user+k); ix++)
                    A[row*(size_t)lda + ix] += precomputedCtUbias[ix];
        }
        /* Solve all rows at once: A currently holds the right-hand sides
           and gets overwritten with the solutions by LAPACK-style 'posv'
           (Cholesky), or by the batched CD / elastic-net solvers. */
        if (!nonneg && !l1_lam && !l1_lam_bias)
            tposv_(&lo, &k_pred, &m,
                   bufferBeTBeChol, &k_pred,
                   A, &lda,
                   &ignore);
        else if (!nonneg) {
            solve_elasticnet_batch(
                bufferBeTBeChol,
                A,
                buffer_real_t,
                m, k_pred, lda,
                scaled_l1_lam,
                scaled_l1_lam_last,
                max_cd_steps,
                nthreads
            );
            *filled_BeTBeChol = false;
        }
        else {
            solve_nonneg_batch(
                bufferBeTBeChol,
                A,
                buffer_real_t,
                m, k_pred, lda,
                scaled_l1_lam,
                scaled_l1_lam_last,
                max_cd_steps,
                nthreads
            );
            *filled_BeTBeChol = false;
        }
        /* CG path expects BiTBi without the w_implicit factor. */
        if (add_implicit_features && use_cg && w_implicit != 1. &&
            (keep_precomputed || (Xfull != NULL && !full_dense) ||
             (U != NULL && !full_dense_u)))
        {
            cblas_tscal(square(k+k_main_i), 1./w_implicit, bufferBiTBi, 1);
        }
        /* Per-row corrections for rows that have missing entries. */
        if ((Xfull != NULL && !full_dense) || (U != NULL && !full_dense_u))
        {
            if (w_user != 1. && p && use_cg &&
                *CtC_is_scaled && bufferCtC != NULL)
            {
                cblas_tscal(square(k_user+k), 1./w_user, bufferCtC, 1);
                *CtC_is_scaled = false;
            }
            /* When doing the B matrix, the X matrix will be transposed
               and need to make a copy of the column for each observation,
               whereas the U matrix will be in the right order. */
            if (Xfull == NULL)
                do_B = false;
            /* TODO: do away with the 'bufferX', replace it instead with an
               'incX' parameter */
            real_t *restrict bufferX = buffer_real_t;
            if (do_B)
                buffer_real_t += (size_t)n*(size_t)nthreads;
            size_t size_buffer = use_cg? (3*k_totA) : (square(k_totA));
            if (use_cg && NA_as_zero_X && Xfull == NULL && (k+k_main) >= n)
                size_buffer += n;
            if (nonneg)
                size_buffer += k_totA;
            else if (l1_lam || l1_lam_bias)
                size_buffer += (size_t)3*(size_t)k_totA;
            /* BLAS must run single-threaded inside the parallel loop. */
            int nthreads_restore = 1;
            set_blas_threads(1, &nthreads_restore);
            #pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
                    shared(A, k_totA, B, C, k, k_user, k_item, k_main, \
                           m, m_x, m_u, n, p, lda, ldb, \
                           scale_lam, scale_lam_sideinfo, \
                           scale_bias_const, wsumA, \
                           lam, lam_last, l1_lam, l1_lam_bias, w_user, \
                           Xfull, cnt_NA_x, ldX, full_dense, \
                           Xcsr, Xcsr_i, Xcsr_p, \
                           add_implicit_features, Xones, w_implicit, k_main_i, \
                           U, cnt_NA_u, full_dense_u, \
                           U_csr, U_csr_i, U_csr_p, \
                           buffer_real_t, size_buffer, do_B, \
                           bufferBtB, bufferCtC, nthreads, use_cg, \
                           nonneg, max_cd_steps, precomputedCtUbias) \
                    firstprivate(bufferX)
            for (size_t_for ix = 0; ix < (size_t)m; ix++)
            {
                if ((Xfull != NULL && cnt_NA_x[ix]) ||
                    (U != NULL && cnt_NA_u[ix]))
                {
                    if (Xfull != NULL)
                    {
                        if (!do_B)
                            bufferX = Xfull + ix*(size_t)n;
                        else
                            cblas_tcopy(n, Xfull + ix, ldX,
                                        bufferX
                                        +(size_t)n*(size_t)omp_get_thread_num(),
                                        1);
                    }
                    if (use_cg)
                    {
                        set_to_zero(A + ix*(size_t)lda, k_totA);
                        /* this is compensated by higher 'max_cg_steps' below */
                        if (bias_restore != NULL)
                            A[ix*(size_t)lda + (size_t)(k_totA-1)]
                                =
                                bias_restore[ix];
                    }
                    collective_closed_form_block(
                        A + ix*(size_t)lda,
                        k, k_user, k_item, k_main,
                        (Xfull == NULL)?
                            ((real_t*)NULL)
                                :
                            (bufferX
                                + (do_B?
                                    ((size_t)n*(size_t)omp_get_thread_num())
                                        :
                                    ((size_t)0))),
                        (Xfull != NULL)? ((real_t*)NULL) : (Xcsr + Xcsr_p[ix]),
                        (Xfull != NULL)? ((int_t*)NULL) : (Xcsr_i + Xcsr_p[ix]),
                        (Xfull != NULL)?
                            (size_t)0 : (Xcsr_p[ix+(size_t)1] - Xcsr_p[ix]),
                        (U != NULL || U_csr_p == NULL)?
                            ((int_t*)NULL) : (U_csr_i + U_csr_p[ix]),
                        (U != NULL || U_csr_p == NULL)?
                            ((real_t*)NULL) : (U_csr + U_csr_p[ix]),
                        (U != NULL || U_csr_p == NULL)?
                            (size_t)0 : (U_csr_p[ix+(size_t)1] - U_csr_p[ix]),
                        (U == NULL)? ((real_t*)NULL) : (U + ix*(size_t)p),
                        NA_as_zero_X, NA_as_zero_U,
                        B, n, ldb,
                        C, p,
                        Bi, k_main_i, add_implicit_features,
                        (Xfull == NULL)?
                            (Xones) : (Xones + (do_B? ix:(ix*(size_t)ldXones))),
                        (Xfull == NULL)? ((int_t)1) : (do_B? ldXones :(int_t)1),
                        (real_t*)NULL,
                        lam, w_user, w_implicit, lam_last,
                        l1_lam, l1_lam_bias,
                        scale_lam, scale_lam_sideinfo,
                        scale_bias_const,
                        (weight == NULL || wsumA == NULL)? 0 : wsumA[ix],
                        bufferBtB,
                        (Xfull == NULL)? (int_t)0 : cnt_NA_x[ix],
                        bufferCtC,
                        (U == NULL)? (int_t)0 : cnt_NA_u[ix],
                        (real_t*)NULL, n,
                        bufferBiTBi,
                        true, true,
                        use_cg, k_pred, /* <- more steps to reach optimum */
                        nonneg, max_cd_steps,
                        (real_t*)NULL, (real_t*)NULL, 0.,
                        precomputedCtUbias,
                        buffer_real_t
                            + (size_buffer*(size_t)omp_get_thread_num())
                    );
                }
            }
            set_blas_threads(nthreads_restore, (int*)NULL);
        }
    }
    /* General case - construct one-by-one, use precomputed matrices
       when beneficial, determined on a case-by-case basis. */
    else
    {
        bool prefer_BtB = true;
        bool prefer_CtC = true;
        if (Xfull == NULL && Xcsr_p != NULL && !NA_as_zero_X)
            prefer_BtB = false;
        if (Xfull != NULL && weight != NULL)
            prefer_BtB = false;
        if (U == NULL && U_csr_p != NULL && !NA_as_zero_U)
            prefer_CtC = false;
        real_t *restrict bufferBtB = NULL;
        real_t *restrict bufferBeTBeChol = NULL;
        real_t *restrict bufferCtC = NULL;
        real_t *restrict bufferBiTBi = NULL;
        if (add_implicit_features)
        {
            if (precomputedBiTBi == NULL) {
                bufferBiTBi = buffer_real_t;
                buffer_real_t += square(k+k_main_i);
            }
            else {
                bufferBiTBi = precomputedBiTBi;
            }
        }
        if (prefer_BtB)
        {
            if (precomputedBtB != NULL)
                bufferBtB = precomputedBtB;
            else {
                bufferBtB = buffer_real_t;
                buffer_real_t += square(k+k_main);
            }
            /* A shared Cholesky factor is only worth building when most
               rows can reuse it as-is. */
            if (!nonneg
                    &&
                prefer_CtC
                    &&
                (NA_as_zero_X || ((Xfull != NULL && weight == NULL) &&
                                  (near_dense || full_dense)) )
                    &&
                ((U != NULL && (near_dense_u || full_dense_u)) ||
                 (U == NULL && NA_as_zero_U) || (!p && U_csr_p == NULL)))
            {
                if (precomputedBeTBeChol != NULL)
                    bufferBeTBeChol = precomputedBeTBeChol;
                else {
                    bufferBeTBeChol = buffer_real_t;
                    buffer_real_t += square(k_user+k+k_main);
                }
            }
        }
        if (prefer_CtC && p)
        {
            if (precomputedCtCw != NULL)
                bufferCtC = precomputedCtCw;
            else {
                bufferCtC = buffer_real_t;
                buffer_real_t += square(k_user+k);
            }
        }
        /* Per-thread staging areas for transposed X rows and weights;
           'bufferX_zeros' is a shared all-zeros placeholder row. */
        real_t *restrict bufferX = buffer_real_t;
        real_t *restrict bufferX_zeros = bufferX + (do_B?
                                                      ((size_t)n*(size_t)nthreads)
                                                      : ((size_t)0));
        real_t *restrict bufferX_orig = bufferX;
        real_t *restrict bufferW = bufferX_zeros + ((do_B && weight != NULL)?
                                                      (n) : (0));
        real_t *restrict buffer_remainder = bufferW + (
                                                      (do_B && weight != NULL)?
                                                      ((size_t)n*(size_t)nthreads)
                                                      : ((size_t)0));
        if (weight == NULL) bufferW = NULL;
        /* add_X / add_U: whether the per-row solver still needs to add the
           X*B / U*C contributions itself (they may be pre-added below). */
        bool add_X = true;
        bool add_U = true;
        if (*filled_CtCw && !(*CtC_is_scaled) && w_user != 1. && !use_cg && p)
        {
            cblas_tscal(square(k_totC), w_user, bufferCtC, 1);
            *CtC_is_scaled = true;
        }
        build_BtB_CtC(
            *filled_BtB? ((real_t*)NULL) : bufferBtB,
            *filled_CtCw? ((real_t*)NULL) : bufferCtC,
            B, n, ldb,
            C, p,
            k, k_user, k_main, k_item,
            use_cg? 1. : w_user,
            (NA_as_zero_X && Xfull == NULL)?
                ((real_t*)NULL) : (weight)
        );
        if (!(*filled_CtCw))
        {
            if (use_cg)
                *CtC_is_scaled = false;
            else
                *CtC_is_scaled = true;
        }
        if (bufferBtB == precomputedBtB) *filled_BtB = true;
        if (bufferCtC == precomputedCtCw) *filled_CtCw = true;
        /* A weighted B'B is row-specific - can't reuse it. */
        if (weight != NULL)
        {
            if (!(NA_as_zero_X && Xfull == NULL))
            {
                *filled_BtB = false;
                bufferBtB = NULL;
            }
        }
        if (add_implicit_features && !filled_BiTBi)
        {
            cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                        k+k_main_i, n,
                        w_implicit, Bi, k+k_main_i,
                        0., bufferBiTBi, k+k_main_i);
        }
        /* Assemble and factorize the shared Be'Be (Cholesky in-place). */
        if (bufferBeTBeChol != NULL && *filled_BtB)
        {
            if (k_user || k_main)
                set_to_zero(bufferBeTBeChol, square(k_totA));
            if (p)
                copy_mat(k_user+k, k_user+k,
                         bufferCtC, k_user+k,
                         bufferBeTBeChol, k_totA);
            if (w_user != 1. && p && !(*CtC_is_scaled))
                cblas_tscal(square(k_totA) - k_main - k_main*k_totA, w_user,
                            bufferBeTBeChol, 1);
            sum_mat(k+k_main, k+k_main,
                    bufferBtB, k+k_main,
                    bufferBeTBeChol + offset_square, k_totA);
            if (add_implicit_features)
                sum_mat(k+k_main_i, k+k_main_i,
                        bufferBiTBi, k+k_main_i,
                        bufferBeTBeChol + offset_square, k_totA);
            add_to_diag(bufferBeTBeChol, scaled_lam, k_totA);
            if (lam_last != lam)
                bufferBeTBeChol[square(k_totA)-1]
                    +=
                (scaled_lam_last - scaled_lam);
            tpotrf_(&lo, &k_totA, bufferBeTBeChol, &k_totA, &ignore);
            if (bufferBeTBeChol == precomputedBeTBeChol)
                *filled_BeTBeChol = true;
        }
        else {
            bufferBeTBeChol = NULL;
        }
        if (add_implicit_features && use_cg && w_implicit != 1.)
            cblas_tscal(square(k+k_main_i), 1./w_implicit, bufferBiTBi, 1);
        /* The bulk pre-accumulation below only applies to the Cholesky
           path; CG builds its residuals per-row. */
        if (use_cg) goto skip_chol_simplifications;
        if (Xfull != NULL && (full_dense || near_dense) && weight == NULL)
        {
            add_X = false;
            build_XBw(
                A + k_user, lda,
                B + k_item, ldb,
                Xfull, ldX,
                m_x, n, k + k_main,
                1.,
                do_B, true
            );
            #ifdef FORCE_NO_NAN_PROPAGATION
            if (!nonneg && !l1_lam && !l1_lam_bias)
            {
                if (!full_dense)
                #pragma omp parallel for schedule(static) \
                        num_threads(min2(4, nthreads)) \
                        shared(A, m, k_totA, lda)
                for (size_t_for ix = 0;
                     ix < (size_t)m*(size_t)lda - (size_t)(lda - k_totA);
                     ix++)
                    A[ix] = isnan(A[ix])? (0.) : (A[ix]);
            }
            #endif
            /* TODO: what's the point_t of this 'bufferX_zeros'? */
            if (!full_dense && do_B)
                set_to_zero(bufferX_zeros, n); /*still needs a placeholder*/
        }
        else if (Xfull == NULL && weight == NULL && NA_as_zero_X) {
            add_X = false;
            tgemm_sp_dense(
                m_x, k+k_main, 1.,
                Xcsr_p, Xcsr_i, Xcsr,
                B + k_item, (size_t)ldb,
                A + k_user, (size_t)lda,
                nthreads
            );
        }
        if (U != NULL && (full_dense_u || near_dense_u)) {
            add_U = false;
            cblas_tgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                        m_u, k_user + k, p,
                        w_user, U, p, C, k_user + k,
                        add_X? 0. : 1., A, lda);
            #ifdef FORCE_NO_NAN_PROPAGATION
            if (!nonneg && !l1_lam && !l1_lam_bias)
            {
                if (!full_dense_u)
                #pragma omp parallel for schedule(static) \
                        num_threads(min2(4, nthreads)) \
                        shared(A, m, k_totA, lda)
                for (size_t_for ix = 0;
                     ix < (size_t)m*(size_t)lda - (size_t)(lda - k_totA);
                     ix++)
                    A[ix] = isnan(A[ix])? (0.) : (A[ix]);
            }
            #endif
        }
        else if (U == NULL && U_csr_p != NULL && NA_as_zero_U) {
            add_U = false;
            tgemm_sp_dense(
                m_u, k_user+k, w_user,
                U_csr_p, U_csr_i, U_csr,
                C, (size_t)k_totC,
                A, (size_t)lda,
                nthreads
            );
        }
        if (add_implicit_features && !add_X)
        {
            if (Xfull != NULL)
                build_XBw(
                    A + k_user, lda,
                    Bi, k+k_main_i,
                    Xones, ldXones,
                    m_x, n, k+k_main_i,
                    w_implicit,
                    do_B, false
                );
            else
                tgemm_sp_dense(
                    m_x, k+k_main_i, w_implicit,
                    Xcsr_p, Xcsr_i, Xones,
                    Bi, k+k_main_i,
                    A + k_user, (size_t)lda,
                    nthreads
                );
        }
        if (bias_BtX != NULL && NA_as_zero_X && Xfull == NULL && !add_X)
        {
            for (size_t row = 0; row < (size_t)m_x; row++)
                for (size_t ix = 0; ix < (size_t)(k+k_main); ix++)
                    A[(size_t)k_user + row*(size_t)lda + ix] += bias_BtX[ix];
        }
        if (U == NULL && U_csr_p != NULL && NA_as_zero_U &&
            U_colmeans != NULL && !add_U)
        {
            for (size_t row = 0; row < (size_t)m; row++)
                for (size_t ix = 0; ix < (size_t)(k_user+k); ix++)
                    A[row*(size_t)lda + ix] += precomputedCtUbias[ix];
        }
        skip_chol_simplifications:
            {};
        size_t size_buffer = use_cg? (3*k_totA) : (square(k_totA));
        if (nonneg)
            size_buffer += k_totA;
        else if (l1_lam || l1_lam_bias)
            size_buffer += (size_t)3*(size_t)k_totA;
        if (use_cg && Xfull == NULL && NA_as_zero_X && (k+k_main) >= n)
            size_buffer += n;
        if (!p && U_csr_p == NULL) {
            if (!use_cg) add_U = false;
        }
        if (w_user != 1. && p && use_cg &&
            *CtC_is_scaled && bufferCtC != NULL)
        {
            cblas_tscal(square(k_user+k), 1./w_user, bufferCtC, 1);
            *CtC_is_scaled = false;
        }
        /* BLAS must run single-threaded inside the parallel loop. */
        int nthreads_restore = 1;
        set_blas_threads(1, &nthreads_restore);
        #pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
                shared(A, k_totA, B, C, Bi, k, k_user, k_item, k_main,k_main_i,\
                       m, m_x, m_u, n, p, \
                       lam, lam_last, l1_lam, l1_lam_bias, w_user, w_implicit, \
                       scale_lam, scale_lam_sideinfo, scale_bias_const, wsumA, \
                       NA_as_zero_X, NA_as_zero_U, add_implicit_features, \
                       add_X, add_U, weight, \
                       Xfull, Xcsr_p, Xcsr_i, Xcsr, cnt_NA_x, ldX, \
                       U, U_csr_p, U_csr_i, U_csr, cnt_NA_u, \
                       bufferBtB, bufferCtC, bufferBiTBi, bufferBeTBeChol, \
                       buffer_remainder, size_buffer, \
                       do_B, nthreads, use_cg, nonneg, max_cd_steps, \
                       bias_BtX, bias_X, bias_X_glob, precomputedCtUbias) \
                firstprivate(bufferX, bufferW)
        for (size_t_for ix = 0; ix < (size_t)m; ix++)
        {
            /* TODO: do away with the 'bufferX', replace it instead with an
               'incX' parameter */
            if (Xfull != NULL)
            {
                if (!do_B)
                    bufferX = Xfull + ix*(size_t)n;
                else if (add_X || cnt_NA_x[ix] || (U != NULL && cnt_NA_u[ix]))
                {
                    cblas_tcopy(n, Xfull + ix, ldX,
                                bufferX_orig
                                + (size_t)n*(size_t)omp_get_thread_num(),1);
                    bufferX = bufferX_orig;
                }
                else
                    bufferX = bufferX_zeros;
                if (weight != NULL) {
                    if (!do_B)
                        bufferW = weight + ix*(size_t)n;
                    else
                        cblas_tcopy(n, weight + ix, ldX,
                                    bufferW
                                    + (size_t)n*(size_t)omp_get_thread_num(),1);
                }
            }
            collective_closed_form_block(
                A + ix*(size_t)lda,
                k, k_user, k_item, k_main,
                (Xfull == NULL)? ((real_t*)NULL)
                               : (bufferX
                                  + (do_B?
                                      ((size_t)n*(size_t)omp_get_thread_num())
                                        :
                                      ((size_t)0))),
                (Xfull != NULL)? ((real_t*)NULL) : (Xcsr + Xcsr_p[ix]),
                (Xfull != NULL)? ((int_t*)NULL) : (Xcsr_i + Xcsr_p[ix]),
                (Xfull != NULL)? (size_t)0 : (Xcsr_p[ix+(size_t)1] -Xcsr_p[ix]),
                (U != NULL || U_csr_p == NULL)?
                    ((int_t*)NULL) : (U_csr_i + U_csr_p[ix]),
                (U != NULL || U_csr_p == NULL)?
                    ((real_t*)NULL) : (U_csr + U_csr_p[ix]),
                (U != NULL || U_csr_p == NULL)?
                    (size_t)0 : (U_csr_p[ix+(size_t)1] - U_csr_p[ix]),
                (U == NULL)? ((real_t*)NULL) : (U + ix*(size_t)p),
                NA_as_zero_X, NA_as_zero_U,
                B, n, ldb,
                C, p,
                Bi, k_main_i, add_implicit_features,
                /* NOTE(review): the non-do_B stride here is 'n', while the
                   analogous call in the Case-1 branch above uses
                   'ldXones' - confirm whether this should be ldXones. */
                (Xfull == NULL)?
                    (Xones) : (Xones + (do_B? ix : (ix*(size_t)n))),
                (Xfull == NULL)? ((int_t)1) : (do_B? ldXones : (int_t)1),
                (weight == NULL)? ((real_t*)NULL)
                                : ( (Xfull != NULL)?
                                      (bufferW
                                       + (do_B?
                                           ((size_t)n*(size_t)omp_get_thread_num())
                                           : ((size_t)0)))
                                      : (weight + Xcsr_p[ix]) ),
                lam, w_user, w_implicit, lam_last,
                l1_lam, l1_lam_bias,
                scale_lam, scale_lam_sideinfo,
                scale_bias_const,
                (weight == NULL || wsumA == NULL)? 0 : wsumA[ix],
                bufferBtB, (Xfull != NULL)? cnt_NA_x[ix] : (int_t)0,
                bufferCtC,
                (U == NULL)? (int_t)0 : cnt_NA_u[ix],
                bufferBeTBeChol, n,
                bufferBiTBi,
                (Xfull == NULL)? (add_X) : (add_X || cnt_NA_x[ix] != 0),
                (U == NULL)? (add_U) : (add_U || cnt_NA_u[ix] != 0),
                use_cg, max_cg_steps,
                nonneg, max_cd_steps,
                bias_BtX, bias_X, bias_X_glob, precomputedCtUbias,
                buffer_remainder + (size_buffer*(size_t)omp_get_thread_num())
            );
        }
        set_blas_threads(nthreads_restore, (int*)NULL);
    }
}
/* Optimizes the full 'A' matrix (one row at a time) for the implicit-feedback
   collective model, holding 'B' (item factors) and 'C' (user side-info
   factors) fixed.

   'A' has 'max2(m, m_u)' rows: the first 'min(m, m_u)' rows have both X data
   and U data, while the remainder have only one of the two. Precomputed
   matrices that are passed as NULL are carved out of 'buffer_real_t' instead;
   the 'filled_*' output flags tell the caller which ones were computed here
   so they can be reused later for predictions. */
void optimizeA_collective_implicit
(
    real_t *restrict A, real_t *restrict B, real_t *restrict C,
    int_t m, int_t m_u, int_t n, int_t p,
    int_t k, int_t k_main, int_t k_user, int_t k_item,
    size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr,
    size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr,
    real_t *restrict U, int_t cnt_NA_u[], real_t *restrict U_colmeans,
    bool full_dense_u, bool near_dense_u, bool NA_as_zero_U,
    real_t lam, real_t l1_lam, real_t w_user,
    int nthreads,
    bool use_cg, int_t max_cg_steps,
    bool nonneg, int_t max_cd_steps,
    real_t *restrict precomputedBtB, /* will not have lambda with CG */
    real_t *restrict precomputedBeTBe,
    real_t *restrict precomputedBeTBeChol,
    real_t *restrict precomputedCtC,
    real_t *restrict precomputedCtUbias,
    bool *filled_BeTBe,
    bool *filled_BeTBeChol,
    bool *filled_CtC,
    bool *filled_CtUbias,
    real_t *restrict buffer_real_t
)
{
    int_t k_totA = k_user + k + k_main; /* row width of 'A' */
    int_t k_totB = k_item + k + k_main; /* row width of 'B' */
    int_t k_totC = k_user + k;          /* row width of 'C' */
    int_t ld_BtB = k + k_main;
    int_t m_x = m; /* <- 'm' later gets overwritten */
    *filled_BeTBe = false;
    *filled_BeTBeChol = false;
    *filled_CtC = false;
    /* 'ix' doubles as the LAPACK 'info' output for 'tpotrf_' below and as
       the parallel loop counter at the end. */
    int_t ix = 0;

    #ifdef TEST_CG
    use_cg = true;
    max_cg_steps = 10000;
    set_to_zero_(A, (size_t)max2(m, m_u)*(size_t)k_totA, nthreads);
    if (nonneg || l1_lam)
        use_cg = false;
    #endif

    /* The closed-form (Cholesky) path overwrites 'A' from scratch; CG instead
       refines the current values, so 'A' is kept as-is in that case. */
    if (!use_cg)
        set_to_zero_(A, (size_t)max2(m, m_u)*(size_t)k_totA, nthreads);

    /* TODO: BtB can be skipped when using NA_as_zero_U */
    if (precomputedBtB == NULL)
    {
        precomputedBtB = buffer_real_t;
        buffer_real_t += square(ld_BtB);
    }

    /* TODO: should get rid of the tsymv, replacing them with tgemv as it's
       faster, by filling up the lower half of the precomputed matrices. */

    /* If the X matrix has more rows, the extra rows will be independent
       from U and can be obtained from the single-matrix formula instead.
       However, if the U matrix has more rows, those still need to be
       considered as having a value of zero in X. */
    if (m > m_u)
    {
        int_t m_diff = m - m_u;
        /* Only call the single-matrix solver if the extra rows actually
           have X entries. NOTE(review): 'optimizeA_implicit' is expected to
           fill 'precomputedBtB' as a side effect here (mirroring the 'else'
           branch below) — confirm against its definition. */
        if (Xcsr_p[m_u] < Xcsr_p[m])
        optimizeA_implicit(
            A + (size_t)k_user + (size_t)m_u*(size_t)k_totA, (size_t)k_totA,
            B + k_item, (size_t)k_totB,
            m_diff, n, k + k_main,
            Xcsr_p + m_u, Xcsr_i, Xcsr,
            lam, l1_lam,
            nthreads, use_cg, max_cg_steps,
            nonneg, max_cd_steps,
            precomputedBtB,
            buffer_real_t
        );
        m = m_u;
    }

    else {
        /* B'B over the shared k+k_main dimensions (upper triangle only) */
        cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                    k+k_main, n,
                    1., B + k_item, k_totB,
                    0., precomputedBtB, ld_BtB);
        if (!use_cg)
            add_to_diag(precomputedBtB, lam, ld_BtB);
    }

    /* When U is sparse with missing-as-zero and centered, the bias term
       C'(-colmeans) can be precomputed once for all rows. */
    if (U == NULL && NA_as_zero_U && U_colmeans != NULL)
    {
        cblas_tgemv(CblasRowMajor, CblasTrans, p, k_user+k,
                    w_user, C, k_user+k,
                    U_colmeans, 1,
                    0., precomputedCtUbias, 1);
        *filled_CtUbias = true;
    }

    /* Rows beyond 'm_x' have no X data, so they can all share a single
       Cholesky factorization (not possible with non-negativity constraints).*/
    if (m_u > m_x && !nonneg &&
        !(U == NULL && U_csr_p != NULL && !NA_as_zero_U) &&
        (!use_cg || p > 2 * k_totA))
    {
        if (precomputedBeTBeChol == NULL)
        {
            precomputedBeTBeChol = buffer_real_t;
            buffer_real_t += square(k_totA);
        }
    }

    else {
        precomputedBeTBeChol = NULL;
    }

    /* C'C is only worth precomputing when U is dense or missing-as-zero */
    bool prefer_CtC = !(U == NULL && U_csr_p != NULL && !NA_as_zero_U);
    if (use_cg && prefer_CtC)
    {
        if (precomputedCtC == NULL)
        {
            precomputedCtC = buffer_real_t;
            buffer_real_t += square(k_user+k);
        }
    }

    else {
        precomputedCtC = NULL;
    }

    if (use_cg && prefer_CtC)
    {
        cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                    k_user+k, p,
                    1., C, k_user+k,
                    0., precomputedCtC, k_user+k);
        *filled_CtC = true;
    }

    /* Lower-right square of Be */
    if (!use_cg || precomputedBeTBeChol != NULL)
    {
        if (precomputedBeTBe == NULL)
        {
            precomputedBeTBe = buffer_real_t;
            buffer_real_t += square(k_totA);
        }
    }

    else {
        precomputedBeTBe = NULL;
    }

    if (precomputedBeTBe != NULL)
    {
        /* zero first so the k_user border is clean when copying the B'B
           sub-block into the lower-right corner */
        if (ld_BtB != k_totA)
            set_to_zero(precomputedBeTBe, square(k_totA));
        copy_mat(k+k_main, k+k_main,
                 precomputedBtB, k+k_main,
                 precomputedBeTBe + k_user + k_user*k_totA, k_totA);
        /* with CG, BtB does not carry lambda, so add it over the whole
           diagonal; otherwise only the first k_user entries still need it */
        if (use_cg)
            add_to_diag(precomputedBeTBe, lam, k_totA);
        else
            for (int_t ix = 0; ix < k_user; ix++)
                precomputedBeTBe[ix + ix*k_totA] += lam;
        *filled_BeTBe = true;
    }

    /* Upper-left square of Be if possible */
    if (precomputedBeTBe != NULL && (U != NULL || NA_as_zero_U))
    {
        if (precomputedCtC != NULL)
        {
            if (w_user == 1.)
                sum_mat(
                    k_user+k, k_user+k,
                    precomputedCtC, k_user+k,
                    precomputedBeTBe, k_totA
                );
            else {
                /* scaled element-wise accumulation of w_user * C'C */
                for (size_t row = 0; row < (size_t)(k_user+k); row++)
                    for (size_t col = 0; col < (size_t)(k_user+k); col++)
                        precomputedBeTBe[col + row*(size_t)k_totA]
                            +=
                        w_user * precomputedCtC[col + row*(size_t)k_totC];
            }
        }

        else
            cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                        k_user+k, p,
                        w_user, C, k_totC,
                        1., precomputedBeTBe, k_totA);
    }

    /* Lower half of Xe (reuse if possible) */
    if (!use_cg)
    {
        /* Seed A with w_user * U*C over the first k_user+k columns (the
           k_main columns were already zeroed above). */
        if (U != NULL) {
            cblas_tgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                        m_u, k_user + k, p,
                        w_user, U, p, C, k_totC,
                        0., A, k_totA);
        }

        else {
            tgemm_sp_dense(
                m_u, k_user + k, w_user,
                U_csr_p, U_csr_i, U_csr,
                C, k_totC,
                A, k_totA,
                nthreads
            );
        }
    }

    /* If there are no positive entries for some X and no missing values
       in U, can reuse a single Cholesky factorization for them. */
    if (precomputedBeTBeChol != NULL)
    {
        copy_arr(precomputedBeTBe, precomputedBeTBeChol, square(k_totA));
        char lo = 'L';
        tpotrf_(&lo, &k_totA, precomputedBeTBeChol, &k_totA, &ix);
        *filled_BeTBeChol = true;
    }

    m = max2(m, m_u);

    /* per-thread scratch requirement of the per-row solver */
    size_t size_buffer = use_cg? (3 * k_totA) : (square(k_totA));
    if (nonneg)
        size_buffer += k_totA;
    else if (l1_lam != 0.)
        size_buffer += (size_t)3*(size_t)k_totA;

    /* BLAS must run single-threaded inside the parallel loop */
    int nthreads_restore = 1;
    set_blas_threads(1, &nthreads_restore);

    #pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
            shared(A, B, C, m, n, p, k, k_user, k_item, k_main, lam, l1_lam, \
                   Xcsr, Xcsr_p, Xcsr_i, U, U_csr, U_csr_i, U_csr_p, \
                   NA_as_zero_U, cnt_NA_u, \
                   precomputedBeTBe, precomputedBtB, precomputedBeTBeChol, \
                   k_totA, buffer_real_t, use_cg, m_x, \
                   nonneg, max_cd_steps)
    for (ix = 0; ix < m; ix++)
        collective_closed_form_block_implicit(
            A + (size_t)ix*(size_t)k_totA,
            k, k_user, k_item, k_main,
            B, n, C, p,
            (ix < m_x)? (Xcsr + Xcsr_p[ix]) : ((real_t*)NULL),
            (ix < m_x)? (Xcsr_i + Xcsr_p[ix]) : ((int_t*)NULL),
            (ix < m_x)? (Xcsr_p[ix+(size_t)1] - Xcsr_p[ix]) : ((size_t)0),
            (U == NULL)? ((real_t*)NULL) : (U + (size_t)ix*(size_t)p),
            (U == NULL)? (0) : (cnt_NA_u[ix]),
            (U == NULL)? (U_csr + U_csr_p[ix]) : ((real_t*)NULL),
            (U == NULL)? (U_csr_i + U_csr_p[ix]) : ((int_t*)NULL),
            (U == NULL)? (U_csr_p[ix+(size_t)1] - U_csr_p[ix]) : ((size_t)0),
            NA_as_zero_U,
            lam, l1_lam, w_user,
            precomputedCtUbias,
            precomputedBeTBe,
            precomputedBtB,
            precomputedBeTBeChol,
            precomputedCtC,
            false, true, use_cg, max_cg_steps,
            nonneg, max_cd_steps,
            buffer_real_t + ((size_t)omp_get_thread_num() * size_buffer)
        );

    set_blas_threads(nthreads_restore, (int*)NULL);
}
/* Builds the full system matrix Be'Be for the collective closed-form:
   the top-left (k_user+k) block gets w_user*C'C, the bottom-right
   (k+k_main) block gets B'B (overlapping on the shared 'k' dimensions),
   and the regularization 'lam' is added over the whole diagonal.
   Only the upper triangle is filled. */
void build_BeTBe
(
    real_t *restrict bufferBeTBe,
    real_t *restrict B, int_t ldb, real_t *restrict C,
    int_t k, int_t k_user, int_t k_main, int_t k_item,
    int_t n, int_t p,
    real_t lam, real_t w_user
)
{
    int_t k_totA = k_user + k + k_main;
    int_t k_attr = k_user + k;  /* dimensions touched by 'C' */
    int_t k_pred = k + k_main;  /* dimensions touched by 'B' */

    set_to_zero(bufferBeTBe, square(k_totA));

    /* w_user * C'C into the top-left corner (skipped without side info) */
    if (p)
        cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                    k_attr, p,
                    w_user, C, k_attr,
                    0., bufferBeTBe, k_totA);

    /* B'B accumulated into the bottom-right corner (beta=1 preserves the
       C'C contribution on the overlapping 'k' block) */
    cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                k_pred, n,
                1., B + k_item, ldb,
                1., bufferBeTBe + k_user + k_user*k_totA, k_totA);

    add_to_diag(bufferBeTBe, lam, k_totA);
}
/* Fills the precomputed cross-product matrices B'B and C'C (upper
   triangles only). B'B is filled only when there are no observation
   weights (see the guard); C'C only when there is side info (p > 0).
   Either output may be passed as NULL to skip it. */
void build_BtB_CtC
(
    real_t *restrict BtB, real_t *restrict CtC,
    real_t *restrict B, int_t n, int_t ldb,
    real_t *restrict C, int_t p,
    int_t k, int_t k_user, int_t k_main, int_t k_item,
    real_t w_user,
    real_t *restrict weight
)
{
    if (BtB != NULL && weight == NULL)
        cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                    k + k_main, n,
                    1., B + k_item, ldb,
                    0., BtB, k+k_main);

    if (p && CtC != NULL)
        cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                    k_user + k, p,
                    w_user, C, k_user + k,
                    0., CtC, k_user + k);
}
/* Computes A = w * X*B (+ A when 'overwrite' is false).
   When 'do_B' is true, 'Xfull' holds the transposed X (leading dimension
   'ldX'); otherwise it is the regular row-major m-by-n X. */
void build_XBw
(
    real_t *restrict A, int_t lda,
    real_t *restrict B, int_t ldb,
    real_t *restrict Xfull, int_t ldX,
    int_t m, int_t n, int_t k,
    real_t w,
    bool do_B, bool overwrite
)
{
    real_t beta = overwrite? 0. : 1.; /* 0 -> overwrite A, 1 -> accumulate */

    if (do_B)
        cblas_tgemm(CblasRowMajor, CblasTrans, CblasNoTrans,
                    m, k, n,
                    w, Xfull, ldX, B, ldb,
                    beta, A, lda);
    else
        cblas_tgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                    m, k, n,
                    w, Xfull, n, B, ldb,
                    beta, A, lda);
}
int_t preprocess_vec
(
real_t *restrict *vec_full_, int_t n,
int_t *restrict ix_vec, real_t *restrict *vec_sp_, size_t nnz,
real_t glob_mean, real_t lam,
real_t *restrict col_means,
real_t *restrict vec_mean,
int_t *restrict cnt_NA,
bool *modified_vec, bool *modified_vec_sp
)
{
*modified_vec = false;
*modified_vec_sp = false;
real_t *restrict vec_full = (vec_full_ == NULL)? NULL : (*vec_full_);
real_t *restrict vec_sp = (vec_sp_ == NULL)? NULL : (*vec_sp_);
if (col_means != NULL)
{
if (vec_full != NULL) {
real_t *restrict temp = (real_t*)malloc((size_t)n*sizeof(real_t));
if (temp == NULL) return 1;
copy_arr(vec_full, temp, n);
vec_full = temp;
*vec_full_ = vec_full;
*modified_vec = true;
for (int_t ix = 0; ix < n; ix++)
vec_full[ix] -= col_means[ix] + glob_mean;
}
else {
real_t *restrict temp = (real_t*)malloc(nnz*sizeof(real_t));
if (temp == NULL) return 1;
copy_arr(vec_sp, temp, nnz);
vec_sp = temp;
*vec_sp_ = vec_sp;
*modified_vec_sp = true;
for (size_t ix = 0; ix < nnz; ix++)
vec_sp[ix] -= col_means[ix_vec[ix]] + glob_mean;
}
}
else if (glob_mean != 0.)
{
if (vec_full != NULL) {
real_t *restrict temp = (real_t*)malloc((size_t)n*sizeof(real_t));
if (temp == NULL) return 1;
copy_arr(vec_full, temp, n);
vec_full = temp;
*vec_full_ = vec_full;
*modified_vec = true;
for (int_t ix = 0; ix < n; ix++)
vec_full[ix] -= glob_mean;
}
else {
real_t *restrict temp = (real_t*)malloc(nnz*sizeof(real_t));
if (temp == NULL) return 1;
copy_arr(vec_sp, temp, nnz);
vec_sp = temp;
*vec_sp_ = vec_sp;
*modified_vec_sp = true;
for (size_t ix = 0; ix < nnz; ix++)
vec_sp[ix] -= glob_mean;
}
}
if (vec_full != NULL)
*cnt_NA = count_NAs(vec_full, (size_t)n, 1);
/* Note: this is a heuristic to obtain the user bias when making
warm start predictions. It tends to assing higher weights to the
bias and lower weights to the actual coefficients.
This is not used in the final code as when there are user biases,
it will be multiplied against a B matrix with ones appended as the
last column, from which the bias will be obtained. */
if (vec_mean != NULL)
{
*vec_mean = 0;
if (vec_full == NULL) {
if (!(*modified_vec_sp)) {
real_t *restrict temp = (real_t*)malloc(nnz*sizeof(real_t));
if (temp == NULL) return 1;
copy_arr(vec_sp, temp, nnz);
vec_sp = temp;
*vec_sp_ = vec_sp;
*modified_vec_sp = true;
}
for (size_t ix = 0; ix < nnz; ix++)
*vec_mean += vec_sp[ix];
*vec_mean /= ((double)nnz + lam);
for (size_t ix = 0; ix < nnz; ix++)
vec_sp[ix] -= *vec_mean;
}
else {
if (*cnt_NA) {
for (int_t ix = 0; ix < n; ix++) {
*vec_mean += (!isnan(vec_full[ix]))? vec_full[ix] : 0;
}
*vec_mean /= ((double)(n - *cnt_NA) + lam);
}
else {
for (int_t ix = 0; ix < n; ix++)
*vec_mean += vec_full[ix];
*vec_mean /= ((double)n + lam);
}
if (!(*modified_vec)) {
real_t *restrict temp = (real_t*)malloc((size_t)n
* sizeof(real_t));
if (temp == NULL) return 1;
copy_arr(vec_full, temp, n);
vec_full = temp;
*vec_full_ = vec_full;
*modified_vec = true;
}
for (int_t ix = 0; ix < n; ix++)
vec_full[ix] -= *vec_mean;
}
}
return 0;
}
/* Converts the sparse 'X' matrix from COO (triplet) format into both CSR
   and CSC layouts, along with matching reorderings of the observation
   weights when these are passed. On out-of-memory returns 1, leaving any
   successfully-allocated arrays for the caller to free. */
int_t convert_sparse_X
(
    int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz,
    size_t **Xcsr_p, int_t **Xcsr_i, real_t *restrict *Xcsr,
    size_t **Xcsc_p, int_t **Xcsc_i, real_t *restrict *Xcsc,
    real_t *restrict weight,
    real_t *restrict *weightR, real_t *restrict *weightC,
    int_t m, int_t n, int nthreads
)
{
    /* row-compressed representation */
    *Xcsr_p = (size_t*)malloc(((size_t)m+(size_t)1)*sizeof(size_t));
    *Xcsr_i = (int_t*)malloc(nnz*sizeof(int_t));
    *Xcsr = (real_t*)malloc(nnz*sizeof(real_t));
    /* column-compressed representation */
    *Xcsc_p = (size_t*)malloc(((size_t)n+(size_t)1)*sizeof(size_t));
    *Xcsc_i = (int_t*)malloc(nnz*sizeof(int_t));
    *Xcsc = (real_t*)malloc(nnz*sizeof(real_t));
    /* weights get reordered to match each of the two layouts */
    if (weight != NULL) {
        *weightR = (real_t*)malloc(nnz*sizeof(real_t));
        *weightC = (real_t*)malloc(nnz*sizeof(real_t));
    }

    bool oom = *Xcsr_p == NULL || *Xcsr_i == NULL || *Xcsr == NULL ||
               *Xcsc_p == NULL || *Xcsc_i == NULL || *Xcsc == NULL;
    if (weight != NULL)
        oom = oom || (*weightR == NULL || *weightC == NULL);
    if (oom)
        return 1;

    coo_to_csr_and_csc(
        ixA, ixB, X,
        weight, m, n, nnz,
        *Xcsr_p, *Xcsr_i, *Xcsr,
        *Xcsc_p, *Xcsc_i, *Xcsc,
        *weightR, *weightC,
        nthreads
    );
    return 0;
}
/* Preprocesses the user/item side-info matrix 'U': counts missing entries
   by row and by column (dense input), centers columns, converts sparse
   input from COO to CSR+CSC, and optionally produces a transposed copy
   ('Utrans') for faster column-wise access.
   Outputs:
   - 'cnt_NA_u_byrow'/'cnt_NA_u_bycol': allocated and filled (dense only).
   - 'full_dense_u'/'near_dense_u_*'/'some_full_u_*': density flags.
   - 'modified_U'/'modified_Usp': set when the input arrays were replaced
     by owned copies (caller must free).
   Returns 0 on success, 1 on out-of-memory (partially-allocated arrays
   are freed by the caller, see 'fit_*'). */
int_t preprocess_sideinfo_matrix
(
    real_t *restrict *U_, int_t m_u, int_t p,
    int_t U_row[], int_t U_col[], real_t *restrict *U_sp_, size_t nnz_U,
    real_t *U_colmeans, real_t *restrict *Utrans,
    size_t **U_csr_p, int_t **U_csr_i, real_t *restrict *U_csr,
    size_t **U_csc_p, int_t **U_csc_i, real_t *restrict *U_csc,
    int_t *restrict *cnt_NA_u_byrow, int_t *restrict *cnt_NA_u_bycol,
    bool *restrict full_dense_u, bool *restrict near_dense_u_row,
    bool *restrict near_dense_u_col,
    bool *restrict some_full_u_row, bool *restrict some_full_u_col,
    bool NA_as_zero_U, bool nonneg, int nthreads,
    bool *modified_U, bool *modified_Usp
)
{
    int_t retval = 0;
    *modified_U = false;
    *modified_Usp = false;
    real_t *restrict U = (U_ == NULL)? NULL : (*U_);
    real_t *restrict U_sp = (U_sp_ == NULL)? NULL : (*U_sp_);

    *full_dense_u = false;
    *near_dense_u_row = false;
    *near_dense_u_col = false;
    *some_full_u_row = false;
    *some_full_u_col = false;
    if (U != NULL)
    {
        *cnt_NA_u_byrow = (int_t*)calloc(m_u, sizeof(int_t));
        *cnt_NA_u_bycol = (int_t*)calloc(p, sizeof(int_t));
        if (*cnt_NA_u_byrow == NULL || *cnt_NA_u_bycol == NULL)
            return 1;
        count_NAs_by_row(U, m_u, p, *cnt_NA_u_byrow, nthreads,
                         full_dense_u, near_dense_u_row, some_full_u_row);
        count_NAs_by_col(U, m_u, p, *cnt_NA_u_bycol,
                         full_dense_u, near_dense_u_col, some_full_u_col);
    }

    /* Column-centering (when NA_as_zero_U with sparse input, the means are
       computed later from the CSC arrays instead). */
    if ((U != NULL || !NA_as_zero_U) && U_colmeans != NULL)
    {
        retval = center_by_cols(
            U_colmeans,
            U_, m_u, p,
            U_row, U_col, U_sp_, nnz_U,
            *U_csr_p, *U_csr_i, *U_csr,
            *U_csc_p, *U_csc_i, *U_csc,
            nthreads, modified_Usp, modified_U
        );
        if (retval != 0) return 1; /* <- arrays will be freed in 'fit_*' */
    }

    if (U == NULL && nnz_U)
    {
        /* sparse input: build both CSR and CSC representations */
        *U_csr_p = (size_t*)malloc(((size_t)m_u+(size_t)1)*sizeof(size_t));
        *U_csr_i = (int_t*)malloc(nnz_U*sizeof(int_t));
        *U_csr = (real_t*)malloc(nnz_U*sizeof(real_t));
        *U_csc_p = (size_t*)malloc(((size_t)p+(size_t)1)*sizeof(size_t));
        *U_csc_i = (int_t*)malloc(nnz_U*sizeof(int_t));
        *U_csc = (real_t*)malloc(nnz_U*sizeof(real_t));
        if (*U_csr_p == NULL || *U_csr_i == NULL || *U_csr == NULL ||
            *U_csc_p == NULL || *U_csc_i == NULL || *U_csc == NULL)
            return 1;
        coo_to_csr_and_csc(
            U_row, U_col, U_sp,
            (real_t*)NULL, m_u, p, nnz_U,
            *U_csr_p, *U_csr_i, *U_csr,
            *U_csc_p, *U_csc_i, *U_csc,
            (real_t*)NULL, (real_t*)NULL,
            nthreads
        );

        if (NA_as_zero_U && U_colmeans != NULL)
        {
            #if defined(_OPENMP) && \
                        ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                          || defined(_WIN32) || defined(_WIN64) \
                        )
            long long col;
            #endif

            size_t *restrict U_csc_p_ = *U_csc_p;
            real_t *restrict U_csc_ = *U_csc;
            /* Column means treating missing entries as zeros: running mean
               over the present entries, scaled by their fraction of m_u. */
            #pragma omp parallel for schedule(static) \
                    num_threads(cap_to_4(nthreads)) \
                    shared(U_csc_p_, U_csc_, U_colmeans, p)
            for (size_t_for col = 0; col < (size_t)p; col++)
            {
                double colmean = 0;
                int_t cnt = 0;
                for (size_t ix = U_csc_p_[col]; ix < U_csc_p_[col+1]; ix++)
                    colmean += (U_csc_[ix] - colmean) / (double)(++cnt);
                colmean *= (double)(U_csc_p_[col+1]-U_csc_p_[col]) / (double)m_u;
                U_colmeans[col] = colmean;
            }
        }
    }

    /* Transposed copy for faster column access when U is dense but has
       too many missing entries for the dense column-wise shortcuts.
       Fix: the flags must be dereferenced here - 'full_dense_u' and
       'near_dense_u_col' are pointers (written-to above), so testing the
       pointers themselves made this condition always false. */
    if (U != NULL && Utrans != NULL && !(*full_dense_u) && !(*near_dense_u_col))
    {
        *Utrans = (real_t*)malloc((size_t)m_u*(size_t)p*sizeof(real_t));
        if (*Utrans == NULL)
            return 1;
        transpose_mat2(U, m_u, p, *Utrans);
    }

    return 0;
}
/* Adapter matching the callback signature expected by the L-BFGS solver:
   unpacks the problem data stored in 'instance', bumps the function
   evaluation counter, and forwards to 'collective_fun_grad'.
   Returns the objective value; the gradient is written into 'g'.
   ('n' and 'step' are required by the callback interface but unused here.) */
real_t wrapper_collective_fun_grad
(
    void *instance,
    real_t *x,
    real_t *g,
    const size_t n,
    const real_t step
)
{
    data_collective_fun_grad *data = (data_collective_fun_grad*)instance;
    (data->nfev)++; /* running count of objective/gradient evaluations */
    return collective_fun_grad(
        x, g,
        data->m, data->n, data->k,
        data->ixA, data->ixB, data->X, data->nnz,
        data->Xfull,
        data->Xcsr_p, data->Xcsr_i, data->Xcsr,
        data->Xcsc_p, data->Xcsc_i, data->Xcsc,
        data->weight, data->weightR, data->weightC,
        data->user_bias, data->item_bias,
        data->lam, data->lam_unique,
        data->U, data->m_u, data->p, data->U_has_NA,
        data->II, data->n_i, data->q, data->I_has_NA,
        data->Ub, data->m_ubin, data->pbin, data->Ub_has_NA,
        data->Ib, data->n_ibin, data->qbin, data->Ib_has_NA,
        data->U_row, data->U_col, data->U_sp, data->nnz_U,
        data->I_row, data->I_col, data->I_sp, data->nnz_I,
        data->U_csr_p, data->U_csr_i, data->U_csr,
        data->U_csc_p, data->U_csc_i, data->U_csc,
        data->I_csr_p, data->I_csr_i, data->I_csr,
        data->I_csc_p, data->I_csc_i, data->I_csc,
        data->buffer_real_t, data->buffer_mt,
        data->k_main, data->k_user, data->k_item,
        data->w_main, data->w_user, data->w_item,
        data->nthreads
    );
}
/* Fits the collective explicit-feedback model by L-BFGS over the packed
   parameter vector 'values' (biases + A + B + C/Cb + D/Db).
   Sets up sparse representations and multi-threading buffers, installs a
   SIGINT handler for clean interruption, initializes biases, runs the
   optimizer, and optionally appends a ones column to produce 'B_plus_bias'.
   Returns 0 on success, 1 on out-of-memory, 3 on user interrupt.

   Fix: the NAN counts for the binary side-info matrices used the wrong
   column dimensions ('p'/'q' instead of 'pbin'/'qbin') - 'Ub' is
   m_ubin-by-pbin and 'Ib' is n_ibin-by-qbin (see the R_nan_to_C_nan calls
   above), so the old code scanned the wrong number of elements. */
int_t fit_collective_explicit_lbfgs_internal
(
    real_t *restrict values, bool reset_values,
    real_t *restrict glob_mean,
    real_t *restrict U_colmeans, real_t *restrict I_colmeans,
    int_t m, int_t n, int_t k,
    int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz,
    real_t *restrict Xfull,
    real_t *restrict weight,
    bool user_bias, bool item_bias, bool center,
    real_t lam, real_t *restrict lam_unique,
    real_t *restrict U, int_t m_u, int_t p,
    real_t *restrict II, int_t n_i, int_t q,
    real_t *restrict Ub, int_t m_ubin, int_t pbin,
    real_t *restrict Ib, int_t n_ibin, int_t qbin,
    int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U,
    int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I,
    int_t k_main, int_t k_user, int_t k_item,
    real_t w_main, real_t w_user, real_t w_item,
    int_t n_corr_pairs, size_t maxiter, int_t seed,
    int nthreads, bool prefer_onepass,
    bool verbose, int_t print_every, bool handle_interrupt,
    int_t *restrict niter, int_t *restrict nfev,
    real_t *restrict B_plus_bias
)
{
    real_t *restrict buffer_real_t = NULL;
    real_t *restrict buffer_mt = NULL;
    int_t retval = 0;
    size_t nvars, size_buffer, size_mt;
    nvars_collective_fun_grad(
        m, n, m_u, n_i, m_ubin, n_ibin,
        p, q, pbin, qbin,
        nnz, nnz_U, nnz_I,
        k, k_main, k_user, k_item,
        user_bias, item_bias, nthreads,
        X, Xfull,
        U, Ub, II, Ib,
        U_sp, U_sp, I_sp, I_sp,
        &nvars, &size_buffer, &size_mt
    );

    if (size_buffer) {
        buffer_real_t = (real_t*)malloc(size_buffer*sizeof(real_t));
        if (buffer_real_t == NULL) return 1;
    }

    int_t m_max = max2(max2(m, m_u), m_ubin);
    int_t n_max = max2(max2(n, n_i), n_ibin);
    bool U_has_NA = false;
    bool I_has_NA = false;
    bool Ub_has_NA = false;
    bool Ib_has_NA = false;
    real_t funval;
    lbfgs_parameter_t lbfgs_params;
    data_collective_fun_grad data;

    size_t *Xcsr_p = NULL;
    int_t *Xcsr_i = NULL;
    real_t *restrict Xcsr = NULL;
    real_t *restrict weightR = NULL;
    size_t *Xcsc_p = NULL;
    int_t *Xcsc_i = NULL;
    real_t *restrict Xcsc = NULL;
    real_t *restrict weightC = NULL;
    size_t *U_csr_p = NULL;
    int_t *U_csr_i = NULL;
    real_t *restrict U_csr = NULL;
    size_t *U_csc_p = NULL;
    int_t *U_csc_i = NULL;
    real_t *restrict U_csc = NULL;
    size_t *I_csr_p = NULL;
    int_t *I_csr_i = NULL;
    real_t *restrict I_csr = NULL;
    size_t *I_csc_p = NULL;
    int_t *I_csc_i = NULL;
    real_t *restrict I_csc = NULL;
    bool free_X = false;
    bool free_Xfull = false;
    bool free_U = false;
    bool free_Usp = false;
    bool free_I = false;
    bool free_Isp = false;

    #ifdef _FOR_R
    /* R uses a different NAN representation */
    if (Xfull != NULL) R_nan_to_C_nan(Xfull, (size_t)m*(size_t)n);
    if (U != NULL) R_nan_to_C_nan(U, (size_t)m_u*(size_t)p);
    if (II != NULL) R_nan_to_C_nan(II, (size_t)n_i*(size_t)q);
    if (Ub != NULL) R_nan_to_C_nan(Ub, (size_t)m_ubin*(size_t)pbin);
    if (Ib != NULL) R_nan_to_C_nan(Ib, (size_t)n_ibin*(size_t)qbin);
    #endif

    /* Install the SIGINT handler; the lock ensures only one concurrent
       fit owns (and later restores) the handler. */
    sig_t_ old_interrupt_handle = NULL;
    bool has_lock_on_handle = false;
    #pragma omp critical
    {
        if (!handle_is_locked)
        {
            handle_is_locked = true;
            has_lock_on_handle = true;
            should_stop_procedure = false;
            old_interrupt_handle = signal(SIGINT, set_interrup_global_variable);
        }
    }

    #ifdef _OPENMP
    /* With multi-threading and sparse inputs, either use a one-pass scheme
       with a big per-thread buffer, or precompute CSR/CSC matrices. */
    if (nthreads > 1 && (Xfull == NULL || U_sp != NULL || I_sp != NULL))
    {
        if (prefer_onepass)
        {
            buffer_mt = (real_t*)malloc(size_mt*sizeof(real_t));
            if (buffer_mt == NULL) {
                retval = 1;
                goto cleanup;
            }
        }

        else if (Xfull == NULL || U_sp != NULL || I_sp != NULL)
        {
            if (Xfull == NULL)
            {
                retval = convert_sparse_X(
                            ixA, ixB, X, nnz,
                            &Xcsr_p, &Xcsr_i, &Xcsr,
                            &Xcsc_p, &Xcsc_i, &Xcsc,
                            weight, &weightR, &weightC,
                            m, n, nthreads
                        );
                if (retval != 0) goto cleanup;
            }

            if (U_sp != NULL)
            {
                bool ignore[5];
                retval = preprocess_sideinfo_matrix(
                    (real_t**)NULL, m_u, p,
                    U_row, U_col, &U_sp, nnz_U,
                    (real_t*)NULL, (real_t**)NULL,
                    &U_csr_p, &U_csr_i, &U_csr,
                    &U_csc_p, &U_csc_i, &U_csc,
                    (int_t**)NULL, (int_t**)NULL,
                    &ignore[0], &ignore[1], &ignore[2],
                    &ignore[3], &ignore[4],
                    false, false, nthreads,
                    &free_U, &free_Usp
                );
                if (retval != 0) goto cleanup;
                /* the COO copy is no longer needed once CSR/CSC exist */
                if (free_Usp) {
                    free(U_sp);
                    U_sp = NULL;
                    free_Usp = false;
                }
            }

            if (I_sp != NULL)
            {
                bool ignore[5];
                retval = preprocess_sideinfo_matrix(
                    (real_t**)NULL, n_i, q,
                    I_row, I_col, &I_sp, nnz_I,
                    (real_t*)NULL, (real_t**)NULL,
                    &I_csr_p, &I_csr_i, &I_csr,
                    &I_csc_p, &I_csc_i, &I_csc,
                    (int_t**)NULL, (int_t**)NULL,
                    &ignore[0], &ignore[1], &ignore[2],
                    &ignore[3], &ignore[4],
                    false, false, nthreads,
                    &free_I, &free_Isp
                );
                if (retval != 0) goto cleanup;
                if (free_Isp) {
                    free(I_sp);
                    I_sp = NULL;
                    free_Isp = false;
                }
            }
        }
    }
    #endif

    /* Global mean and initial user/item biases (item biases start right
       after the user biases in 'values'). */
    *glob_mean = 0;
    retval = initialize_biases(
        glob_mean, values, values + (user_bias? m_max : 0),
        user_bias, item_bias, center,
        (lam_unique == NULL)? (lam) : (lam_unique[0]),
        (lam_unique == NULL)? (lam) : (lam_unique[1]),
        false, false,
        false, false,
        (real_t*)NULL, (real_t*)NULL,
        m, n,
        m_max, n_max,
        ixA, ixB, &X, nnz,
        &Xfull, (real_t*)NULL,
        Xcsr_p, Xcsr_i, Xcsr,
        Xcsc_p, Xcsc_i, Xcsc,
        weight, (real_t*)NULL,
        (real_t*)NULL, (real_t*)NULL,
        false,
        nthreads,
        &free_X, &free_Xfull,
        false
    );
    if (retval != 0) goto cleanup;

    /* Center the side-info matrices by columns, tracking whether owned
       copies replaced the caller's arrays (so cleanup frees the right one).*/
    if (U != NULL || U_sp != NULL || U_csr_p != NULL)
    {
        real_t *U_before = U;
        bool free_U_before = free_U;
        real_t *U_sp_before = U_sp;
        bool free_Usp_before = free_Usp;
        retval = center_by_cols(
            U_colmeans,
            &U, m_u, p,
            U_row, U_col, &U_sp, nnz_U,
            U_csr_p, U_csr_i, U_csr,
            U_csc_p, U_csc_i, U_csc,
            nthreads,
            &free_Usp, &free_U
        );
        if (free_U_before && U_before != U) free(U_before);
        if (free_Usp_before && U_sp_before != U_sp) free(U_sp_before);
        if (retval != 0) goto cleanup;
    }

    if (II != NULL || I_sp != NULL || I_csr_p != NULL)
    {
        real_t *I_before = II;
        bool free_I_before = free_I;
        real_t *I_sp_before = I_sp;
        bool free_Isp_before = free_Isp;
        retval = center_by_cols(
            I_colmeans,
            &II, n_i, q,
            I_row, I_col, &I_sp, nnz_I,
            I_csr_p, I_csr_i, I_csr,
            I_csc_p, I_csc_i, I_csc,
            nthreads,
            &free_Isp, &free_I
        );
        if (free_I_before && I_before != II) free(I_before);
        if (free_Isp_before && I_sp_before != I_sp) free(I_sp_before);
        if (retval != 0) goto cleanup;
    }

    /* Random-normal initialization of the factor matrices (biases excluded)*/
    if (reset_values)
    {
        ArraysToFill arrays =
            #ifndef __cplusplus
            (ArraysToFill)
            #endif
                              {
            values + (user_bias? m_max : 0) + (item_bias? n_max : 0),
            nvars - (size_t)(user_bias? m_max : 0)
                  - (size_t)(item_bias? n_max : 0),
            NULL, 0
        };
        retval = rnorm_parallel(arrays, seed, nthreads);
        if (retval != 0) goto cleanup;
    }

    if (U != NULL)
        U_has_NA = (bool)count_NAs(U, (size_t)m_u*(size_t)p, nthreads);
    if (II != NULL)
        I_has_NA = (bool)count_NAs(II, (size_t)n_i*(size_t)q, nthreads);
    /* fixed: Ub is m_ubin-by-pbin, Ib is n_ibin-by-qbin */
    if (Ub != NULL)
        Ub_has_NA = (bool)count_NAs(Ub, (size_t)m_ubin*(size_t)pbin, nthreads);
    if (Ib != NULL)
        Ib_has_NA = (bool)count_NAs(Ib, (size_t)n_ibin*(size_t)qbin, nthreads);

    lbfgs_params =
        #ifndef __cplusplus
        (lbfgs_parameter_t)
        #endif
                            {
        (size_t)n_corr_pairs, 1e-5, 0, 1e-5,
        maxiter, LBFGS_LINESEARCH_DEFAULT, 20,
        1e-20, 1e20, 1e-4, 0.9, 0.9, EPSILON_T,
        0.0, 0, -1,
    };
    data =
        #ifndef __cplusplus
        (data_collective_fun_grad)
        #endif
                                   {
        m, n, k,
        ixA, ixB, X, nnz,
        Xfull,
        Xcsr_p, Xcsr_i, Xcsr,
        Xcsc_p, Xcsc_i, Xcsc,
        weight, weightR, weightC,
        user_bias, item_bias,
        lam, lam_unique,
        U, m_u, p, U_has_NA,
        II, n_i, q, I_has_NA,
        Ub, m_ubin, pbin, Ub_has_NA,
        Ib, n_ibin, qbin, Ib_has_NA,
        U_row, U_col, U_sp, nnz_U,
        I_row, I_col, I_sp, nnz_I,
        U_csr_p, U_csr_i, U_csr,
        U_csc_p, U_csc_i, U_csc,
        I_csr_p, I_csr_i, I_csr,
        I_csc_p, I_csc_i, I_csc,
        buffer_real_t, buffer_mt,
        k_main, k_user, k_item,
        w_main, w_user, w_item,
        nthreads, print_every, 0, 0
    };

    if (should_stop_procedure)
    {
        print_err_msg("Procedure aborted before starting optimization.\n");
        retval = 3;
        goto cleanup;
    }

    retval = lbfgs(
        nvars,
        values,
        &funval,
        wrapper_collective_fun_grad,
        (verbose)? (lbfgs_printer_collective) : (NULL),
        (void*) &data,
        &lbfgs_params,
        (real_t*)NULL,
        (iteration_data_t*)NULL
    );
    if (verbose) {
        printf("\n\nOptimization terminated\n");
        printf("\t%s\n", lbfgs_strerror(retval));
        printf("\tniter:%3d, nfev:%3d\n", data.niter, data.nfev);
        fflush(stdout);
    }
    /* any L-BFGS status other than OOM is treated as success */
    if (retval == LBFGSERR_OUTOFMEMORY)
        retval = 1;
    else
        retval = 0;
    *niter = data.niter;
    *nfev = data.nfev;

    if (B_plus_bias != NULL && user_bias)
        append_ones_last_col(
            values + (
                  (user_bias? (size_t)m_max : (size_t)0)
                + (item_bias? (size_t)n_max : (size_t)0)
                + ((size_t)m_max * (size_t)(k_user+k+k_main))
            ),
            n_max, k_item+k+k_main,
            B_plus_bias
        );

    cleanup:
        free(buffer_real_t);
        free(buffer_mt);
        free(Xcsr_p);
        free(Xcsr_i);
        free(Xcsr);
        free(weightR);
        free(Xcsc_p);
        free(Xcsc_i);
        free(Xcsc);
        free(weightC);
        free(U_csr_p);
        free(U_csr_i);
        free(U_csr);
        free(U_csc_p);
        free(U_csc_i);
        free(U_csc);
        free(I_csr_p);
        free(I_csr_i);
        free(I_csr);
        free(I_csc_p);
        free(I_csc_i);
        free(I_csc);
        if (free_X)
            free(X);
        if (free_Xfull)
            free(Xfull);
        if (free_U)
            free(U);
        if (free_Usp)
            free(U_sp);
        if (free_I)
            free(II);
        if (free_Isp)
            free(I_sp);
    /* restore the previous SIGINT handler and act on pending interrupts */
    #pragma omp critical
    {
        if (has_lock_on_handle && handle_is_locked)
        {
            handle_is_locked = false;
            signal(SIGINT, old_interrupt_handle);
        }
        if (should_stop_procedure)
        {
            act_on_interrupt(3, handle_interrupt, true);
            if (retval != 1) retval = 3;
        }
    }
    if (retval == 1)
    {
        if (verbose)
            print_oom_message();
    }
    return retval;
}
/* Public entry point for the L-BFGS fit: packs the separate parameter
   matrices (biasA/biasB, A, B, C/Cb, D/Db) into a single flat 'values'
   vector, delegates to 'fit_collective_explicit_lbfgs_internal', unpacks
   the optimized values back into the caller's matrices, and optionally
   fills the precomputed matrices used for warm-start predictions.
   Returns 0 on success, 1 on out-of-memory, 2 on invalid inputs,
   3 on user interrupt. */
int_t fit_collective_explicit_lbfgs
(
    real_t *restrict biasA, real_t *restrict biasB,
    real_t *restrict A, real_t *restrict B,
    real_t *restrict C, real_t *restrict Cb,
    real_t *restrict D, real_t *restrict Db,
    bool reset_values, int_t seed,
    real_t *restrict glob_mean,
    real_t *restrict U_colmeans, real_t *restrict I_colmeans,
    int_t m, int_t n, int_t k,
    int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz,
    real_t *restrict Xfull,
    real_t *restrict weight,
    bool user_bias, bool item_bias, bool center,
    real_t lam, real_t *restrict lam_unique,
    real_t *restrict U, int_t m_u, int_t p,
    real_t *restrict II, int_t n_i, int_t q,
    real_t *restrict Ub, int_t m_ubin, int_t pbin,
    real_t *restrict Ib, int_t n_ibin, int_t qbin,
    int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U,
    int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I,
    int_t k_main, int_t k_user, int_t k_item,
    real_t w_main, real_t w_user, real_t w_item,
    int_t n_corr_pairs, size_t maxiter,
    int nthreads, bool prefer_onepass,
    bool verbose, int_t print_every, bool handle_interrupt,
    int_t *restrict niter, int_t *restrict nfev,
    bool precompute_for_predictions,
    bool include_all_X,
    real_t *restrict B_plus_bias,
    real_t *restrict precomputedBtB,
    real_t *restrict precomputedTransBtBinvBt,
    real_t *restrict precomputedBeTBeChol,
    real_t *restrict precomputedTransCtCinvCt,
    real_t *restrict precomputedCtCw
)
{
    int_t retval = 0;
    int_t k_totA = k_user + k + k_main; /* row width of 'A' */
    int_t k_totB = k_item + k + k_main; /* row width of 'B' */
    int_t m_max = max2(max2(m, m_u), m_ubin);
    int_t n_max = max2(max2(n, n_i), n_ibin);
    size_t nvars, ignored, ignored2 = 0;
    /* only 'nvars' (total parameter count) is needed here */
    nvars_collective_fun_grad(
        (size_t)m, (size_t)n, (size_t)m_u, (size_t)n_i,
        (size_t)m_ubin, (size_t)n_ibin,
        (size_t)p, (size_t)q, (size_t)pbin, (size_t)qbin,
        nnz, nnz_U, nnz_I,
        (size_t)k, (size_t)k_main, (size_t)k_user, (size_t)k_item,
        user_bias, item_bias, (size_t)nthreads,
        X, Xfull,
        U, Ub, II, Ib,
        U_sp, U_sp, I_sp, I_sp,
        &nvars, &ignored, &ignored2
    );

    size_t edge = 0;
    real_t *restrict values = (real_t*)malloc(nvars*sizeof(real_t));
    if (values == NULL) goto throw_oom;

    /* Pack: biases first, then A, B, C, Cb, D, Db in that fixed order
       (must match the unpacking loop below). */
    if (!reset_values)
    {
        edge = 0;
        if (user_bias) {
            copy_arr(biasA, values + edge, m_max);
            edge += m_max;
        }
        if (item_bias) {
            copy_arr(biasB, values + edge, n_max);
            edge += n_max;
        }
        copy_arr_(A, values + edge, (size_t)m_max*(size_t)k_totA, nthreads);
        edge += (size_t)m_max*(size_t)k_totA;
        copy_arr_(B, values + edge, (size_t)n_max*(size_t)k_totB, nthreads);
        edge += (size_t)n_max*(size_t)k_totB;
        if (p) {
            copy_arr_(C, values + edge, (size_t)p*(size_t)(k_user+k), nthreads);
            edge += (size_t)p*(size_t)(k_user+k);
        }
        if (pbin) {
            copy_arr_(Cb, values+edge,(size_t)pbin*(size_t)(k_user+k),nthreads);
            edge += (size_t)pbin*(size_t)(k_user+k);
        }
        if (q) {
            copy_arr_(D, values + edge, (size_t)q*(size_t)(k_item+k), nthreads);
            edge += (size_t)q*(size_t)(k_item+k);
        }
        if (qbin) {
            copy_arr_(Db, values+edge,(size_t)qbin*(size_t)(k_item+k),nthreads);
            edge += (size_t)qbin*(size_t)(k_item+k);
        }
    }

    /* interrupts are handled here (passes 'true' so the internal routine
       does not act on them itself) */
    retval = fit_collective_explicit_lbfgs_internal(
        values, reset_values,
        glob_mean,
        U_colmeans, I_colmeans,
        m, n, k,
        ixA, ixB, X, nnz,
        Xfull,
        weight,
        user_bias, item_bias, center,
        lam, lam_unique,
        U, m_u, p,
        II, n_i, q,
        Ub, m_ubin, pbin,
        Ib, n_ibin, qbin,
        U_row, U_col, U_sp, nnz_U,
        I_row, I_col, I_sp, nnz_I,
        k_main, k_user, k_item,
        w_main, w_user, w_item,
        n_corr_pairs, maxiter, seed,
        nthreads, prefer_onepass,
        verbose, print_every, true,
        niter, nfev,
        B_plus_bias
    );
    if ((retval != 0 && retval != 3) || (retval == 3 && !handle_interrupt))
        goto cleanup;

    /* Unpack the optimized values back into the caller's matrices
       (also on interrupt with handle_interrupt, to return partial results)*/
    if (true)
    {
        edge = 0;
        if (user_bias) {
            copy_arr(values + edge, biasA, m_max);
            edge += m_max;
        }
        if (item_bias) {
            copy_arr(values + edge, biasB, n_max);
            edge += n_max;
        }
        copy_arr_(values + edge, A, (size_t)m_max*(size_t)k_totA, nthreads);
        edge += (size_t)m_max*(size_t)k_totA;
        copy_arr_(values + edge, B, (size_t)n_max*(size_t)k_totB, nthreads);
        edge += (size_t)n_max*(size_t)k_totB;
        if (p) {
            copy_arr_(values + edge, C, (size_t)p*(size_t)(k_user+k), nthreads);
            edge += (size_t)p*(size_t)(k_user+k);
        }
        if (pbin) {
            copy_arr_(values+edge,Cb, (size_t)pbin*(size_t)(k_user+k),nthreads);
            edge += (size_t)pbin*(size_t)(k_user+k);
        }
        if (q) {
            copy_arr_(values + edge, D, (size_t)q*(size_t)(k_item+k), nthreads);
            edge += (size_t)q*(size_t)(k_item+k);
        }
        if (qbin) {
            copy_arr_(values+edge,Db, (size_t)qbin*(size_t)(k_item+k),nthreads);
            edge += (size_t)qbin*(size_t)(k_item+k);
        }
    }

    if (precompute_for_predictions)
    {
        /* re-raise a pending interrupt flag so the precompute step can
           also be interrupted */
        #pragma omp critical
        {
            if (retval == 3)
                should_stop_procedure = true;
        }
        retval = precompute_collective_explicit(
            B, n, n_max, include_all_X,
            C, p,
            (real_t*)NULL, false,
            (real_t*)NULL, *glob_mean, false,
            (real_t*)NULL, false,
            k, k_user, k_item, k_main,
            user_bias,
            false,
            lam, lam_unique,
            false, false,
            false, 0.,
            w_main, w_user, 1.,
            B_plus_bias,
            precomputedBtB,
            precomputedTransBtBinvBt,
            (real_t*)NULL,
            precomputedBeTBeChol,
            (real_t*)NULL,
            precomputedTransCtCinvCt,
            precomputedCtCw,
            (real_t*)NULL
        );
        #pragma omp critical
        {
            if (should_stop_procedure && retval == 0) {
                retval = 3;
            }
        }
    }

    cleanup:
        free(values);
        act_on_interrupt(retval, handle_interrupt, false);
        return retval;
    throw_oom:
    {
        if (verbose)
            print_oom_message();
        retval = 1;
        goto cleanup;
    }
}
/* TODO: it's no longer necessary or beneficial to have separate functions
for 'optimizeA' - could instead make calls only to 'optimizeA_collective',
and can also replace the 'optimizeA_implicit' with calls to
'optimizeA' + 'NA_as_zero_X', so as to simplify the code. */
/* TODO: should have the option of passing the matrices either in row-major
or in column-major order, as it needs to have both in any case. */
int_t fit_collective_explicit_als
(
real_t *restrict biasA, real_t *restrict biasB,
real_t *restrict A, real_t *restrict B,
real_t *restrict C, real_t *restrict D,
real_t *restrict Ai, real_t *restrict Bi,
bool add_implicit_features,
bool reset_values, int_t seed,
real_t *restrict glob_mean,
real_t *restrict U_colmeans, real_t *restrict I_colmeans,
int_t m, int_t n, int_t k,
int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz,
real_t *restrict Xfull,
real_t *restrict weight,
bool user_bias, bool item_bias, bool center,
real_t lam, real_t *restrict lam_unique,
real_t l1_lam, real_t *restrict l1_lam_unique,
bool scale_lam, bool scale_lam_sideinfo, bool scale_bias_const,
real_t *restrict scaling_biasA, real_t *restrict scaling_biasB,
real_t *restrict U, int_t m_u, int_t p,
real_t *restrict II, int_t n_i, int_t q,
int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U,
int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I,
bool NA_as_zero_X, bool NA_as_zero_U, bool NA_as_zero_I,
int_t k_main, int_t k_user, int_t k_item,
real_t w_main, real_t w_user, real_t w_item, real_t w_implicit,
int_t niter, int nthreads, bool verbose, bool handle_interrupt,
bool use_cg, int_t max_cg_steps, bool finalize_chol,
bool nonneg, int_t max_cd_steps, bool nonneg_C, bool nonneg_D,
bool precompute_for_predictions,
bool include_all_X,
real_t *restrict B_plus_bias,
real_t *restrict precomputedBtB,
real_t *restrict precomputedTransBtBinvBt,
real_t *restrict precomputedBtXbias,
real_t *restrict precomputedBeTBeChol,
real_t *restrict precomputedBiTBi,
real_t *restrict precomputedTransCtCinvCt,
real_t *restrict precomputedCtCw,
real_t *restrict precomputedCtUbias
)
{
int_t retval = 0;
if (k_user && U == NULL && nnz_U == 0) {
if (verbose)
fprintf(stderr, "Cannot pass 'k_user' without U data.\n");
retval = 2;
}
if (k_item && II == NULL && nnz_I == 0) {
if (verbose)
fprintf(stderr, "Cannot pass 'k_item' without I data.\n");
retval = 2;
}
if (k_main && Xfull == NULL && nnz == 0) {
if (verbose)
fprintf(stderr, "Cannot pass 'k_main' without X data.\n");
retval = 2;
}
if (retval == 2)
{
if (verbose) {
fflush(stderr);
}
return retval;
}
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long row, col, ix;
#endif
#ifndef _OPENMP
nthreads = 1;
#endif
int_t k_totA = k_user + k + k_main;
int_t k_totB = k_item + k + k_main;
int_t has_bias = user_bias || item_bias;
int_t m_max = max2(m, m_u);
int_t n_max = max2(n, n_i);
real_t *restrict A_bias = NULL;
real_t *restrict B_bias = NULL;
real_t *restrict Xcsr_orig = NULL;
real_t *restrict Xcsc_orig = NULL;
real_t *restrict Xfull_orig = NULL;
real_t *restrict Xtrans_orig = NULL;
real_t *restrict buffer_BtX = NULL;
bool free_BtX = false;
bool free_X = false;
bool free_Xfull = false;
bool free_U = false;
bool free_I = false;
bool free_Usp = false;
bool free_Isp = false;
real_t *restrict buffer_CtUbias = NULL;
real_t *restrict DtIbias = NULL;
real_t *restrict buffer_real_t = NULL;
size_t size_bufferA = 0;
size_t size_bufferB = 0;
size_t size_bufferC = 0;
size_t size_bufferD = 0;
size_t size_bufferAi = 0;
size_t size_bufferBi = 0;
size_t size_buffer = 0;
real_t *restrict Xtrans = NULL;
real_t *restrict Wtrans = NULL;
size_t *Xcsr_p = NULL;
int_t *Xcsr_i = NULL;
real_t *restrict Xcsr = NULL;
real_t *restrict weightR = NULL;
size_t *Xcsc_p = NULL;
int_t *Xcsc_i = NULL;
real_t *restrict Xcsc = NULL;
real_t *restrict weightC = NULL;
real_t *restrict Xones = NULL;
real_t *restrict Utrans = NULL;
size_t *U_csr_p = NULL;
int_t *U_csr_i = NULL;
real_t *restrict U_csr = NULL;
size_t *U_csc_p = NULL;
int_t *U_csc_i = NULL;
real_t *restrict U_csc = NULL;
real_t *restrict Itrans = NULL;
size_t *I_csr_p = NULL;
int_t *I_csr_i = NULL;
real_t *restrict I_csr = NULL;
size_t *I_csc_p = NULL;
int_t *I_csc_i = NULL;
real_t *restrict I_csc = NULL;
int_t *restrict cnt_NA_byrow = NULL;
int_t *restrict cnt_NA_bycol = NULL;
int_t *restrict cnt_NA_u_byrow = NULL;
int_t *restrict cnt_NA_u_bycol = NULL;
int_t *restrict cnt_NA_i_byrow = NULL;
int_t *restrict cnt_NA_i_bycol = NULL;
int_t *restrict zeros_m = NULL;
int_t *restrict zeros_n = NULL;
bool full_dense = false;
bool near_dense_row = false;
bool near_dense_col = false;
bool some_full_row = false;
bool some_full_col = false;
bool full_dense_u = false;
bool near_dense_u_row = false;
bool near_dense_u_col = false;
bool some_full_u_row = false;
bool some_full_u_col = false;
bool full_dense_i = false;
bool near_dense_i_row = false;
bool near_dense_i_col = false;
bool some_full_i_row = false;
bool some_full_i_col = false;
bool filled_BtB = false;
bool filled_CtCw = false;
bool filled_BeTBeChol = false;
bool filled_BiTBi = false;
bool filled_CtUbias = false;
bool CtC_is_scaled = false;
bool ignore = false;
bool ignore2 = false;
bool ignore3 = false;
bool ignore4 = false;
bool ignore5 = false;
bool back_to_precompute = false;
bool finished_TransBtBinvBt = false;
bool finished_TransCtCinvCt = false;
char lo = 'L';
int_t ignore_int = 0;
int_t k_pred = 0;
bool free_BiTBi = false;
bool free_arr_use = false;
real_t *arr_use = NULL;
real_t *restrict lam_unique_copy = NULL;
real_t *restrict l1_lam_unique_copy = NULL;
real_t *restrict wsumA = NULL;
real_t *restrict wsumB = NULL;
scale_lam = scale_lam || scale_lam_sideinfo;
bool use_cg_A = use_cg;
bool use_cg_B = use_cg;
if (NA_as_zero_X && weight == NULL && NA_as_zero_U && U == NULL)
use_cg_A = false;
if (NA_as_zero_X && weight == NULL && NA_as_zero_I && II == NULL)
use_cg_B = false;
if (nonneg || l1_lam || l1_lam_unique != NULL)
{
use_cg = false;
use_cg_A = false;
use_cg_B = false;
}
if (!use_cg) finalize_chol = false;
sig_t_ old_interrupt_handle = NULL;
bool has_lock_on_handle = false;
#pragma omp critical
{
if (!handle_is_locked)
{
handle_is_locked = true;
has_lock_on_handle = true;
should_stop_procedure = false;
old_interrupt_handle = signal(SIGINT, set_interrup_global_variable);
}
}
/* This avoids differences in the scaling of the precomputed matrices */
if (w_main != 1.)
{
lam /= w_main;
l1_lam /= w_main;
w_user /= w_main;
w_item /= w_main;
w_implicit /= w_main;
if (lam_unique != NULL)
{
lam_unique_copy = (real_t*)malloc(6*sizeof(real_t));
if (lam_unique_copy == NULL) goto throw_oom;
for (int_t ix = 0; ix < 6; ix++)
lam_unique_copy[ix] = lam_unique[ix] / w_main;
lam_unique = lam_unique_copy;
}
if (l1_lam_unique != NULL)
{
l1_lam_unique_copy = (real_t*)malloc(6*sizeof(real_t));
if (l1_lam_unique_copy == NULL) goto throw_oom;
for (int_t ix = 0; ix < 6; ix++)
l1_lam_unique_copy[ix] = l1_lam_unique[ix] / w_main;
l1_lam_unique = l1_lam_unique_copy;
}
w_main = 1.;
}
if (add_implicit_features && precomputedBiTBi == NULL)
{
free_BiTBi = true;
precomputedBiTBi = (real_t*)malloc((size_t)square(k+k_main)
* sizeof(real_t));
if (precomputedBiTBi == NULL) goto throw_oom;
}
if (U == NULL && NA_as_zero_U && U_colmeans != NULL &&
precomputedCtUbias == NULL)
{
buffer_CtUbias = (real_t*)malloc((size_t)(k_user+k)*sizeof(real_t));
if (buffer_CtUbias == NULL) goto throw_oom;
precomputedCtUbias = buffer_CtUbias;
}
if (II == NULL && NA_as_zero_I)
{
DtIbias = (real_t*)malloc((size_t)(k_item+k)*sizeof(real_t));
if (DtIbias == NULL) goto throw_oom;
}
#ifdef _FOR_R
if (Xfull != NULL) R_nan_to_C_nan(Xfull, (size_t)m*(size_t)n);
if (U != NULL) R_nan_to_C_nan(U, (size_t)m_u*(size_t)p);
if (II != NULL) R_nan_to_C_nan(II, (size_t)n_i*(size_t)q);
#endif
if (!center)
*glob_mean = 0.;
if ((!scale_lam && !scale_lam_sideinfo) || !has_bias)
scale_bias_const = false;
retval = calc_mean_and_center(
ixA, ixB, &X, nnz,
&Xfull, (real_t*)NULL,
m, n,
(size_t*)NULL, (int_t*)NULL, (real_t*)NULL,
(size_t*)NULL, (int_t*)NULL, (real_t*)NULL,
weight,
NA_as_zero_X, nonneg, center, nthreads,
glob_mean, &free_X, &free_Xfull,
false
);
if (retval != 0) goto throw_oom;
if (Xfull != NULL)
{
cnt_NA_byrow = (int_t*)calloc(m, sizeof(int_t));
cnt_NA_bycol = (int_t*)calloc(n, sizeof(int_t));
if (cnt_NA_byrow == NULL || cnt_NA_bycol == NULL)
goto throw_oom;
count_NAs_by_row(Xfull, m, n, cnt_NA_byrow, nthreads,
&full_dense, &near_dense_row, &some_full_row);
count_NAs_by_col(Xfull, m, n, cnt_NA_bycol,
&full_dense, &near_dense_col, &some_full_col);
}
else
{
if (NA_as_zero_X)
{
if (U != NULL || nnz_U)
m = max2(m, m_u);
if (II != NULL || nnz_I)
n = max2(n, n_i);
}
retval = convert_sparse_X(
ixA, ixB, X, nnz,
&Xcsr_p, &Xcsr_i, &Xcsr,
&Xcsc_p, &Xcsc_i, &Xcsc,
weight, &weightR, &weightC,
m, n, nthreads
);
if (retval != 0) goto throw_oom;
if (free_X)
{
free(X);
X = NULL;
free_X = false;
}
}
if (Xfull != NULL && ((!full_dense && near_dense_col) || m > m_u))
{
Xtrans = (real_t*)malloc((size_t)m*(size_t)n*sizeof(real_t));
if (Xtrans == NULL) goto throw_oom;
transpose_mat2(Xfull, m, n, Xtrans);
if (weight != NULL)
{
Wtrans = (real_t*)malloc((size_t)m*(size_t)n*sizeof(real_t));
if (Wtrans == NULL) goto throw_oom;
transpose_mat2(weight, m, n, Wtrans);
}
}
if (add_implicit_features)
{
if (Xfull == NULL) {
Xones = (real_t*)malloc(nnz*sizeof(real_t));
if (Xones == NULL) goto throw_oom;
for (size_t ix = 0; ix < nnz; ix++)
Xones[ix] = 1.;
}
else {
Xones = (real_t*)malloc((size_t)m*(size_t)n*sizeof(real_t));
zeros_m = (int_t*)calloc(m, sizeof(int_t));
zeros_n = (int_t*)calloc(n, sizeof(int_t));
if (Xones == NULL || zeros_m == NULL || zeros_n == NULL)
goto throw_oom;
/* TODO: maybe should add a transposed version too */
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
Xones[col + row*(size_t)n]
=
isnan(Xfull[col + row*(size_t)n])? 0. : 1.;
}
}
/* For the biases, will do the trick by subtracting the bias from
all entries before optimizing a given matrix, unless using 'NA_as_zero',
in which case will pre-multiply the biases by the opposite matrix. */
if (has_bias)
{
A_bias = (real_t*)malloc((size_t)m_max * (size_t)(k_totA+1)
* sizeof(real_t));
/* Note: 'B_plus_bias' might be part of the desired outputs, in which
case it is to be passed already allocated. If not, will allocate it
here instead */
if (B_plus_bias == NULL)
B_bias = (real_t*)malloc((size_t)n_max * (size_t)(k_totB+1)
* sizeof(real_t));
else
B_bias = B_plus_bias;
if (A_bias == NULL || B_bias == NULL) goto throw_oom;
if (Xcsr != NULL && Xfull == NULL && !NA_as_zero_X)
{
if (item_bias) {
Xcsr_orig = (real_t*)malloc(nnz*sizeof(real_t));
if (Xcsr_orig == NULL) goto throw_oom;
copy_arr_(Xcsr, Xcsr_orig, nnz, nthreads);
}
if (user_bias) {
Xcsc_orig = (real_t*)malloc(nnz*sizeof(real_t));
if (Xcsc_orig == NULL) goto throw_oom;
copy_arr_(Xcsc, Xcsc_orig, nnz, nthreads);
}
}
if (Xfull != NULL && (item_bias || Xtrans == NULL))
{
Xfull_orig = (real_t*)malloc((size_t)m*(size_t)n*sizeof(real_t));
if (Xfull_orig == NULL) goto throw_oom;
copy_arr_(Xfull, Xfull_orig, (size_t)m*(size_t)n, nthreads);
if (!free_Xfull) {
real_t *temp = Xfull_orig;
Xfull_orig = Xfull;
Xfull = temp;
free_Xfull = true;
}
}
if (Xtrans != NULL && user_bias)
{
Xtrans_orig = (real_t*)malloc((size_t)m*(size_t)n*sizeof(real_t));
if (Xtrans_orig == NULL) goto throw_oom;
copy_arr_(Xtrans, Xtrans_orig, (size_t)m*(size_t)n, nthreads);
}
}
else {
/* these are only used as place-holders, do not get overwritten */
A_bias = A;
B_bias = B;
}
if (Xfull == NULL && NA_as_zero_X && (center || has_bias))
{
if (precomputedBtXbias == NULL || (user_bias && !item_bias))
{
free_BtX = true;
buffer_BtX = (real_t*)calloc((size_t)(k+k_main+1), sizeof(real_t));
if (buffer_BtX == NULL) goto throw_oom;
}
else {
buffer_BtX = precomputedBtXbias;
set_to_zero(buffer_BtX, k+k_main+user_bias);
}
}
if (U != NULL || nnz_U)
{
if (U == NULL && NA_as_zero_U)
{
m_u = max2(m, m_u);
}
retval = preprocess_sideinfo_matrix(
&U, m_u, p,
U_row, U_col, &U_sp, nnz_U,
U_colmeans, &Utrans,
&U_csr_p, &U_csr_i, &U_csr,
&U_csc_p, &U_csc_i, &U_csc,
&cnt_NA_u_byrow, &cnt_NA_u_bycol,
&full_dense_u, &near_dense_u_row, &near_dense_u_col,
&some_full_u_row, &some_full_u_col,
NA_as_zero_U, nonneg_C, nthreads,
&free_U, &free_Usp
);
if (retval != 0) goto throw_oom;
if (free_Usp) {
free(U_sp);
U_sp = NULL;
free_Usp = false;
}
}
if (II != NULL || nnz_I)
{
if (II == NULL && NA_as_zero_U)
{
n_i = max2(n, n_i);
}
retval = preprocess_sideinfo_matrix(
&II, n_i, q,
I_row, I_col, &I_sp, nnz_I,
I_colmeans, &Itrans,
&I_csr_p, &I_csr_i, &I_csr,
&I_csc_p, &I_csc_i, &I_csc,
&cnt_NA_i_byrow, &cnt_NA_i_bycol,
&full_dense_i, &near_dense_i_row, &near_dense_i_col,
&some_full_i_row, &some_full_i_col,
NA_as_zero_I, nonneg_D, nthreads,
&free_I, &free_Isp
);
if (retval != 0) goto throw_oom;
if (free_Isp) {
free(I_sp);
I_sp = NULL;
free_Isp = false;
}
}
/* Sizes of the temporary arrays */
if (U != NULL || nnz_U)
size_bufferC = buffer_size_optimizeA(
m_u, full_dense_u,
near_dense_u_col,
some_full_u_col,
Utrans == NULL,
U != NULL, false, NA_as_zero_U,
nonneg_C, l1_lam != 0. || l1_lam_unique != NULL,
k_user+k, nthreads,
U_colmeans != NULL,
((size_t)n*(size_t)(k_user+k+k_main+user_bias)
>=
(size_t)square(k_user+k) )?
(precomputedTransBtBinvBt != NULL)
:
(precomputedBeTBeChol != NULL),
false,
use_cg && !nonneg_C, finalize_chol
);
if (II != NULL || nnz_I)
size_bufferD = buffer_size_optimizeA(
n_i, full_dense_i,
near_dense_i_col,
some_full_i_col,
Itrans == NULL,
II != NULL, false, NA_as_zero_I,
nonneg_D, l1_lam != 0. || l1_lam_unique != NULL,
k_item+k, nthreads,
I_colmeans != NULL,
((size_t)n*(size_t)(k_user+k+k_main+user_bias)
>=
(size_t)square(k_item+k) )?
(precomputedTransBtBinvBt != NULL)
:
((k_item <= k_user + user_bias)?
(precomputedBeTBeChol != NULL) : (false)),
false,
use_cg && !nonneg_D, finalize_chol
);
if (add_implicit_features)
size_bufferAi = buffer_size_optimizeA(
n, true, false, false, false,
Xfull != NULL, false, Xfull == NULL,
nonneg, l1_lam != 0. || l1_lam_unique != NULL,
k+k_main, nthreads,
false,
precomputedBtB != NULL, false,
false, false
);
if (add_implicit_features)
size_bufferBi = buffer_size_optimizeA(
m, true, false, false, Xfull != NULL,
Xfull != NULL, false, Xfull == NULL,
nonneg, l1_lam != 0. || l1_lam_unique != NULL,
k+k_main, nthreads,
false,
precomputedBtB != NULL, false,
false, false
);
if (U != NULL || nnz_U || add_implicit_features)
size_bufferA = buffer_size_optimizeA_collective(
m, m_u, n, p,
k, k_main + (int)user_bias, k_user,
full_dense, near_dense_row, some_full_row, false,
Xfull != NULL, Xcsr_p != NULL, weight != NULL, NA_as_zero_X,
U != NULL, U_csr_p != NULL,
full_dense_u, near_dense_u_row, some_full_u_row, NA_as_zero_U,
add_implicit_features, k_main,
nthreads,
use_cg_A && !nonneg, finalize_chol,
nonneg, l1_lam != 0. || l1_lam_unique != NULL,
true,
precomputedBtB != NULL,
precomputedCtCw != NULL,
precomputedBeTBeChol != NULL,
true
);
else
size_bufferA = buffer_size_optimizeA(
n, full_dense, near_dense_row, some_full_row, false,
Xfull != NULL, weight != NULL, NA_as_zero_X,
nonneg, l1_lam != 0. || l1_lam_unique != NULL,
k+k_main+(int)user_bias, nthreads,
false,
precomputedBtB != NULL, precompute_for_predictions,
use_cg && !nonneg, finalize_chol
);
if (II != NULL || nnz_I || add_implicit_features)
size_bufferB = buffer_size_optimizeA_collective(
n, n_i, m, q,
k, k_main + (int)item_bias, k_item,
full_dense, near_dense_col, some_full_col,
(Xtrans != NULL)? false : true,
Xfull != NULL, Xcsc_p != NULL, weight != NULL, NA_as_zero_X,
II != NULL, I_csr_p != NULL,
full_dense_i, near_dense_i_row, some_full_i_col, NA_as_zero_I,
add_implicit_features, k_main,
nthreads,
use_cg_B && !nonneg, finalize_chol,
nonneg, l1_lam != 0. || l1_lam_unique != NULL,
false,
(item_bias <= user_bias)?
(precomputedBtB != NULL) : (false),
(k_item + item_bias <= k_user + user_bias)?
(precomputedCtCw != NULL) : (false),
(k_item + item_bias <= k_user + user_bias)?
(precomputedBeTBeChol != NULL) : (false),
true
);
else
size_bufferB = buffer_size_optimizeA(
m, full_dense, near_dense_col, some_full_col, Xtrans == NULL,
Xfull != NULL, weight != NULL, NA_as_zero_X,
nonneg, l1_lam != 0. || l1_lam_unique != NULL,
k+k_main+(int)item_bias, nthreads,
false,
((size_t)n*(size_t)(k_user+k+k_main+user_bias)
>=
(size_t)square(k+k_main+item_bias) )?
(precomputedTransBtBinvBt != NULL)
:
((k_item + item_bias <= k_user + user_bias)?
(precomputedBeTBeChol != NULL) : (false)),
false,
use_cg && !nonneg, finalize_chol
);
size_buffer = max2(max2(size_bufferA, size_bufferB),
max2(size_bufferC, size_bufferD));
size_buffer = max2(size_buffer, max2(size_bufferAi, size_bufferBi));
buffer_real_t = (real_t*)malloc(size_buffer * sizeof(real_t));
if (buffer_real_t == NULL) goto throw_oom;
/* If using scaled lambda and there are weights or there are biases that
need to be initialized, will first need to calculate the multipliers
for each row and column. */
if (scale_lam && (weight != NULL || (user_bias || (item_bias && use_cg_B))))
{
if (weight != NULL || user_bias) {
wsumA = (real_t*)calloc(m_max, sizeof(real_t));
if (wsumA == NULL) goto throw_oom;
}
if (weight != NULL || (item_bias && (user_bias || use_cg_B))) {
wsumB = (real_t*)calloc(n_max, sizeof(real_t));
if (wsumB == NULL) goto throw_oom;
}
if (weight != NULL)
{
if (Xfull != NULL)
{
#pragma omp parallel for schedule(static) \
num_threads(cap_to_4(nthreads)) \
shared(Xfull, weight, m, n, wsumA)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double wsum = 0;
for (size_t col = 0; col < (size_t)n; col++)
wsum += isnan(Xfull[col + row*n])?
0 : weight[col + row*n];
wsumA[row] = (cnt_NA_byrow[row] < n)? wsum : 1;
}
if (Xtrans != NULL)
{
#pragma omp parallel for schedule(static) \
num_threads(cap_to_4(nthreads)) \
shared(Xtrans, Wtrans, m, n, wsumB)
for (size_t_for col = 0; col < (size_t)n; col++)
{
double wsum = 0;
for (size_t row = 0; row < (size_t)m; row++)
wsum += isnan(Xtrans[row + col*m])?
0 : Wtrans[row + col*m];
wsumB[col] = (cnt_NA_bycol[col] < m)? wsum : 1;
}
}
else
{
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
wsumB[col] += isnan(Xfull[col + row*n])?
0 : weight[col + row*n];
for (int_t col = 0; col < n; col++)
wsumB[col] = (cnt_NA_bycol[col] < m)? wsumB[col] : 1;
}
}
else
{
#pragma omp parallel for schedule(static) \
num_threads(cap_to_4(nthreads)) \
shared(Xcsr_p, weightR, m, wsumA)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double wsum = 0;
for (size_t ix = Xcsr_p[row]; ix < Xcsr_p[row+1]; ix++)
wsum += weightR[ix];
wsumA[row] = (NA_as_zero_X || Xcsr_p[row+1] > Xcsr_p[row])?
wsum : 1;
}
#pragma omp parallel for schedule(static) \
num_threads(cap_to_4(nthreads)) \
shared(Xcsc_p, weightC, n, wsumB)
for (size_t_for col = 0; col < (size_t)n; col++)
{
double wsum = 0;
for (size_t ix = Xcsc_p[col]; ix < Xcsc_p[col+1]; ix++)
wsum += weightC[ix];
wsumB[col] = (NA_as_zero_X || Xcsc_p[col+1] > Xcsc_p[col])?
wsum : 1;
}
}
if (NA_as_zero_X && Xfull == NULL)
{
for (int_t row = 0; row < m; row++)
wsumA[row] +=
(real_t)(n - (int_t)(Xcsr_p[row+1] - Xcsr_p[row]));
for (int_t col = 0; col < n; col++)
wsumB[col] +=
(real_t)(m - (int_t)(Xcsc_p[col+1] - Xcsc_p[col]));
}
}
else if (has_bias)
{
if (user_bias)
{
if (Xfull != NULL) {
for (int_t row = 0; row < m; row++)
wsumA[row] = n - cnt_NA_byrow[row]
+ (cnt_NA_byrow[row] == n);
}
else if (NA_as_zero_X) {
for (int_t row = 0; row < m; row++)
wsumA[row] = n_max;
}
else {
for (int_t row = 0; row < m; row++)
wsumA[row] = Xcsr_p[row+1] - Xcsr_p[row]
+ (Xcsr_p[row+1] == Xcsr_p[row]);
}
}
if (item_bias && (user_bias || use_cg_B))
{
if (Xfull != NULL) {
for (int_t col = 0; col < n; col++)
wsumB[col] = m - cnt_NA_bycol[col]
+ (cnt_NA_bycol[col] == m);
}
else if (NA_as_zero_X) {
for (int_t col = 0; col < n; col++)
wsumB[col] = m_max;
}
else {
for (int_t col = 0; col < n; col++)
wsumB[col] = Xcsc_p[col+1] - Xcsc_p[col]
+ (Xcsc_p[col+1] == Xcsc_p[col]);
}
}
}
if (scale_lam_sideinfo)
{
if (user_bias || weight != NULL)
{
if (U != NULL) {
for (int_t row = 0; row < m_u; row++)
wsumA[row] += p - cnt_NA_u_byrow[row];
}
else if (NA_as_zero_U) {
for (int_t row = 0; row < m_max; row++)
wsumA[row] += p;
}
else if (nnz_U) {
for (int_t row = 0; row < m_u; row++)
wsumA[row] += U_csr_p[row+1] - U_csr[row];
}
}
if (weight != NULL || (item_bias && (user_bias || use_cg_B)))
{
if (II != NULL) {
for (int_t col = 0; col < n_i; col++)
wsumB[col] += q - cnt_NA_i_byrow[col];
}
else if (NA_as_zero_I) {
for (int_t col = 0; col < n_max; col++)
wsumB[col] += q;
}
else if (nnz_I) {
for (int_t col = 0; col < n_i; col++)
wsumB[col] += I_csr_p[col+1] - I_csr[col];
}
}
}
if (scale_bias_const && has_bias)
{
if (user_bias) {
double wmean = 0;
int_t mlim = (Xfull == NULL && NA_as_zero_X)? m_max : m;
for (int_t row = 0; row < mlim; row++)
wmean += (wsumA[row] - wmean) / (double)(row+1);
*scaling_biasA = wmean;
}
if (item_bias) {
double wmean = 0;
int_t nlim = (Xfull == NULL && NA_as_zero_X)? n_max : n;
for (int_t col = 0; col < nlim; col++)
wmean += (wsumB[col] - wmean) / (double)(col+1);
*scaling_biasB = wmean;
}
}
}
if ((scale_lam || scale_lam_sideinfo) && scale_bias_const)
{
if (lam_unique_copy == NULL)
{
lam_unique_copy = (real_t*)malloc(6*sizeof(real_t));
l1_lam_unique_copy = (real_t*)malloc(6*sizeof(real_t));
if (lam_unique_copy == NULL || l1_lam_unique_copy == NULL)
goto throw_oom;
if (lam_unique != NULL)
memcpy(lam_unique_copy, lam_unique, 6*sizeof(real_t));
else
for (int_t ix = 0; ix < 6; ix++) lam_unique_copy[ix] = lam;
if (l1_lam_unique != NULL)
memcpy(l1_lam_unique_copy, l1_lam_unique, 6*sizeof(real_t));
else
for (int_t ix = 0; ix < 6;ix++) l1_lam_unique_copy[ix] = l1_lam;
}
if (user_bias) {
lam_unique_copy[0] *= *scaling_biasA;
l1_lam_unique_copy[0] *= *scaling_biasA;
}
if (item_bias) {
lam_unique_copy[1] *= *scaling_biasB;
l1_lam_unique_copy[1] *= *scaling_biasB;
}
lam_unique = lam_unique_copy;
l1_lam_unique = l1_lam_unique_copy;
}
/* Initialize biases */
if (has_bias && reset_values)
{
if (user_bias != item_bias)
{
if (user_bias)
{
retval = initialize_biases_onesided(
Xfull,
(Xfull == NULL && NA_as_zero_X)? m_max : m,
(Xfull == NULL && NA_as_zero_X)? n_max : n,
false, cnt_NA_byrow,
Xcsr_p, Xcsr_i, Xcsr,
weight, weightR,
center? (*glob_mean) : 0, NA_as_zero_X, nonneg,
(lam_unique != NULL)? lam_unique[0] : lam,
scale_lam && !scale_bias_const,
wsumA,
biasA,
nthreads
);
if (retval == 1) goto throw_oom;
}
else if (use_cg_B)
{
retval = initialize_biases_onesided(
(Xtrans == NULL)? Xfull : Xtrans,
(Xfull == NULL && NA_as_zero_X)? n_max : n,
(Xfull == NULL && NA_as_zero_X)? m_max : m,
Xtrans == NULL, cnt_NA_bycol,
Xcsc_p, Xcsc_i, Xcsc,
weight, weightC,
center? (*glob_mean) : 0, NA_as_zero_X, nonneg,
(lam_unique != NULL)? lam_unique[1] : lam,
scale_lam && !scale_bias_const,
wsumB,
biasB,
nthreads
);
if (retval == 1) goto throw_oom;
}
}
else
{
retval = initialize_biases_twosided(
Xfull, Xtrans,
cnt_NA_byrow, cnt_NA_bycol,
m, n,
NA_as_zero_X, nonneg, center? (*glob_mean) : (0.),
Xcsr_p, Xcsr_i, Xcsr,
Xcsc_p, Xcsc_i, Xcsc,
weight, Wtrans,
weightR, weightC,
(lam_unique != NULL)? lam_unique[0] : lam,
(lam_unique != NULL)? lam_unique[1] : lam,
scale_lam && !scale_bias_const,
wsumA, wsumB,
biasA, biasB,
nthreads
);
if (retval == 1) goto throw_oom;
}
if (weight == NULL)
{
free(wsumA); wsumA = NULL;
free(wsumB); wsumB = NULL;
}
}
/* Initialize values as necessary. Note that it is not necessary to
   initialize all the matrices, because (a) if using cholesky or CD,
   the current values of the matrix to optimize in a given iteration
   do not matter; (b) if using CG, will reset them to zero at the
   first iteration (save for the bias), thus their values don't matter
   either. Same goes for setting matrices as non-negative. */
if (reset_values)
{
bool fill_B = (II != NULL || I_csr_p != NULL || add_implicit_features);
ArraysToFill arrays =
#ifndef __cplusplus
(ArraysToFill)
#endif
{
A, (size_t)m_max*(size_t)k_totA,
fill_B? B : NULL,
fill_B? ((size_t)n_max*(size_t)k_totB) : 0
};
retval = rnorm_parallel(arrays, seed, nthreads);
if (retval != 0) goto throw_oom;
if (nonneg)
{
for (size_t ix = 0; ix < (size_t)m_max*(size_t)k_totA; ix++)
A[ix] = fabs_t(A[ix]);
if (fill_B)
for (size_t ix = 0; ix < (size_t)n_max*(size_t)k_totB; ix++)
B[ix] = fabs_t(B[ix]);
}
if (use_cg)
{
if (!fill_B)
set_to_zero_(B, (size_t)n_max*(size_t)k_totB, nthreads);
if (U != NULL || U_csr_p != NULL)
set_to_zero_(C, (size_t)p*(size_t)(k_user+k), nthreads);
if (II != NULL || I_csr_p != NULL)
set_to_zero_(D, (size_t)q*(size_t)(k_item+k), nthreads);
}
}
if (include_all_X && add_implicit_features && n_max > n)
{
set_to_zero_(Bi + (size_t)n*(size_t)(k+k_main),
(size_t)(n_max-n)*(size_t)(k+k_main),
nthreads);
}
if (has_bias)
{
copy_mat(m_max, k_user+k+k_main,
A, k_user+k+k_main,
A_bias, k_user+k+k_main + 1);
copy_mat(n_max, k_item+k+k_main,
B, k_item+k+k_main,
B_bias, k_item+k+k_main + 1);
/* TODO: one of these two is probably redundant depending on
parameters, find out and eliminate it. */
if (user_bias) {
if (m_max > m)
set_to_zero(biasA + m, m_max - m);
cblas_tcopy(m_max, biasA, 1,
A_bias + k_user+k+k_main, k_user+k+k_main + 1);
}
else
for (size_t ix = 0; ix < (size_t)m_max; ix++)
A_bias[(size_t)(k_user+k+k_main)
+ ix*(size_t)(k_user+k+k_main + 1)]
= 1.;
if (item_bias) {
if (n_max > n)
set_to_zero(biasB + n, n_max - n);
cblas_tcopy(n_max, biasB, 1,
B_bias + k_item+k+k_main, k_item+k+k_main + 1);
}
else
for (size_t ix = 0; ix < (size_t)n_max; ix++)
B_bias[(size_t)(k_item+k+k_main)
+ ix*(size_t)(k_item+k+k_main + 1)]
= 1.;
}
if (should_stop_procedure)
{
print_err_msg("Procedure aborted before starting optimization.\n");
retval = 3;
if (!handle_interrupt)
goto cleanup;
else
goto terminate_early;
}
if (verbose) {
printf("Starting ALS optimization routine\n\n");
fflush(stdout);
}
for (int_t iter = 0; iter < niter; iter++)
{
if (iter == niter - 1 && use_cg && finalize_chol) {
use_cg = false;
use_cg_A = false;
use_cg_B = false;
}
/* Optimize C and D (they are independent of each other) */
if (should_stop_procedure) goto check_interrupt;
if (U != NULL || nnz_U) {
if (verbose) {
printf("Updating C ...");
fflush(stdout);
}
if ((size_t)n*(size_t)(k_user+k+k_main+user_bias)
<
(size_t)square(k_user+k))
{
filled_BeTBeChol = false;
}
filled_CtUbias = false;
optimizeA(
C, k_user+k,
A_bias, k_user+k+k_main+(int)(user_bias||item_bias),
p, m_u, k_user+k,
U_csc_p, U_csc_i, U_csc,
(Utrans != NULL)? (Utrans) : (U),
(Utrans != NULL)? m_u : p,
full_dense_u, near_dense_u_col, some_full_u_col,
cnt_NA_u_bycol, (real_t*)NULL, NA_as_zero_U,
(lam_unique == NULL)? (lam/w_user) : (lam_unique[4]/w_user),
(lam_unique == NULL)? (lam/w_user) : (lam_unique[4]/w_user),
(l1_lam_unique == NULL)?
(l1_lam/w_user) : (l1_lam_unique[4]/w_user),
(l1_lam_unique == NULL)?
(l1_lam/w_user) : (l1_lam_unique[4]/w_user),
scale_lam, false, (real_t*)NULL,
Utrans == NULL,
nthreads,
use_cg && !nonneg_C, max_cg_steps,
nonneg_C, max_cd_steps,
(real_t*)NULL,
(real_t*)NULL, (real_t*)NULL, 0., U_colmeans, 1.,
false,
((size_t)n*(size_t)(k_user+k+k_main+user_bias)
>=
(size_t)square(k_user+k) )?
(precomputedTransBtBinvBt) : (precomputedBeTBeChol),
&ignore,
buffer_real_t
);
if (verbose) {
printf(" done\n");
fflush(stdout);
}
}
if (should_stop_procedure) goto check_interrupt;
if (II != NULL || nnz_I) {
if (verbose) {
printf("Updating D ...");
fflush(stdout);
}
if ((size_t)n*(size_t)(k_user+k+k_main+user_bias)
<
(size_t)square(k_item+k))
{
if (k_item > k_user + user_bias)
filled_BeTBeChol = false;
}
optimizeA(
D, k_item+k,
B_bias, k_item+k+k_main+(int)(user_bias||item_bias),
q, n_i, k_item+k,
I_csc_p, I_csc_i, I_csc,
(Itrans != NULL)? (Itrans) : (II),
(Itrans != NULL)? n_i : q,
full_dense_i, near_dense_i_col, some_full_i_col,
cnt_NA_i_bycol, (real_t*)NULL, NA_as_zero_I,
(lam_unique == NULL)? (lam/w_item) : (lam_unique[5]/w_item),
(lam_unique == NULL)? (lam/w_item) : (lam_unique[5]/w_item),
(l1_lam_unique == NULL)?
(l1_lam/w_item) : (l1_lam_unique[5]/w_item),
(l1_lam_unique == NULL)?
(l1_lam/w_item) : (l1_lam_unique[5]/w_item),
scale_lam, false, (real_t*)NULL,
Itrans == NULL,
nthreads,
use_cg && !nonneg_D, max_cg_steps,
nonneg_D, max_cd_steps,
(real_t*)NULL,
(real_t*)NULL, (real_t*)NULL, 0., I_colmeans, 1.,
false,
((size_t)n*(size_t)(k_user+k+k_main+user_bias)
>=
(size_t)square(k_item+k) )?
(precomputedTransBtBinvBt)
:
((k_item <= k_user + user_bias)?
(precomputedBeTBeChol) : ((real_t*)NULL)),
&ignore,
buffer_real_t
);
if (verbose) {
printf(" done\n");
fflush(stdout);
}
}
/* Optimizing implicit-features matrices (also independent) */
if (add_implicit_features)
{
if (should_stop_procedure) goto check_interrupt;
if (verbose) {
printf("Updating Bi...");
fflush(stdout);
}
filled_BtB = false;
optimizeA(
Bi, k+k_main,
A_bias + k_user, k_user+k+k_main+(user_bias||item_bias),
n, m, k+k_main,
Xcsc_p, Xcsc_i, (Xfull == NULL)? (Xones) : ((real_t*)NULL),
(Xfull == NULL)? ((real_t*)NULL) : (Xones),
n,
Xfull != NULL, false, true,
(Xfull == NULL)? ((int_t*)NULL) : (zeros_n),
(real_t*)NULL, Xfull == NULL,
(lam_unique == NULL)?
(lam/w_implicit) : (lam_unique[3]/w_implicit),
(lam_unique == NULL)?
(lam/w_implicit) : (lam_unique[3]/w_implicit),
(l1_lam_unique == NULL)?
(l1_lam/w_implicit) : (l1_lam_unique[3]/w_implicit),
(l1_lam_unique == NULL)?
(l1_lam/w_implicit) : (l1_lam_unique[3]/w_implicit),
scale_lam, false, (real_t*)NULL,
Xfull != NULL,
nthreads,
false, 0,
nonneg, max_cd_steps,
(real_t*)NULL,
(real_t*)NULL, (real_t*)NULL, 0., (real_t*)NULL, 1.,
false,
precomputedBtB, &ignore,
buffer_real_t
);
if (verbose) {
printf(" done\n");
fflush(stdout);
}
if (should_stop_procedure) goto check_interrupt;
if (verbose) {
printf("Updating Ai...");
fflush(stdout);
}
optimizeA(
Ai, k+k_main,
B_bias + k_item, k_item+k+k_main+(user_bias||item_bias),
m, n, k+k_main,
Xcsr_p, Xcsr_i, (Xfull == NULL)? (Xones) : ((real_t*)NULL),
(Xfull == NULL)? ((real_t*)NULL) : (Xones),
m,
Xfull != NULL, false, true,
(Xfull == NULL)? ((int_t*)NULL) : (zeros_m),
(real_t*)NULL, Xfull == NULL,
(lam_unique == NULL)?
(lam/w_implicit) : (lam_unique[2]/w_implicit),
(lam_unique == NULL)?
(lam/w_implicit) : (lam_unique[2]/w_implicit),
(l1_lam_unique == NULL)?
(l1_lam/w_implicit) : (l1_lam_unique[2]/w_implicit),
(l1_lam_unique == NULL)?
(l1_lam/w_implicit) : (l1_lam_unique[2]/w_implicit),
scale_lam, false, (real_t*)NULL,
false,
nthreads,
false, 0,
nonneg, max_cd_steps,
(real_t*)NULL,
(real_t*)NULL, (real_t*)NULL, 0., (real_t*)NULL, 1.,
false,
precomputedBtB, &ignore,
buffer_real_t
);
filled_BiTBi = true;
if (verbose) {
printf(" done\n");
fflush(stdout);
}
}
/* Apply bias beforehand, as its column will be fixed */
if (item_bias)
{
for (int_t ix = 0; ix < m; ix++)
A_bias[(size_t)(k_user+k+k_main)
+ ix*(size_t)(k_user+k+k_main + 1)] = 1.;
if (use_cg_B && user_bias)
cblas_tcopy(n, biasB, 1, B + (k_totB-1), k_totB);
}
if (user_bias && (!NA_as_zero_X || Xfull != NULL))
{
if (Xtrans != NULL) {
#pragma omp parallel for schedule(static) \
num_threads(cap_to_4(nthreads)) \
shared(Xtrans, m, n, biasA)
for (size_t_for col = 0; col < (size_t)n; col++)
for (size_t row = 0; row < (size_t)m; row++)
Xtrans[row + col*m] = Xtrans_orig[row + col*m]
- biasA[row];
}
else if (Xfull != NULL) {
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
Xfull[col + row*n] = Xfull_orig[col + row*n]
- biasA[row];
}
else {
#pragma omp parallel for schedule(static) \
num_threads(cap_to_4(nthreads)) \
shared(nnz, Xcsc, Xcsc_i, biasA)
for (size_t_for ix = 0; ix < nnz; ix++)
Xcsc[ix] = Xcsc_orig[ix] - biasA[Xcsc_i[ix]];
}
}
else if (user_bias && NA_as_zero_X && Xfull == NULL)
{
if (!center)
cblas_tgemv(CblasRowMajor, CblasTrans,
m, k+k_main+item_bias,
-1., A_bias + k_user, k_totA+has_bias,
biasA, 1,
0., buffer_BtX, 1);
else {
set_to_zero(buffer_BtX, k+k_main+item_bias);
for (size_t row = 0; row < (size_t)m; row++)
cblas_taxpy(k+k_main+item_bias,
-(biasA[row] + *glob_mean),
A_bias
+ (size_t)k_user
+ row*(size_t)(k_totA+has_bias), 1,
buffer_BtX, 1);
}
}
else if (NA_as_zero_X && center && Xfull == NULL)
{
set_to_zero(buffer_BtX, k+k_main+item_bias);
sum_by_cols(A_bias + k_user, buffer_BtX,
m, k+k_main,
k_totA+has_bias, nthreads);
if (item_bias)
buffer_BtX[k+k_main] = (real_t)m;
cblas_tscal(k+k_main+item_bias, -(*glob_mean), buffer_BtX, 1);
}
else if (Xfull != NULL && Xfull_orig != NULL &&
Xtrans == NULL && item_bias)
{
copy_arr_(Xfull_orig, Xfull, (size_t)m*(size_t)n, nthreads);
}
/* Optimize B */
if (should_stop_procedure) goto check_interrupt;
if (verbose) {
printf("Updating B ...");
fflush(stdout);
}
if (k_item + item_bias <= k_user + user_bias)
{
filled_CtCw = false;
filled_BeTBeChol = false;
}
if (item_bias <= user_bias)
{
filled_BtB = false;
filled_BeTBeChol = false;
}
filled_BiTBi = false;
/* TODO: it's possible to use more buffers here for the
case of k_item > k_user, avoid extra memory usage */
if (II != NULL || nnz_I || add_implicit_features)
optimizeA_collective(
B_bias, k_totB + has_bias, A_bias, k_totA + has_bias, D, Ai,
n, n_i, m, q,
k, k_main+(int)item_bias, k_item, k_user,
Xcsc_p, Xcsc_i, Xcsc,
(Xtrans != NULL)? (Xtrans) : (Xfull), (Xtrans != NULL)? m : n,
full_dense, near_dense_col, some_full_col,
cnt_NA_bycol,
(Xtrans != NULL)? (Wtrans) : ((Xfull == NULL)? weight:weightC),
NA_as_zero_X,
Xones, k_main, n,
add_implicit_features,
I_csr_p, I_csr_i, I_csr,
II, cnt_NA_i_byrow, I_colmeans,
full_dense_i, near_dense_i_row, some_full_i_row, NA_as_zero_I,
(lam_unique == NULL)? (lam) : (lam_unique[3]),
w_item, w_implicit,
(lam_unique == NULL)? (lam) : (lam_unique[item_bias? 1 : 3]),
(l1_lam_unique == NULL)? (l1_lam) : (l1_lam_unique[3]),
(l1_lam_unique == NULL)?
(l1_lam) : (l1_lam_unique[item_bias? 1 : 3]),
scale_lam, scale_lam_sideinfo,
scale_bias_const, wsumB,
Xfull != NULL && Xtrans == NULL,
nthreads,
use_cg_B && !nonneg, max_cg_steps,
nonneg, max_cd_steps,
biasB,
(buffer_BtX != NULL && (center || user_bias))?
(buffer_BtX) : ((real_t*)NULL),
(buffer_BtX != NULL && user_bias)?
(biasA) : ((real_t*)NULL),
*glob_mean,
false,
(item_bias <= user_bias)?
(precomputedBtB) : ((real_t*)NULL),
(k_item + item_bias <= k_user + user_bias)?
(precomputedCtCw) : ((real_t*)NULL),
(k_item + item_bias <= k_user + user_bias)?
(precomputedBeTBeChol) : ((real_t*)NULL),
precomputedBiTBi,
DtIbias,
&ignore, &ignore2, &ignore3, &ignore4, &ignore5,
buffer_real_t
);
else
optimizeA(
B_bias + k_item, k_item+k+k_main+(int)(user_bias||item_bias),
A_bias + k_user, k_user+k+k_main+(int)(user_bias||item_bias),
n, m, k+k_main+(int)item_bias,
Xcsc_p, Xcsc_i, Xcsc,
(Xtrans != NULL)? (Xtrans) : (Xfull),
(Xtrans != NULL)? m : n,
full_dense, near_dense_col, some_full_col,
cnt_NA_bycol,
(Xtrans != NULL)? (Wtrans) : ((Xfull == NULL)? weight:weightC),
NA_as_zero_X,
(lam_unique == NULL)? (lam) : (lam_unique[3]),
(lam_unique == NULL)? (lam) : (lam_unique[item_bias? 1 : 3]),
(l1_lam_unique == NULL)? (l1_lam) : (l1_lam_unique[3]),
(l1_lam_unique == NULL)?
(l1_lam) : (l1_lam_unique[item_bias? 1 : 3]),
scale_lam, scale_bias_const, wsumB,
Xfull != NULL && Xtrans == NULL,
nthreads,
use_cg && !nonneg, max_cg_steps,
nonneg, max_cd_steps,
biasB,
(buffer_BtX != NULL && (center || user_bias))?
(buffer_BtX) : ((real_t*)NULL),
(buffer_BtX != NULL && user_bias)?
(biasA) : ((real_t*)NULL),
*glob_mean, (real_t*)NULL, 1.,
false,
((size_t)n*(size_t)(k_user+k+k_main+user_bias)
>=
(size_t)square(k+k_main+item_bias) )?
(precomputedTransBtBinvBt)
:
((item_bias <= k_user + user_bias)?
(precomputedBeTBeChol) : ((real_t*)NULL)),
&ignore,
buffer_real_t
);
if (verbose) {
printf(" done\n");
fflush(stdout);
}
if (item_bias)
cblas_tcopy(n, B_bias + k_item+k+k_main, k_item+k+k_main + 1,
biasB, 1);
/* Apply bias beforehand, as its column will be fixed */
if (user_bias)
{
for (int_t ix = 0; ix < n; ix++)
B_bias[(size_t)(k_item+k+k_main)
+ ix*(size_t)(k_item+k+k_main + 1)] = 1.;
if (use_cg_A && item_bias)
cblas_tcopy(m, biasA, 1, A + (k_totA-1), k_totA);
}
if (item_bias && (!NA_as_zero_X || Xfull != NULL))
{
if (Xfull != NULL) {
#pragma omp parallel for schedule(static) \
num_threads(cap_to_4(nthreads)) \
shared(Xfull, m, n, biasB)
for (size_t_for row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
Xfull[col + row*n] = Xfull_orig[col + row*n]
- biasB[col];
}
else {
#pragma omp parallel for schedule(static) \
num_threads(cap_to_4(nthreads)) \
shared(nnz, Xcsr, Xcsr_i, biasB)
for (size_t_for ix = 0; ix < nnz; ix++)
Xcsr[ix] = Xcsr_orig[ix] - biasB[Xcsr_i[ix]];
}
}
else if (item_bias && NA_as_zero_X && Xfull == NULL)
{
if (!center)
cblas_tgemv(CblasRowMajor, CblasTrans,
n, k+k_main+user_bias,
-1., B_bias + k_item, k_totB+has_bias,
biasB, 1,
0., buffer_BtX, 1);
else {
set_to_zero(buffer_BtX, k+k_main+user_bias);
for (size_t col = 0; col < (size_t)n; col++)
cblas_taxpy(k+k_main+user_bias,
-(biasB[col] + *glob_mean),
B_bias
+ (size_t)k_item
+ col*(size_t)(k_totB+has_bias), 1,
buffer_BtX, 1);
}
}
else if (NA_as_zero_X && center && Xfull == NULL)
{
set_to_zero(buffer_BtX, k+k_main+user_bias);
sum_by_cols(B_bias + k_item, buffer_BtX,
n, k+k_main,
k_totB+has_bias, nthreads);
if (user_bias)
buffer_BtX[k+k_main] = (real_t)n;
cblas_tscal(k+k_main+user_bias, -(*glob_mean), buffer_BtX, 1);
}
else if (Xfull != NULL && Xfull_orig != NULL &&
Xtrans == NULL && user_bias)
{
copy_arr_(Xfull_orig, Xfull, (size_t)m*(size_t)n, nthreads);
}
/* Optimize A */
filled_BtB = false;
filled_CtCw = false;
filled_BeTBeChol = false;
if (should_stop_procedure) goto check_interrupt;
if (verbose) {
printf("Updating A ...");
fflush(stdout);
}
if (U != NULL || nnz_U || add_implicit_features)
optimizeA_collective(
A_bias, k_totA + has_bias, B_bias, k_totB + has_bias, C, Bi,
m, m_u, n, p,
k, k_main+(int)user_bias, k_user, k_item,
Xcsr_p, Xcsr_i, Xcsr,
Xfull, n, full_dense, near_dense_row, some_full_row,
cnt_NA_byrow,
(Xfull == NULL)? (weightR) : (weight),
NA_as_zero_X,
Xones, k_main, m,
add_implicit_features,
U_csr_p, U_csr_i, U_csr,
U, cnt_NA_u_byrow, U_colmeans,
full_dense_u, near_dense_u_row, some_full_u_row, NA_as_zero_U,
(lam_unique == NULL)? (lam) : (lam_unique[2]),
w_user, w_implicit,
(lam_unique == NULL)? (lam) : (lam_unique[user_bias? 0 : 2]),
(l1_lam_unique == NULL)? (l1_lam) : (l1_lam_unique[2]),
(l1_lam_unique == NULL)?
(l1_lam) : (l1_lam_unique[user_bias? 0 : 2]),
scale_lam, scale_lam_sideinfo,
scale_bias_const, wsumA,
false,
nthreads,
use_cg_B && !nonneg, max_cg_steps,
nonneg, max_cd_steps,
biasA,
(buffer_BtX != NULL && (center || item_bias))?
(buffer_BtX) : ((real_t*)NULL),
(buffer_BtX != NULL && item_bias)?
(biasB) : ((real_t*)NULL),
*glob_mean,
precompute_for_predictions,
precomputedBtB, precomputedCtCw, precomputedBeTBeChol,
precomputedBiTBi,
precomputedCtUbias,
&filled_BtB, &filled_CtCw, &filled_BeTBeChol, &filled_CtUbias,
&CtC_is_scaled,
buffer_real_t
);
else
optimizeA(
A_bias + k_user, k_user+k+k_main+(int)(user_bias||item_bias),
B_bias + k_item, k_item+k+k_main+(int)(user_bias||item_bias),
m, n, k+k_main+(int)user_bias,
Xcsr_p, Xcsr_i, Xcsr,
Xfull, n,
full_dense, near_dense_row, some_full_row,
cnt_NA_byrow,
(Xfull == NULL)? (weightR) : (weight),
NA_as_zero_X,
(lam_unique == NULL)? (lam) : (lam_unique[2]),
(lam_unique == NULL)? (lam) : (lam_unique[user_bias? 0 : 2]),
(l1_lam_unique == NULL)? (l1_lam) : (l1_lam_unique[2]),
(l1_lam_unique == NULL)?
(l1_lam) : (l1_lam_unique[user_bias? 0 : 2]),
scale_lam, scale_bias_const, wsumA,
false,
nthreads,
use_cg && !nonneg, max_cg_steps,
nonneg, max_cd_steps,
biasA,
(buffer_BtX != NULL && (center || item_bias))?
(buffer_BtX) : ((real_t*)NULL),
(buffer_BtX != NULL && item_bias)?
(biasB) : ((real_t*)NULL),
*glob_mean, (real_t*)NULL, 1.,
iter == niter - 1,
precomputedBtB, &filled_BtB,
buffer_real_t
);
if (verbose) {
printf(" done\n");
fflush(stdout);
}
if (user_bias)
cblas_tcopy(m, A_bias + k_user+k+k_main, k_user+k+k_main + 1,
biasA, 1);
if (verbose) {
printf("\tCompleted ALS iteration %2d\n\n", iter+1);
fflush(stdout);
}
check_interrupt:
if (should_stop_procedure)
{
if (precompute_for_predictions && handle_interrupt)
goto terminate_early;
else
goto cleanup;
}
}
if (verbose) {
if (!isnan(A[k_user]))
printf("ALS procedure terminated successfully\n");
else
printf("ALS procedure failed\n");
fflush(stdout);
}
terminate_early:
if (user_bias || item_bias)
{
copy_mat(
m_max, k_user+k+k_main,
A_bias, k_user+k+k_main + 1,
A, k_user+k+k_main
);
copy_mat(
n_max, k_item+k+k_main,
B_bias, k_item+k+k_main + 1,
B, k_item+k+k_main
);
if (m_max > m && user_bias)
set_to_zero(biasA + m, m_max-m);
if (n_max > n && item_bias)
set_to_zero(biasB + n, n_max-n);
if (free_BtX && precomputedBtXbias != NULL &&
(item_bias||center) && NA_as_zero_X)
{
copy_arr(buffer_BtX, precomputedBtXbias, k+k_main+user_bias);
precomputedBtXbias = NULL;
}
}
precompute:
if (precompute_for_predictions)
{
if ((NA_as_zero_X && (center||item_bias) && precomputedBtXbias != NULL)
&&
(!filled_BtB || Xfull != NULL)
&&
!back_to_precompute)
{
set_to_zero(precomputedBtXbias, k+k_main+user_bias);
if (item_bias)
{
if (n_max > n && center)
{
sum_by_cols(B_bias
+ k_item
+ (size_t)n*
(size_t)(k_item+k+k_main+has_bias),
precomputedBtXbias,
n_max - n, k+k_main,
k_item+k+k_main+has_bias, nthreads);
if (user_bias)
precomputedBtXbias[k+k_main] = (real_t)(n_max - n);
cblas_tscal(k+k_main+user_bias, -(*glob_mean),
precomputedBtXbias, 1);
}
if (!center)
cblas_tgemv(CblasRowMajor, CblasTrans,
n, k+k_main+user_bias,
-1., B_bias + k_item, k_item+k+k_main+has_bias,
biasB, 1,
0., precomputedBtXbias, 1);
else {
for (size_t col = 0; col < (size_t)n; col++)
cblas_taxpy(k+k_main+user_bias,
-(biasB[col] + *glob_mean),
B_bias
+ (size_t)k_item
+col*(size_t)(k_item+k+k_main+has_bias),
1,
precomputedBtXbias, 1);
}
}
else if (center)
{
set_to_zero(precomputedBtXbias, k+k_main+user_bias);
sum_by_cols(B_bias + k_item, precomputedBtXbias,
n_max, k+k_main,
k_item+k+k_main+has_bias, nthreads);
if (user_bias)
precomputedBtXbias[k+k_main] = (real_t)n_max;
cblas_tscal(k+k_main+user_bias, -(*glob_mean),
precomputedBtXbias, 1);
}
precomputedBtXbias = NULL;
}
if (add_implicit_features && !filled_BiTBi)
{
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k+k_main, n,
w_implicit, Bi, k+k_main,
0., precomputedBiTBi, k+k_main);
filled_BiTBi = true;
}
else if (add_implicit_features && use_cg && w_implicit != 1. &&
!back_to_precompute && !free_BiTBi)
{
/* TODO: revisit this */
cblas_tscal(square(k+k_main), w_implicit, precomputedBiTBi, 1);
}
if (!filled_BtB)
{
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k + k_main + user_bias, include_all_X? n_max : n,
1., B_bias + k_item, k_item+k+k_main+has_bias,
0., precomputedBtB, k+k_main+user_bias);
filled_BtB = true;
}
else if (include_all_X && n != n_max &&
buffer_real_t != NULL && !back_to_precompute)
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k + k_main + user_bias, n_max - n,
1., B_bias
+ k_item
+ (size_t)n*(size_t)(k_item+k+k_main+has_bias),
k_item+k+k_main+has_bias,
1., precomputedBtB, k+k_main+user_bias);
if (precomputedTransBtBinvBt != NULL && !finished_TransBtBinvBt &&
!add_implicit_features && !nonneg)
{
k_pred = k + k_main + (int)user_bias;
free_arr_use = false;
/* This is in case it needs an extra malloc, in which case will
first free up all the memory it allocated before, which is
no longer needed at this point. */
if (!back_to_precompute && buffer_real_t != NULL)
{
if (precomputedBeTBeChol != NULL && !filled_BeTBeChol)
arr_use = precomputedBeTBeChol;
else if (precomputedTransCtCinvCt != NULL &&
(size_t)p*(size_t)(k_user+k) >= (size_t)square(k_pred))
arr_use = precomputedTransCtCinvCt;
else if (precomputedCtCw != NULL && !filled_CtCw &&
k_user >= k_main + user_bias)
arr_use = precomputedCtCw;
else if (size_buffer >= (size_t)square(k_pred))
arr_use = buffer_real_t;
else {
back_to_precompute = true;
goto cleanup;
}
}
else
{
free_arr_use = true;
back_to_precompute = false;
arr_use = (real_t*)malloc((size_t)square(k_pred)
* sizeof(real_t));
if (arr_use == NULL) goto throw_oom;
}
copy_mat(include_all_X? n_max : n, k+k_main+user_bias,
B_bias + k_item, k_item+k+k_main+has_bias,
precomputedTransBtBinvBt, k+k_main+user_bias);
copy_arr(precomputedBtB, arr_use, square(k_pred));
add_to_diag(arr_use,
((lam_unique == NULL)? (lam) : (lam_unique[2]))
* (real_t)(scale_lam? (include_all_X? n_max : n) : 1),
k_pred);
if (lam_unique != NULL && user_bias && lam_unique[0]!=lam_unique[2])
arr_use[square(k_pred)-1]
+=
(lam_unique[0]-lam_unique[2])
* (real_t)(scale_lam? (include_all_X? n_max : n) : 1);
tposv_(&lo, &k_pred, include_all_X? &n_max : &n,
arr_use, &k_pred,
precomputedTransBtBinvBt, &k_pred, &ignore_int);
if (free_arr_use) free(arr_use);
arr_use = NULL;
finished_TransBtBinvBt = true;
}
if (p > 0)
{
if (precomputedTransCtCinvCt != NULL && !finished_TransCtCinvCt &&
!add_implicit_features && !nonneg)
{
k_pred = k_user + k;
free_arr_use = false;
arr_use = NULL;
if (!back_to_precompute && buffer_real_t != NULL)
{
if (precomputedBeTBeChol != NULL && !filled_BeTBeChol)
arr_use = precomputedBeTBeChol;
else if (size_buffer > (size_t)square(k_user+k))
arr_use = buffer_real_t;
else {
back_to_precompute = true;
goto cleanup;
}
}
else
{
free_arr_use = true;
back_to_precompute = false;
arr_use = (real_t*)malloc((size_t)square(k_pred)
* sizeof(real_t));
if (arr_use == NULL) goto throw_oom;
}
copy_arr_(C, precomputedTransCtCinvCt,
(size_t)p*(size_t)(k_user+k), nthreads);
if (!filled_CtCw) {
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k_pred, p,
1., C, k_pred,
0., precomputedCtCw, k_pred);
copy_arr(precomputedCtCw, arr_use, square(k_pred));
add_to_diag(arr_use,
((lam_unique == NULL)? (lam) : (lam_unique[2]))
* (real_t)(scale_lam? p : 1)
/ w_user,
k_pred);
if (w_user != 1.)
cblas_tscal(square(k_pred), w_user, precomputedCtCw, 1);
filled_CtCw = true;
CtC_is_scaled = true;
}
else {
copy_arr(precomputedCtCw, arr_use, square(k_pred));
if (w_user != 1.)
{
if (CtC_is_scaled)
cblas_tscal(square(k_pred), 1./w_user, arr_use, 1);
else {
cblas_tscal(square(k_user+k), w_user,
precomputedCtCw, 1);
CtC_is_scaled = true;
}
}
add_to_diag(arr_use,
((lam_unique == NULL)? (lam) : (lam_unique[2]))
* (real_t)(scale_lam? p : 1)
/ w_user,
k_pred);
}
tposv_(&lo, &k_pred, &p,
arr_use, &k_pred,
precomputedTransCtCinvCt, &k_pred, &ignore_int);
if (free_arr_use) free(arr_use);
arr_use = NULL;
finished_TransCtCinvCt = true;
}
if (precomputedCtCw != NULL && filled_CtCw &&
w_user != 1. && !CtC_is_scaled)
{
cblas_tscal(square(k_user+k), w_user, precomputedCtCw, 1);
CtC_is_scaled = true;
}
else if (!filled_CtCw && precomputedCtCw != NULL)
{
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k_user+k, p,
w_user, C, k_user+k,
0., precomputedCtCw, k_user+k);
filled_CtCw = true;
CtC_is_scaled = true;
}
}
if (precomputedBeTBeChol != NULL && !filled_BeTBeChol &&
(p || add_implicit_features) && !nonneg)
{
k_pred = k_user + k + k_main + (int)user_bias;
set_to_zero(precomputedBeTBeChol, square(k_pred));
copy_mat(k+k_main+user_bias, k+k_main+user_bias,
precomputedBtB, k+k_main+user_bias,
precomputedBeTBeChol + k_user + k_user*k_pred, k_pred);
if (p) {
if (filled_CtCw) {
if (!CtC_is_scaled && w_user != 1.) {
cblas_tscal(square(k_user+k), w_user,
precomputedCtCw, 1);
CtC_is_scaled = true;
}
sum_mat(k_user+k, k_user+k,
precomputedCtCw, k_user+k,
precomputedBeTBeChol, k_pred);
}
else {
if (precomputedCtCw != NULL) {
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k_user+k, p,
w_user, C, k_user+k,
0., precomputedCtCw, k_user+k);
sum_mat(k_user+k, k_user+k,
precomputedCtCw, k_user+k,
precomputedBeTBeChol, k_pred);
if (w_user != 1.)
cblas_tscal(square(k_user+k), w_user,
precomputedCtCw, 1);
filled_CtCw = true;
CtC_is_scaled = true;
} else {
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k_user+k, p,
w_user, C, k_user+k,
1., precomputedBeTBeChol, k_pred);
}
}
}
if (add_implicit_features)
sum_mat(k+k_main, k+k_main,
precomputedBiTBi, k+k_main,
precomputedBeTBeChol + k_user + k_user*k_pred, k_pred);
add_to_diag(precomputedBeTBeChol,
((lam_unique == NULL)? (lam) : (lam_unique[2]))
*
(real_t)(scale_lam_sideinfo?
(p+(include_all_X? n_max : n))
: (scale_lam? (include_all_X? n_max : n) : 1)),
k_user+k+k_main+user_bias);
if (lam_unique != NULL && user_bias && lam_unique[0]!=lam_unique[2])
precomputedBeTBeChol[square(k_pred)-1]
+=
(lam_unique[0]-lam_unique[2])
*
(real_t)(scale_lam_sideinfo?
(p+(include_all_X? n_max : n))
: (scale_lam? (include_all_X? n_max : n) : 1));
tpotrf_(&lo, &k_pred, precomputedBeTBeChol,&k_pred,&ignore_int);
filled_BeTBeChol = true;
}
if (!filled_CtUbias && U == NULL && nnz_U && U_colmeans != NULL &&
buffer_CtUbias == NULL && precomputedCtUbias != NULL)
{
cblas_tgemv(CblasRowMajor, CblasTrans, p, k_user+k,
w_user, C, k_user+k,
U_colmeans, 1,
0., precomputedCtUbias, 1);
filled_CtUbias = true;
}
back_to_precompute = false;
}
cleanup:
free(buffer_real_t); buffer_real_t = NULL;
free(Xtrans); Xtrans = NULL;
free(Wtrans); Wtrans = NULL;
free(Xcsr_p); Xcsr_p = NULL;
free(Xcsr_i); Xcsr_i = NULL;
free(Xcsr); Xcsr = NULL;
free(weightR); weightR = NULL;
free(Xcsc_p); Xcsc_p = NULL;
free(Xcsc_i); Xcsc_i = NULL;
free(Xcsc); Xcsc = NULL;
free(weightC); weightC = NULL;
free(Xones); Xones = NULL;
free(Utrans); Utrans = NULL;
free(U_csr_p); U_csr_p = NULL;
free(U_csr_i); U_csr_i = NULL;
free(U_csr); U_csr = NULL;
free(U_csc_p); U_csc_p = NULL;
free(U_csc_i); U_csc_i = NULL;
free(U_csc); U_csc = NULL;
free(Itrans); Itrans = NULL;
free(I_csr_p); I_csr_p = NULL;
free(I_csr_i); I_csr_i = NULL;
free(I_csr); I_csr = NULL;
free(I_csc_p); I_csc_p = NULL;
free(I_csc_i); I_csc_i = NULL;
free(I_csc); I_csc = NULL;
free(cnt_NA_byrow); cnt_NA_byrow = NULL;
free(cnt_NA_bycol); cnt_NA_bycol = NULL;
free(cnt_NA_u_byrow); cnt_NA_u_byrow = NULL;
free(cnt_NA_u_bycol); cnt_NA_u_bycol = NULL;
free(cnt_NA_i_byrow); cnt_NA_i_byrow = NULL;
free(cnt_NA_i_bycol); cnt_NA_i_bycol = NULL;
free(zeros_m); zeros_m = NULL;
free(zeros_n); zeros_n = NULL;
free(wsumA); wsumA = NULL;
free(wsumB); wsumB = NULL;
if (user_bias || item_bias) {
free(A_bias); A_bias = NULL;
if (B_plus_bias != B_bias && B_bias != B) {
free(B_bias); B_bias = NULL;
}
free(Xcsr_orig); Xcsr_orig = NULL;
free(Xcsc_orig); Xcsc_orig = NULL;
}
if (Xfull_orig != NULL && !free_Xfull) {
free(Xfull_orig); Xfull_orig = NULL;
}
if (Xtrans_orig != NULL) {
free(Xtrans_orig); Xtrans_orig = NULL;
}
if (free_BtX) {
free(buffer_BtX); buffer_BtX = NULL;
}
free(buffer_CtUbias); buffer_CtUbias = NULL;
free(DtIbias); DtIbias = NULL;
if (back_to_precompute) goto precompute;
free(lam_unique_copy); lam_unique_copy = NULL;
free(l1_lam_unique_copy); l1_lam_unique_copy = NULL;
if (free_BiTBi) {
free(precomputedBiTBi); precomputedBiTBi = NULL;
}
if (free_X) {
free(X); X = NULL;
}
if (free_Xfull) {
free(Xfull); Xfull = NULL;
}
if (free_U) {
free(U); U = NULL;
}
if (free_Usp) {
free(U_sp); U_sp = NULL;
}
if (free_I) {
free(II); II = NULL;
}
if (free_Isp) {
free(I_sp); I_sp = NULL;
}
#pragma omp critical
{
if (has_lock_on_handle && handle_is_locked)
{
signal(SIGINT, old_interrupt_handle);
handle_is_locked = false;
}
if (should_stop_procedure) retval = 3;
}
act_on_interrupt(retval, handle_interrupt, true);
return retval;
throw_oom:
{
retval = 1;
back_to_precompute = false;
if (verbose)
print_oom_message();
#pragma omp critical
{
if (should_stop_procedure)
{
signal(SIGINT, old_interrupt_handle);
raise(SIGINT);
}
}
goto cleanup;
}
}
/* TODO: the separation between implicit/explicit is no longer needed,
as the explicit one can now imitate the implicit. Should instead make
this function call the explicit one. */
int_t fit_collective_implicit_als
(
real_t *restrict A, real_t *restrict B,
real_t *restrict C, real_t *restrict D,
bool reset_values, int_t seed,
real_t *restrict U_colmeans, real_t *restrict I_colmeans,
int_t m, int_t n, int_t k,
int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz,
real_t lam, real_t *restrict lam_unique,
real_t l1_lam, real_t *restrict l1_lam_unique,
real_t *restrict U, int_t m_u, int_t p,
real_t *restrict II, int_t n_i, int_t q,
int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U,
int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I,
bool NA_as_zero_U, bool NA_as_zero_I,
int_t k_main, int_t k_user, int_t k_item,
real_t w_main, real_t w_user, real_t w_item,
real_t *restrict w_main_multiplier,
real_t alpha, bool adjust_weight, bool apply_log_transf,
int_t niter, int nthreads, bool verbose, bool handle_interrupt,
bool use_cg, int_t max_cg_steps, bool finalize_chol,
bool nonneg, int_t max_cd_steps, bool nonneg_C, bool nonneg_D,
bool precompute_for_predictions,
real_t *restrict precomputedBtB,
real_t *restrict precomputedBeTBe,
real_t *restrict precomputedBeTBeChol,
real_t *restrict precomputedCtUbias
)
{
int_t retval = 0;
if (k_user && U == NULL && nnz_U == 0) {
if (verbose)
fprintf(stderr, "Cannot pass 'k_user' without U data.\n");
retval = 2;
}
if (k_item && II == NULL && nnz_I == 0) {
if (verbose)
fprintf(stderr, "Cannot pass 'k_item' without I data.\n");
retval = 2;
}
if (k_main && nnz == 0) {
if (verbose)
fprintf(stderr, "Cannot pass 'k_main' without X data.\n");
retval = 2;
}
if ((U != NULL && NA_as_zero_U) ||
(II != NULL && NA_as_zero_I))
{
if (verbose)
fprintf(stderr, "Cannot pass 'NA_as_zero' with dense data.\n");
retval = 2;
}
if (retval == 2)
{
if (verbose) {
fflush(stderr);
}
return retval;
}
#ifndef _OPENMP
nthreads = 1;
#endif
int_t k_totA = k_user + k + k_main;
int_t k_totB = k_item + k + k_main;
int_t m_max = max2(m, m_u);
int_t n_max = max2(n, n_i);
bool free_X = false;
bool free_U = false;
bool free_Usp = false;
bool free_I = false;
bool free_Isp = false;
real_t *restrict buffer_real_t = NULL;
size_t size_bufferA = 0;
size_t size_bufferB = 0;
size_t size_bufferC = 0;
size_t size_bufferD = 0;
size_t size_buffer = 0;
size_t *Xcsr_p = (size_t*)malloc(((size_t)m+(size_t)1)*sizeof(size_t));
int_t *Xcsr_i = (int_t*)malloc(nnz*sizeof(int_t));
real_t *restrict Xcsr = (real_t*)malloc(nnz*sizeof(real_t));
size_t *Xcsc_p = (size_t*)malloc(((size_t)n+(size_t)1)*sizeof(size_t));
int_t *Xcsc_i = (int_t*)malloc(nnz*sizeof(int_t));
real_t *restrict Xcsc = (real_t*)malloc(nnz*sizeof(real_t));
real_t *restrict Utrans = NULL;
size_t *U_csr_p = NULL;
int_t *U_csr_i = NULL;
real_t *restrict U_csr = NULL;
size_t *U_csc_p = NULL;
int_t *U_csc_i = NULL;
real_t *restrict U_csc = NULL;
real_t *restrict Itrans = NULL;
size_t *I_csr_p = NULL;
int_t *I_csr_i = NULL;
real_t *restrict I_csr = NULL;
size_t *I_csc_p = NULL;
int_t *I_csc_i = NULL;
real_t *restrict I_csc = NULL;
int_t *restrict cnt_NA_u_byrow = NULL;
int_t *restrict cnt_NA_u_bycol = NULL;
int_t *restrict cnt_NA_i_byrow = NULL;
int_t *restrict cnt_NA_i_bycol = NULL;
bool full_dense_u = false;
bool near_dense_u_row = false;
bool near_dense_u_col = false;
bool some_full_u_row = false;
bool some_full_u_col = false;
bool full_dense_i = false;
bool near_dense_i_row = false;
bool near_dense_i_col = false;
bool some_full_i_row = false;
bool some_full_i_col = false;
real_t *restrict buffer_CtUbias = NULL;
real_t *restrict DtIbias = NULL;
bool filled_BtB = false;
bool filled_BeTBe = false;
bool filled_BeTBeChol = false;
bool filled_CtC = false;
bool filled_CtUbias = false;
bool allocated_CtC = false;
bool ignore = false;
real_t *restrict precomputedCtC = NULL;
real_t *restrict lam_unique_copy = NULL;
real_t *restrict l1_lam_unique_copy = NULL;
if (!use_cg) finalize_chol = false;
sig_t_ old_interrupt_handle = NULL;
bool has_lock_on_handle = false;
#pragma omp critical
{
if (!handle_is_locked)
{
handle_is_locked = true;
has_lock_on_handle = true;
should_stop_procedure = false;
old_interrupt_handle = signal(SIGINT, set_interrup_global_variable);
}
}
if (Xcsr_p == NULL || Xcsr_i == NULL || Xcsr == NULL ||
Xcsc_p == NULL || Xcsc_i == NULL || Xcsc == NULL)
{
goto throw_oom;
}
if (!precompute_for_predictions)
{
precomputedBtB = (real_t*)malloc((size_t)square(k+k_main)
* sizeof(real_t));
if (precomputedBtB == NULL) goto throw_oom;
}
if (precompute_for_predictions && (U != NULL || nnz_U) && use_cg)
{
/* This one may be reused when solving for A or B. */
precomputedCtC = (real_t*)malloc((size_t)square(max2(k_user, k_item)+k)
* sizeof(real_t));
if (precomputedCtC == NULL) goto throw_oom;
allocated_CtC = true;
}
if (U == NULL && NA_as_zero_U && precomputedCtUbias == NULL)
{
buffer_CtUbias = (real_t*)malloc((size_t)(k_user+k)*sizeof(real_t));
if (buffer_CtUbias == NULL) goto throw_oom;
precomputedCtUbias = buffer_CtUbias;
}
if (II == NULL && NA_as_zero_I)
{
DtIbias = (real_t*)malloc((size_t)(k_item+k)*sizeof(real_t));
if (DtIbias == NULL) goto throw_oom;
}
if (nonneg || nonneg_C || nonneg_D || l1_lam || l1_lam_unique != NULL)
{
use_cg = false;
}
#ifdef _FOR_R
if (U != NULL) R_nan_to_C_nan(U, (size_t)m_u*(size_t)p);
if (II != NULL) R_nan_to_C_nan(II, (size_t)n_i*(size_t)q);
#endif
if (apply_log_transf)
{
real_t *restrict temp = (real_t*)malloc(nnz*sizeof(real_t));
if (temp == NULL) goto throw_oom;
copy_arr_(X, temp, nnz, nthreads);
X = temp;
free_X = true;
for (size_t ix = 0; ix < nnz; ix++)
X[ix] = log_t(X[ix]);
}
if (alpha != 1.)
{
if (!free_X)
{
real_t *restrict temp = (real_t*)malloc(nnz*sizeof(real_t));
if (temp == NULL) goto throw_oom;
copy_arr_(X, temp, nnz, nthreads);
X = temp;
free_X = true;
}
tscal_large(X, alpha, nnz, nthreads);
}
coo_to_csr_and_csc(
ixA, ixB, X,
(real_t*)NULL, m, n, nnz,
Xcsr_p, Xcsr_i, Xcsr,
Xcsc_p, Xcsc_i, Xcsc,
(real_t*)NULL, (real_t*)NULL,
nthreads
);
if (free_X) {
free(X);
X = NULL;
free_X = false;
}
if (U == NULL && NA_as_zero_U)
m_u = m_max;
retval = preprocess_sideinfo_matrix(
&U, m_u, p,
U_row, U_col, &U_sp, nnz_U,
U_colmeans, &Utrans,
&U_csr_p, &U_csr_i, &U_csr,
&U_csc_p, &U_csc_i, &U_csc,
&cnt_NA_u_byrow, &cnt_NA_u_bycol,
&full_dense_u, &near_dense_u_row, &near_dense_u_col,
&some_full_u_row, &some_full_u_col,
NA_as_zero_U, nonneg_C, nthreads,
&free_U, &free_Usp
);
if (retval != 0) goto throw_oom;
if (free_Usp) {
free(U_sp);
U_sp = NULL;
free_Usp = false;
}
if (U != NULL || U_csr_p != NULL)
size_bufferC = buffer_size_optimizeA(
m_u, full_dense_u,
near_dense_u_col,
some_full_u_col,
Utrans == NULL,
U != NULL, false, NA_as_zero_U,
nonneg_C, l1_lam != 0. || l1_lam_unique != NULL,
k_user+k, nthreads,
U_colmeans != NULL,
precomputedBeTBeChol != NULL,
false,
use_cg && !nonneg_C, finalize_chol
);
if (II == NULL && NA_as_zero_I)
n_i = n_max;
retval = preprocess_sideinfo_matrix(
&II, n_i, q,
I_row, I_col, &I_sp, nnz_I,
I_colmeans, &Itrans,
&I_csr_p, &I_csr_i, &I_csr,
&I_csc_p, &I_csc_i, &I_csc,
&cnt_NA_i_byrow, &cnt_NA_i_bycol,
&full_dense_i, &near_dense_i_row, &near_dense_i_col,
&some_full_i_row, &some_full_i_col,
NA_as_zero_I, nonneg_D, nthreads,
&free_I, &free_Isp
);
if (retval != 0) goto throw_oom;
if (free_Isp) {
free(I_sp);
I_sp = NULL;
free_Isp = false;
}
if (II != NULL || I_csc_p != NULL)
size_bufferD = buffer_size_optimizeA(
n_i, full_dense_i,
near_dense_i_col,
some_full_i_col,
Itrans == NULL,
II != NULL, false, NA_as_zero_I,
nonneg_D, l1_lam != 0. || l1_lam_unique != NULL,
k_item+k, nthreads,
I_colmeans != NULL,
precomputedBeTBeChol != NULL && k_item+k <= k_user+k+k_main,
false,
use_cg && !nonneg_D, finalize_chol
);
if (U != NULL || U_csr_p != NULL)
size_bufferA = buffer_size_optimizeA_collective_implicit(
m, m_u, p,
k, k_main, k_user,
U == NULL && U_csr_p != NULL,
NA_as_zero_U,
nthreads,
use_cg && !nonneg,
nonneg, l1_lam != 0. || l1_lam_unique != NULL,
true,
precompute_for_predictions,
precompute_for_predictions,
allocated_CtC,
finalize_chol
);
else
size_bufferA = buffer_size_optimizeA_implicit(
k+k_main, nthreads,
true,
nonneg, l1_lam != 0. || l1_lam_unique != NULL,
use_cg && !nonneg, finalize_chol
);
if (II != NULL || I_csr_p != NULL)
size_bufferB = buffer_size_optimizeA_collective_implicit(
n, n_i, q,
k, k_main, k_item,
II == NULL && I_csr_p != NULL,
NA_as_zero_I,
nthreads,
use_cg && !nonneg,
nonneg, l1_lam != 0. || l1_lam_unique != NULL,
true,
precompute_for_predictions && k_item <= k_user,
precompute_for_predictions && k_item <= k_user,
allocated_CtC,
finalize_chol
);
else
size_bufferB = buffer_size_optimizeA_implicit(
k+k_main, nthreads,
true,
nonneg, l1_lam != 0. || l1_lam_unique != NULL,
use_cg && !nonneg, finalize_chol
);
size_buffer = max2(max2(size_bufferA, size_bufferB),
max2(size_bufferC, size_bufferD));
buffer_real_t = (real_t*)malloc(size_buffer * sizeof(real_t));
if (buffer_real_t == NULL) goto throw_oom;
if (reset_values)
{
bool fill_B = II != NULL || I_csr_p != NULL;
ArraysToFill arrays =
#ifndef __cplusplus
(ArraysToFill)
#endif
{
A, (size_t)m_max*(size_t)k_totA,
fill_B? B : NULL,
fill_B? ((size_t)n_max*(size_t)k_totB) : 0
};
retval = rnorm_parallel(arrays, seed, nthreads);
if (retval != 0) goto throw_oom;
if (nonneg)
{
for (size_t ix = 0; ix < (size_t)m_max*(size_t)k_totA; ix++)
A[ix] = fabs_t(A[ix]);
if (fill_B)
for (size_t ix = 0; ix < (size_t)n_max*(size_t)k_totB; ix++)
B[ix] = fabs_t(B[ix]);
}
if (use_cg)
{
if (!fill_B)
set_to_zero_(B, (size_t)n_max*(size_t)k_totB, nthreads);
if (U != NULL || U_csr_p != NULL)
set_to_zero_(C, (size_t)p*(size_t)(k_user+k), nthreads);
if (II != NULL || I_csr_p != NULL)
set_to_zero_(D, (size_t)q*(size_t)(k_item+k), nthreads);
}
}
*w_main_multiplier = 1.;
if (adjust_weight)
{
*w_main_multiplier = (long double)nnz
/
(long double)((size_t)m * (size_t)n);
w_main *= *w_main_multiplier;
}
/* This avoids differences in the scaling of the precomputed matrices */
if (w_main != 1.)
{
lam /= w_main;
l1_lam /= w_main;
w_user /= w_main;
w_item /= w_main;
if (lam_unique != NULL)
{
lam_unique_copy = (real_t*)malloc(6*sizeof(real_t));
if (lam_unique_copy == NULL) goto throw_oom;
for (int_t ix = 2; ix < 6; ix++) {
lam_unique_copy[ix] = lam_unique[ix] / w_main;
}
lam_unique = lam_unique_copy;
}
if (l1_lam_unique != NULL)
{
l1_lam_unique_copy = (real_t*)malloc(6*sizeof(real_t));
if (l1_lam_unique_copy == NULL) goto throw_oom;
for (int_t ix = 2; ix < 6; ix++) {
l1_lam_unique_copy[ix] = l1_lam_unique[ix] / w_main;
}
l1_lam_unique = l1_lam_unique_copy;
}
w_main = 1.;
}
if (should_stop_procedure)
{
if (!handle_interrupt)
goto cleanup;
else
goto precompute;
}
if (verbose) {
printf("Starting ALS optimization routine\n\n");
fflush(stdout);
}
for (int_t iter = 0; iter < niter; iter++)
{
if (iter == niter - 1 && use_cg && finalize_chol)
use_cg = false;
/* Optimize C and D (they are independent of each other) */
if (should_stop_procedure) goto check_interrupt;
if (U != NULL || nnz_U) {
if (verbose) {
printf("Updating C...");
fflush(stdout);
}
if (k_item+k <= k_user+k+k_main)
filled_BeTBeChol = false;
filled_CtUbias = false;
optimizeA(
C, k_user+k,
A, k_user+k+k_main,
p, m_u, k_user+k,
U_csc_p, U_csc_i, U_csc,
(Utrans != NULL)? (Utrans) : (U),
(Utrans != NULL)? m_u : p,
full_dense_u,
near_dense_u_col,
some_full_u_col,
cnt_NA_u_bycol, (real_t*)NULL, NA_as_zero_U,
(lam_unique == NULL)? (lam/w_user) : (lam_unique[4]/w_user),
(lam_unique == NULL)? (lam/w_user) : (lam_unique[4]/w_user),
(l1_lam_unique == NULL)?
(l1_lam/w_user) : (l1_lam_unique[4]/w_user),
(l1_lam_unique == NULL)?
(l1_lam/w_user) : (l1_lam_unique[4]/w_user),
false, false, (real_t*)NULL,
(Utrans != NULL)? (false) : (true),
nthreads,
use_cg && !nonneg_C, max_cg_steps,
nonneg_C, max_cd_steps,
(real_t*)NULL,
(real_t*)NULL, (real_t*)NULL, 0., U_colmeans, 1.,
false,
precomputedBeTBeChol,
&ignore,
buffer_real_t
);
if (verbose) {
printf(" done\n");
fflush(stdout);
}
}
if (should_stop_procedure) goto check_interrupt;
if (II != NULL || nnz_I) {
if (verbose) {
printf("Updating D...");
fflush(stdout);
}
filled_BeTBeChol = false;
optimizeA(
D, k_item+k,
B, k_item+k+k_main,
q, n_i, k_item+k,
I_csc_p, I_csc_i, I_csc,
(Itrans != NULL)? (Itrans) : (II),
(Itrans != NULL)? n_i : q,
full_dense_i,
near_dense_i_col,
some_full_i_col,
cnt_NA_i_bycol, (real_t*)NULL, NA_as_zero_I,
(lam_unique == NULL)? (lam/w_item) : (lam_unique[5]/w_item),
(lam_unique == NULL)? (lam/w_item) : (lam_unique[5]/w_item),
(l1_lam_unique == NULL)?
(l1_lam/w_item) : (l1_lam_unique[5]/w_item),
(l1_lam_unique == NULL)?
(l1_lam/w_item) : (l1_lam_unique[5]/w_item),
false, false, (real_t*)NULL,
(Itrans != NULL)? (false) : (true),
nthreads,
use_cg && !nonneg_D, max_cg_steps,
nonneg_D, max_cd_steps,
(real_t*)NULL,
(real_t*)NULL, (real_t*)NULL, 0., I_colmeans, 1.,
false,
(k_item+k <= k_user+k+k_main)?
(precomputedBeTBeChol) : ((real_t*)NULL),
&ignore,
buffer_real_t
);
if (verbose) {
printf(" done\n");
fflush(stdout);
}
}
/* Optimize B */
if (should_stop_procedure) goto check_interrupt;
if (verbose) {
printf("Updating B...");
fflush(stdout);
}
/* Precomputed matrices might get overwritten when solving for A,
or the procedure may get interrupted */
filled_BtB = false;
filled_BeTBe = false;
filled_BeTBeChol = false;
filled_CtC = false;
if (II != NULL || nnz_I)
optimizeA_collective_implicit(
B, A, D,
n, n_i, m, q,
k, k_main, k_item, k_user,
Xcsc_p, Xcsc_i, Xcsc,
I_csr_p, I_csr_i, I_csr,
II, cnt_NA_i_byrow, I_colmeans,
full_dense_i, near_dense_i_row, NA_as_zero_I,
(lam_unique == NULL)? (lam) : (lam_unique[3]),
(l1_lam_unique == NULL)? (l1_lam) : (l1_lam_unique[3]),
w_item,
nthreads, use_cg && !nonneg, max_cg_steps,
nonneg, max_cd_steps,
precomputedBtB,
(k_item <= k_user)? (precomputedBeTBe) : ((real_t*)NULL),
(k_item <= k_user)? (precomputedBeTBeChol) : ((real_t*)NULL),
precomputedCtC,
DtIbias,
&ignore, &ignore, &ignore, &ignore,
buffer_real_t
);
else
optimizeA_implicit(
B + k_item, k_item+k+k_main,
A + k_user, k_user+k+k_main,
n, m, k+k_main,
Xcsc_p, Xcsc_i, Xcsc,
(lam_unique == NULL)? (lam) : (lam_unique[3]),
(l1_lam_unique == NULL)? (l1_lam) : (l1_lam_unique[3]),
nthreads, use_cg && !nonneg, max_cg_steps,
nonneg, max_cd_steps,
precomputedBtB,
buffer_real_t
);
if (verbose) {
printf(" done\n");
fflush(stdout);
}
/* Optimize A */
if (should_stop_procedure) goto check_interrupt;
if (verbose) {
printf("Updating A...");
fflush(stdout);
}
if (U != NULL || nnz_U)
optimizeA_collective_implicit(
A, B, C,
m, m_u, n, p,
k, k_main, k_user, k_item,
Xcsr_p, Xcsr_i, Xcsr,
U_csr_p, U_csr_i, U_csr,
U, cnt_NA_u_byrow, U_colmeans,
full_dense_u, near_dense_u_row, NA_as_zero_U,
(lam_unique == NULL)? (lam) : (lam_unique[2]),
(l1_lam_unique == NULL)? (l1_lam) : (l1_lam_unique[2]),
w_user,
nthreads, use_cg && !nonneg, max_cg_steps,
nonneg, max_cd_steps,
precomputedBtB,
precomputedBeTBe,
precomputedBeTBeChol,
precomputedCtC,
precomputedCtUbias,
&filled_BeTBe, &filled_BeTBeChol,
&filled_CtC,
&filled_CtUbias,
buffer_real_t
);
else
optimizeA_implicit(
A + k_user, k_user+k+k_main,
B + k_item, k_item+k+k_main,
m, n, k+k_main,
Xcsr_p, Xcsr_i, Xcsr,
(lam_unique == NULL)? (lam) : (lam_unique[2]),
(l1_lam_unique == NULL)? (l1_lam) : (l1_lam_unique[2]),
nthreads,
use_cg && !nonneg, max_cg_steps,
nonneg, max_cd_steps,
precomputedBtB,
buffer_real_t
);
if (verbose) {
printf(" done\n");
fflush(stdout);
}
filled_BtB = true;
if (verbose) {
printf("\tCompleted ALS iteration %2d\n\n", iter+1);
fflush(stdout);
}
check_interrupt:
if (should_stop_procedure)
{
if (!handle_interrupt)
goto cleanup;
if (precompute_for_predictions)
goto precompute;
else
goto cleanup;
}
}
if (verbose) {
if (!isnan(A[k_user]))
printf("ALS procedure terminated successfully\n");
else
printf("ALS procedure failed\n");
fflush(stdout);
}
precompute:
if (precompute_for_predictions)
{
if (verbose) {
printf("Finishing precomputed matrices...");
fflush(stdout);
}
if (!filled_BtB)
{
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k+k_main, n,
1., B + k_item, k_totB,
0., precomputedBtB, k+k_main);
add_to_diag(precomputedBtB, lam, k+k_main);
}
else if (use_cg) {
add_to_diag(precomputedBtB, lam, k+k_main);
}
if (!filled_BeTBe && (U != NULL || nnz_U))
{
set_to_zero(precomputedBeTBe, square(k_totA));
if (filled_CtC)
{
if (w_user == 1.)
cblas_tscal(square(k_user+k), w_user, precomputedCtC, 1);
copy_mat(k_user+k, k_user+k,
precomputedCtC, k_user+k,
precomputedBeTBe, k_totA);
}
else
{
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k_user+k, p,
w_user, C, k_user+k,
0., precomputedBeTBe, k_totA);
}
sum_mat(k+k_main, k+k_main,
precomputedBtB, k+k_main,
precomputedBeTBe + k_user + k_user*k_totA, k_totA);
for (int_t ix = 0; ix < k_user; ix++)
precomputedBeTBe[ix + ix*k_totA] += lam;
}
if (!filled_BeTBeChol && (U != NULL || nnz_U) &&
precomputedBeTBeChol != NULL)
{
copy_arr(precomputedBeTBe, precomputedBeTBeChol, square(k_totA));
char lo = 'L';
int_t ignore_int = 0;
tpotrf_(&lo, &k_totA, precomputedBeTBeChol, &k_totA, &ignore_int);
}
if (!filled_CtUbias && U == NULL && nnz_U && U_colmeans != NULL &&
buffer_CtUbias == NULL && precomputedCtUbias != NULL)
{
cblas_tgemv(CblasRowMajor, CblasTrans, p, k_user+k,
w_user, C, k_user+k,
U_colmeans, 1,
0., precomputedCtUbias, 1);
filled_CtUbias = true;
}
if (verbose) {
printf(" done\n");
fflush(stdout);
}
}
cleanup:
free(buffer_real_t);
free(Xcsr_p);
free(Xcsr_i);
free(Xcsr);
free(Xcsc_p);
free(Xcsc_i);
free(Xcsc);
free(Utrans);
free(U_csr_p);
free(U_csr_i);
free(U_csr);
free(U_csc_p);
free(U_csc_i);
free(U_csc);
free(Itrans);
free(I_csr_p);
free(I_csr_i);
free(I_csr);
free(I_csc_p);
free(I_csc_i);
free(I_csc);
free(cnt_NA_u_byrow);
free(cnt_NA_u_bycol);
free(cnt_NA_i_byrow);
free(cnt_NA_i_bycol);
free(buffer_CtUbias);
free(DtIbias);
if (!precompute_for_predictions)
free(precomputedBtB);
free(precomputedCtC);
free(lam_unique_copy);
free(l1_lam_unique_copy);
if (free_X)
free(X);
if (free_U)
free(U);
if (free_Usp)
free(U_sp);
if (free_I)
free(II);
if (free_Isp)
free(I_sp);
#pragma omp critical
{
if (has_lock_on_handle && handle_is_locked)
{
signal(SIGINT, old_interrupt_handle);
handle_is_locked = false;
}
if (should_stop_procedure) retval = 3;
}
act_on_interrupt(retval, handle_interrupt, true);
return retval;
throw_oom:
{
retval = 1;
if (verbose)
print_oom_message();
#pragma omp critical
{
if (should_stop_procedure)
{
signal(SIGINT, old_interrupt_handle);
raise(SIGINT);
}
}
goto cleanup;
}
}
/* Pre-computes fixed helper matrices which later speed up obtaining factors
   for new users in the explicit-feedback collective model:
     BtB           : t(B)*B over the shared+main factor columns
     TransBtBinvBt : B rows solved against (BtB + diag(lam)) via Cholesky
     BtXbias       : -t(B)*(biasB + glob_mean), for the NA_as_zero_X case
     BeTBeChol     : Cholesky factor of the combined C/B block matrix
     BiTBi         : w_implicit * t(Bi)*Bi (implicit-features matrix)
     TransCtCinvCt : C rows solved against (CtC + diag(lam_C/w_user))
     CtCw          : w_user * t(C)*C
     CtUbias       : w_user * t(C)*U_colmeans, for the NA_as_zero_U case
   Each output pointer may be NULL to skip that particular computation.
   Returns 0 on success, 1 on out-of-memory. */
int_t precompute_collective_explicit
(
    real_t *restrict B, int_t n, int_t n_max, bool include_all_X,
    real_t *restrict C, int_t p,
    real_t *restrict Bi, bool add_implicit_features,
    real_t *restrict biasB, real_t glob_mean, bool NA_as_zero_X,
    real_t *restrict U_colmeans, bool NA_as_zero_U,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    bool user_bias,
    bool nonneg,
    real_t lam, real_t *restrict lam_unique,
    bool scale_lam, bool scale_lam_sideinfo,
    bool scale_bias_const, real_t scaling_biasA,
    real_t w_main, real_t w_user, real_t w_implicit,
    real_t *restrict B_plus_bias,
    real_t *restrict BtB,
    real_t *restrict TransBtBinvBt,
    real_t *restrict BtXbias,
    real_t *restrict BeTBeChol,
    real_t *restrict BiTBi,
    real_t *restrict TransCtCinvCt,
    real_t *restrict CtCw,
    real_t *restrict CtUbias
)
{
    int_t retval = 0;
    char lo = 'L';
    int_t ignore = 0;
    /* 'n_max' is the full item dimension; with 'include_all_X' every
       computation runs over all of it */
    if (n_max == 0) n_max = n;
    if (include_all_X) n = n_max;
    /* keep the incoming k_main for the implicit-features matrices, since
       'k_main' itself may be incremented below to account for the bias */
    int k_main_i = k_main;
    int_t k_pred = 0;
    real_t lam_last = lam;
    /* lam_unique: entry [0] is the user-bias regularization, [2] is 'A's */
    if (lam_unique != NULL)
    {
        lam_last = lam_unique[user_bias? 0 : 2];
        lam = lam_unique[2];
    }
    /* express all weights relative to w_main == 1 */
    if (w_main != 1.)
    {
        lam /= w_main;
        lam_last /= w_main;
        w_user /= w_main;
        w_implicit /= w_main;
    }
    real_t lam_B = lam;
    real_t lam_last_B = lam_last;
    real_t lam_C = lam;
    /* with scaled regularization, 'lam' grows with the number of entries
       (and 'lam_last' - the bias one - either with them or with the
       constant 'scaling_biasA') */
    if (scale_lam || scale_lam_sideinfo)
    {
        real_t multiplier = n + (scale_lam_sideinfo? p : 0);
        lam *= multiplier;
        lam_C *= (real_t)p;
        if (scale_bias_const)
            lam_last *= scaling_biasA;
        else
            lam_last *= multiplier;
        lam_B = lam;
        lam_last_B = lam_last;
    }
    real_t *arr_use = NULL;
    bool free_B_plus_bias = false;
    /* B_plus_bias = [B, ones]: extra all-ones column used to solve jointly
       for the user bias */
    if (user_bias && B != NULL)
    {
        if (B_plus_bias == NULL)
        {
            free_B_plus_bias = true;
            B_plus_bias = (real_t*)malloc(  (size_t)n_max
                                          * (size_t)(k_item+k+k_main+1)
                                          * sizeof(real_t));
            if (B_plus_bias == NULL) goto throw_oom;
        }
        append_ones_last_col(
            B, n_max, k_item+k+k_main,
            B_plus_bias
        );
    }
    /* from here on, 'B' includes the bias column and 'k_main' counts it */
    if (user_bias)
    {
        k_main++;
        B = B_plus_bias;
    }
    /* BtXbias = -t(B)*(biasB + glob_mean): constant right-hand-side term
       when missing 'X' entries are treated as zeros */
    if (NA_as_zero_X && BtXbias != NULL)
    {
        set_to_zero(BtXbias, k+k_main);
        /* rows beyond 'n' have no item bias, only the global mean */
        if (n_max > n && glob_mean != 0.)
        {
            sum_by_cols(B
                          + (size_t)k_item
                          + (size_t)n*(size_t)(k_item+k+k_main),
                        BtXbias,
                        n_max - n, k+k_main,
                        k_item+k+k_main, 1);
            cblas_tscal(k+k_main, -glob_mean, BtXbias, 1);
        }
        if (biasB != NULL)
        {
            if (glob_mean == 0.)
                cblas_tgemv(CblasRowMajor, CblasTrans,
                            n, k+k_main,
                            -1., B + k_item, k_item+k+k_main,
                            biasB, 1,
                            0., BtXbias, 1);
            else {
                /* combined per-item bias and global mean */
                for (size_t col = 0; col < (size_t)n; col++)
                    cblas_taxpy(k+k_main,
                                -(biasB[col] + glob_mean),
                                B
                                  + (size_t)k_item
                                  + col*(size_t)(k_item+k+k_main), 1,
                                BtXbias, 1);
            }
        }
        else if (glob_mean != 0.)
        {
            for (size_t col = 0; col < (size_t)n; col++)
                cblas_taxpy(k+k_main, -glob_mean,
                            B
                              + (size_t)k_item
                              + col*(size_t)(k_item+k+k_main), 1,
                            BtXbias, 1);
        }
    }
    /* BtB = t(B)*B (upper triangle), over the shared+main columns only */
    if (BtB != NULL)
    {
        set_to_zero(BtB, square(k+k_main));
        cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                    k+k_main, n,
                    1., B + k_item, k_item+k+k_main,
                    0., BtB, k+k_main);
    }
    /* BiTBi = w_implicit * t(Bi)*Bi; uses the pre-bias 'k_main_i' */
    if (Bi != NULL && add_implicit_features)
    {
        set_to_zero(BiTBi, square(k+k_main_i));
        cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                    k+k_main_i, n,
                    w_implicit, Bi, k+k_main_i,
                    0., BiTBi, k+k_main_i);
    }
    /* TransBtBinvBt: copy of B's rows solved in-place against
       (BtB + diag(lam_B)) through a Cholesky solve. Not applicable with
       non-negativity constraints or implicit features. */
    if (TransBtBinvBt != NULL && B != NULL && !nonneg && !add_implicit_features)
    {
        k_pred = k + k_main;
        /* 'BeTBeChol', if present, doubles as scratch here; it is
           recomputed from scratch further below */
        if (BeTBeChol != NULL)
            arr_use = BeTBeChol; /* temporary */
        else {
            arr_use = (real_t*)malloc((size_t)square(k+k_main)*sizeof(real_t));
            if (arr_use == NULL) goto throw_oom;
        }
        copy_arr(BtB, arr_use, square(k+k_main));
        add_to_diag(arr_use, lam_B, k+k_main);
        /* the last diagonal entry corresponds to the user bias, which may
           carry a different regularization */
        if (lam != lam_last)
            arr_use[square(k+k_main)-1] += (lam_last_B - lam_B);
        copy_mat(n, k+k_main,
                 B + k_item, k_item+k+k_main,
                 TransBtBinvBt, k+k_main);
        tposv_(&lo, &k_pred, &n,
               arr_use, &k_pred,
               TransBtBinvBt, &k_pred, &ignore);
        if (arr_use != BeTBeChol)
        {
            free(arr_use);
            arr_use = NULL;
        }
    }
    /* CtCw and TransCtCinvCt, analogous to the above but for the side-info
       matrix 'C' */
    if (p > 0 && C != NULL && CtCw != NULL)
    {
        set_to_zero(CtCw, square(k_user+k));
        cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                    k_user+k, p,
                    1., C, k_user+k,
                    0., CtCw, k_user+k);
        if (TransCtCinvCt != NULL && !add_implicit_features && !nonneg)
        {
            k_pred = k_user + k;
            copy_arr(C, TransCtCinvCt, (size_t)p*(size_t)(k_user+k));
            if (BeTBeChol != NULL)
                arr_use = BeTBeChol; /* temporary */
            else {
                arr_use = (real_t*)malloc((size_t)square(k_user+k)
                                            * sizeof(real_t));
                if (arr_use == NULL) goto throw_oom;
            }
            copy_arr(CtCw, arr_use, square(k_user+k));
            add_to_diag(arr_use, lam_C/w_user, k_user+k);
            tposv_(&lo, &k_pred, &p,
                   arr_use, &k_pred,
                   TransCtCinvCt, &k_pred, &ignore);
            if (arr_use != BeTBeChol)
            {
                free(arr_use);
                arr_use = NULL;
            }
        }
        /* apply 'w_user' only after the solve above, which needs plain CtC */
        if (w_user != 1.)
            cblas_tscal(square(k_user+k), w_user, CtCw, 1);
    }
    /* BeTBeChol: Cholesky factor of the full (k_user+k+k_main)^2 matrix:
       CtCw in the upper-left block, BtB (+ BiTBi) in the lower-right,
       plus diag(lam) */
    if (BeTBeChol != NULL && B != NULL &&
        (C != NULL || add_implicit_features) &&
        !nonneg)
    {
        set_to_zero(BeTBeChol, square(k_user+k+k_main));
        int_t k_totA = k_user + k + k_main;
        if (CtCw != NULL)
        {
            copy_mat(k+k_main, k+k_main,
                     BtB, k+k_main,
                     BeTBeChol + k_user + k_user*k_totA, k_totA);
            sum_mat(k_user+k, k_user+k,
                    CtCw, k_user+k,
                    BeTBeChol, k_totA);
        }
        else
        {
            /* no precomputed CtCw: build w_user*t(C)*C in-place */
            if (p)
                cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                            k_user+k, p,
                            w_user, C, k_user+k,
                            0., BeTBeChol, k_totA);
            sum_mat(k+k_main, k+k_main,
                    BtB, k+k_main,
                    BeTBeChol + k_user + k_user*k_totA, k_totA);
        }
        if (add_implicit_features)
            sum_mat(k+k_main_i, k+k_main_i,
                    BiTBi, k+k_main_i,
                    BeTBeChol + k_user + k_user*k_totA, k_totA);
        add_to_diag(BeTBeChol, lam, k_user+k+k_main);
        /* last diagonal entry is the user bias -> different 'lam' */
        if (lam != lam_last)
            BeTBeChol[square(k_user+k+k_main)-1] += (lam_last-lam);
        tpotrf_(&lo, &k_totA, BeTBeChol, &k_totA, &ignore);
    }
    /* CtUbias = w_user * t(C)*U_colmeans, used when missing 'U' entries
       are treated as zeros after centering */
    if (C != NULL && CtUbias != NULL && p && U_colmeans != NULL && NA_as_zero_U)
    {
        cblas_tgemv(CblasRowMajor, CblasTrans, p, k_user+k,
                    w_user, C, k_user+k,
                    U_colmeans, 1,
                    0., CtUbias, 1);
    }

    cleanup:
        if (free_B_plus_bias)
            free(B_plus_bias);
        /* 'arr_use' is owned only when it is not aliasing 'BeTBeChol' */
        if (arr_use != BeTBeChol)
            free(arr_use);
        return retval;
    throw_oom:
    {
        retval = 1;
        print_oom_message();
        goto cleanup;
    }
}
/* Pre-computes the fixed matrices used when obtaining factors for new
   users in the implicit-feedback collective model:
     BtB       : t(B)*B + diag(lam), shared+main columns only
     BeTBe     : block matrix combining w_user*t(C)*C and BtB
     BeTBeChol : Cholesky factor of BeTBe (skipped with 'nonneg')
     CtUbias   : w_user * t(C)*U_colmeans (for the NA_as_zero_U case)
   Returns 0 on success, 1 on out-of-memory. */
int_t precompute_collective_implicit
(
    real_t *restrict B, int_t n,
    real_t *restrict C, int_t p,
    real_t *restrict U_colmeans, bool NA_as_zero_U,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t lam, real_t w_main, real_t w_user, real_t w_main_multiplier,
    bool nonneg,
    bool extra_precision,
    real_t *restrict BtB,
    real_t *restrict BeTBe,
    real_t *restrict BeTBeChol,
    real_t *restrict CtUbias
)
{
    /* Fold the implicit multiplier into 'w_main', then rescale the
       regularization and side-info weight so everything is expressed
       relative to w_main == 1. */
    if (w_main_multiplier != 1.)
        w_main *= w_main_multiplier;
    if (w_main != 1.)
    {
        lam /= w_main;
        w_user /= w_main;
    }

    int_t k_factors = k + k_main;

    /* BtB = t(B)*B + diag(lam), using only the shared+main columns of B */
    set_to_zero(BtB, square(k_factors));
    cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                k_factors, n,
                1., B + k_item, k_item + k_factors,
                0., BtB, k_factors);
    add_to_diag(BtB, lam, k_factors);

    /* Without side info there is nothing else to precompute */
    if (!p)
        return 0;

    /* BeTBe: BtB (which already carries 'lam' on its diagonal) goes in
       the lower-right block, then w_user*t(C)*C is accumulated on top */
    int_t k_totA = k_user + k_factors;
    set_to_zero(BeTBe, square(k_totA));
    copy_mat(k_factors, k_factors,
             BtB, k_factors,
             BeTBe + k_user + k_user*k_totA, k_totA);
    if (!extra_precision)
    {
        cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                    k_user+k, p,
                    w_user, C, k_user+k,
                    1., BeTBe, k_totA);
    }
    else
    {
        /* Build t(C)*C in its own buffer and add it afterwards instead of
           accumulating directly on top of the copied BtB block */
        real_t *restrict CtC = (real_t*)calloc((size_t)square(k_user+k),
                                               sizeof(real_t));
        if (CtC == NULL) return 1;
        cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                    k_user+k, p,
                    w_user, C, k_user+k,
                    0., CtC, k_user+k);
        sum_mat(k_user+k, k_user+k,
                CtC, k_user+k,
                BeTBe, k_totA);
        free(CtC);
    }
    /* The leading k_user diagonal entries did not receive 'lam' from BtB */
    for (int_t ix = 0; ix < k_user; ix++)
        BeTBe[ix + ix*k_totA] += lam;

    /* BeTBeChol: Cholesky factor of BeTBe; the non-negative solver does
       not use it */
    if (BeTBeChol != NULL && !nonneg)
    {
        copy_arr(BeTBe, BeTBeChol, square(k_totA));
        char lo = 'L';
        int_t ignore = 0;
        tpotrf_(&lo, &k_totA, BeTBeChol, &k_totA, &ignore);
    }

    /* CtUbias = w_user * t(C)*U_colmeans, used when missing side info is
       treated as zero after centering */
    if (C != NULL && CtUbias != NULL && p && U_colmeans != NULL && NA_as_zero_U)
        cblas_tgemv(CblasRowMajor, CblasTrans, p, k_user+k,
                    w_user, C, k_user+k,
                    U_colmeans, 1,
                    0., CtUbias, 1);

    return 0;
}
/* Computes the latent factors (and optionally the user bias) for a single
   user of the explicit-feedback collective model, given the user's 'X'
   data (dense 'Xa_dense' or sparse 'Xa'/'ixB') and optional side info
   ('u_vec' dense, 'u_vec_sp' sparse, 'u_bin_vec' binary). When there is
   no 'X' data at all, falls back to a cold-start calculation based on
   side info alone. Precomputed matrices (BtB, TransBtBinvBt, ...) are
   optional accelerators passed through to the inner solvers.
   Returns 0 on success, 1 on out-of-memory, other values as propagated
   from the inner solvers. */
int_t factors_collective_explicit_single
(
    real_t *restrict a_vec, real_t *restrict a_bias,
    real_t *restrict u_vec, int_t p,
    real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec,
    real_t *restrict u_bin_vec, int_t pbin,
    bool NA_as_zero_U, bool NA_as_zero_X,
    bool nonneg,
    real_t *restrict C, real_t *restrict Cb,
    real_t glob_mean, real_t *restrict biasB,
    real_t *restrict U_colmeans,
    real_t *restrict Xa, int_t ixB[], size_t nnz,
    real_t *restrict Xa_dense, int_t n,
    real_t *restrict weight,
    real_t *restrict B,
    real_t *restrict Bi, bool add_implicit_features,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t lam, real_t *restrict lam_unique,
    real_t l1_lam, real_t *restrict l1_lam_unique,
    bool scale_lam, bool scale_lam_sideinfo,
    bool scale_bias_const, real_t scaling_biasA,
    real_t w_main, real_t w_user, real_t w_implicit,
    int_t n_max, bool include_all_X,
    real_t *restrict BtB,
    real_t *restrict TransBtBinvBt,
    real_t *restrict BtXbias,
    real_t *restrict BeTBeChol,
    real_t *restrict BiTBi,
    real_t *restrict CtCw,
    real_t *restrict TransCtCinvCt,
    real_t *restrict CtUbias,
    real_t *restrict B_plus_bias
)
{
    int_t retval = 0;
    real_t lam_bias = lam;
    real_t l1_lam_bias = l1_lam;
    /* lam_unique: entry [0] is the user-bias regularization, [2] is 'A's;
       without a bias, the bias lambda falls back to 'A's */
    if (lam_unique != NULL)
    {
        lam_bias = lam_unique[(a_bias != NULL)? 0 : 2];
        lam = lam_unique[2];
    }
    if (l1_lam_unique != NULL)
    {
        l1_lam_bias = l1_lam_unique[(a_bias != NULL)? 0 : 2];
        l1_lam = l1_lam_unique[2];
    }
    if (a_bias == NULL)
        scale_bias_const = false;
    /* constant scaling of the bias regularization */
    if ((scale_lam || scale_lam_sideinfo) && scale_bias_const)
    {
        lam_bias *= scaling_biasA;
        l1_lam_bias *= scaling_biasA;
    }
    /* out-of-bounds sparse indices -> output NAN instead of failing */
    bool set_to_nan = check_sparse_indices(
        (include_all_X || n == 0)? n_max : n, p,
        u_vec_sp, u_vec_ixB, nnz_u_vec,
        Xa, ixB, nnz
    );
    if (set_to_nan) {
        for (int_t ix = 0; ix < k_user+k+k_main; ix++)
            a_vec[ix] = NAN_;
        if (a_bias != NULL) *a_bias = NAN_;
        return 0;
    }

    #ifdef _FOR_R
    /* R uses its own NA representation */
    if (u_vec != NULL) R_nan_to_C_nan(u_vec, p);
    if (Xa_dense != NULL) R_nan_to_C_nan(Xa_dense, n);
    #endif

    /* no side info given in any form -> behave as if 'p' were zero */
    if (u_vec == NULL && !nnz_u_vec && !NA_as_zero_U)
        p = 0;

    real_t *restrict buffer_CtUbias = NULL;
    bool user_bias = (a_bias != NULL);
    bool free_B_plus_bias = false;
    /* B_plus_bias = [B, ones]: extra all-ones column so the bias can be
       solved jointly with the factors */
    if (user_bias && B_plus_bias == NULL)
    {
        free_B_plus_bias = true;
        B_plus_bias = (real_t*)malloc((size_t)(include_all_X? n_max : n)
                                       *(size_t)(k_item+k+k_main+1)
                                       * sizeof(real_t));
        if (B_plus_bias == NULL) goto throw_oom;
        append_ones_last_col(
            B, include_all_X? n_max : n, k_item+k+k_main,
            B_plus_bias
        );
    }

    /* CtUbias = w_user * t(C)*U_colmeans: needed when missing 'U' values
       are treated as zeros after centering and it was not precomputed */
    if (u_vec == NULL && NA_as_zero_U && U_colmeans != NULL && CtUbias == NULL)
    {
        buffer_CtUbias = (real_t*)malloc((size_t)(k_user+k)*sizeof(real_t));
        if (buffer_CtUbias == NULL) goto throw_oom;
        CtUbias = buffer_CtUbias;
        cblas_tgemv(CblasRowMajor, CblasTrans, p, k_user+k,
                    w_user, C, k_user+k,
                    U_colmeans, 1,
                    0., CtUbias, 1);
    }

    /* no 'X' data in any form -> cold-start from side info alone */
    if (!nnz && Xa_dense == NULL && !NA_as_zero_X && !add_implicit_features)
    {
        if (a_bias != NULL)
            *a_bias = 0.;
        retval = collective_factors_cold(
            a_vec,
            u_vec, p,
            u_vec_sp, u_vec_ixB, nnz_u_vec,
            u_bin_vec, pbin,
            C, Cb,
            TransCtCinvCt,
            CtCw,
            U_colmeans,
            CtUbias,
            k, k_user, k_main,
            lam, l1_lam, w_main, w_user,
            scale_lam_sideinfo,
            NA_as_zero_U,
            nonneg
        );
    }
    else
        retval = collective_factors_warm(
            a_vec, a_bias,
            u_vec, p,
            u_vec_sp, u_vec_ixB, nnz_u_vec,
            u_bin_vec, pbin,
            C, Cb,
            glob_mean, biasB,
            U_colmeans,
            Xa, ixB, nnz,
            Xa_dense, n,
            weight,
            B,
            Bi, add_implicit_features,
            k, k_user, k_item, k_main,
            lam, w_main, w_user, w_implicit, lam_bias,
            l1_lam, l1_lam_bias,
            scale_lam, scale_lam_sideinfo, scale_bias_const,
            n_max, include_all_X,
            TransBtBinvBt,
            BtXbias,
            BtB,
            BeTBeChol,
            BiTBi,
            CtCw,
            CtUbias,
            NA_as_zero_U, NA_as_zero_X,
            nonneg,
            B_plus_bias
        );

    cleanup:
        if (free_B_plus_bias)
            free(B_plus_bias);
        free(buffer_CtUbias);
        return retval;
    throw_oom:
    {
        retval = 1;
        goto cleanup;
    }
}
/* Computes the latent factors for a single user of the implicit-feedback
   collective model, given the user's (sparse) 'X' data and optional side
   info ('u_vec' dense or 'u_vec_sp' sparse). When there is no 'X' data,
   falls back to a cold-start calculation from side info alone. Missing
   precomputed matrices ('BtB', 'CtUbias') are computed on the fly.
   Returns 0 on success, 1 on out-of-memory, other values as propagated
   from the inner solvers. */
int_t factors_collective_implicit_single
(
    real_t *restrict a_vec,
    real_t *restrict u_vec, int_t p,
    real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec,
    bool NA_as_zero_U,
    bool nonneg,
    real_t *restrict U_colmeans,
    real_t *restrict B, int_t n, real_t *restrict C,
    real_t *restrict Xa, int_t ixB[], size_t nnz,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t lam, real_t l1_lam, real_t alpha, real_t w_main, real_t w_user,
    real_t w_main_multiplier,
    bool apply_log_transf,
    real_t *restrict BeTBe,
    real_t *restrict BtB,
    real_t *restrict BeTBeChol,
    real_t *restrict CtUbias
)
{
    /* out-of-bounds sparse indices -> output NAN instead of failing */
    bool set_to_nan = check_sparse_indices(
        n, p,
        u_vec_sp, u_vec_ixB, nnz_u_vec,
        Xa, ixB, nnz
    );
    if (set_to_nan) {
        for (int_t ix = 0; ix < k_user+k+k_main; ix++)
            a_vec[ix] = NAN_;
        return 0;
    }

    #ifdef _FOR_R
    /* R uses its own NA representation */
    if (u_vec != NULL) R_nan_to_C_nan(u_vec, p);
    #endif

    bool free_xsp = false;
    int_t retval = 0;
    real_t *restrict buffer_CtUbias = NULL;
    bool free_BtB = false;
    if (BtB == NULL) {
        /* BtB = t(B)*B + diag(lam) over the shared+main columns */
        free_BtB = true;
        BtB = (real_t*)malloc((size_t)square(k+k_main)*sizeof(real_t));
        if (BtB == NULL) goto throw_oom;
        cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                    k+k_main, n,
                    1., B + k_item, k_item+k+k_main,
                    0., BtB, k+k_main);
        add_to_diag(BtB, lam, k+k_main);
    }

    /* CtUbias = w_user * t(C)*U_colmeans: needed when missing 'U' values
       are treated as zeros after centering and it was not precomputed */
    if (u_vec == NULL && NA_as_zero_U && U_colmeans != NULL && CtUbias == NULL)
    {
        buffer_CtUbias = (real_t*)malloc((size_t)(k_user+k)*sizeof(real_t));
        if (buffer_CtUbias == NULL) goto throw_oom;
        CtUbias = buffer_CtUbias;
        cblas_tgemv(CblasRowMajor, CblasTrans, p, k_user+k,
                    w_user, C, k_user+k,
                    U_colmeans, 1,
                    0., CtUbias, 1);
    }

    if (apply_log_transf)
    {
        /* Work on a log-transformed copy so the caller's 'Xa' is left
           untouched. Fixed: the transformed values are computed from the
           original data (previously the freshly-allocated buffer was read
           before being written), and allocation failure goes through
           'throw_oom' so 'BtB'/'buffer_CtUbias' are not leaked. */
        real_t *restrict temp = (real_t*)malloc(nnz*sizeof(real_t));
        if (temp == NULL) goto throw_oom;
        for (size_t ix = 0; ix < nnz; ix++)
            temp[ix] = log_t(Xa[ix]);
        Xa = temp;
        free_xsp = true;
    }

    /* warm-start when there is 'X' data, otherwise cold-start */
    if (nnz)
        retval = collective_factors_warm_implicit(
            a_vec,
            u_vec, p,
            u_vec_sp, u_vec_ixB, nnz_u_vec,
            NA_as_zero_U,
            nonneg,
            U_colmeans,
            B, n, C,
            Xa, ixB, nnz,
            k, k_user, k_item, k_main,
            lam, l1_lam, alpha, w_main, w_user,
            w_main_multiplier,
            BeTBe,
            BtB,
            BeTBeChol,
            CtUbias
        );
    else
        retval = collective_factors_cold_implicit(
            a_vec,
            u_vec, p,
            u_vec_sp, u_vec_ixB, nnz_u_vec,
            B, n,
            C,
            BeTBe,
            BtB,
            BeTBeChol,
            U_colmeans,
            CtUbias,
            k, k_user, k_item, k_main,
            lam, l1_lam,
            w_main, w_user, w_main_multiplier,
            NA_as_zero_U,
            nonneg
        );

    cleanup:
        if (free_BtB)
            free(BtB);
        free(buffer_CtUbias);
        if (free_xsp)
            free(Xa);
        return retval;
    throw_oom:
    {
        retval = 1;
        goto cleanup;
    }
}
/* TODO: these functions should call 'optimizeA' instead */
int_t factors_collective_explicit_multiple
(
real_t *restrict A, real_t *restrict biasA, int_t m,
real_t *restrict U, int_t m_u, int_t p,
bool NA_as_zero_U, bool NA_as_zero_X,
bool nonneg,
int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U,
size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr,
real_t *restrict Ub, int_t m_ubin, int_t pbin,
real_t *restrict C, real_t *restrict Cb,
real_t glob_mean, real_t *restrict biasB,
real_t *restrict U_colmeans,
real_t *restrict X, int_t ixA[], int_t ixB[], size_t nnz,
size_t *restrict Xcsr_p, int_t *restrict Xcsr_i, real_t *restrict Xcsr,
real_t *restrict Xfull, int_t n,
real_t *restrict weight,
real_t *restrict B,
real_t *restrict Bi, bool add_implicit_features,
int_t k, int_t k_user, int_t k_item, int_t k_main,
real_t lam, real_t *restrict lam_unique,
real_t l1_lam, real_t *restrict l1_lam_unique,
bool scale_lam, bool scale_lam_sideinfo,
bool scale_bias_const, real_t scaling_biasA,
real_t w_main, real_t w_user, real_t w_implicit,
int_t n_max, bool include_all_X,
real_t *restrict BtB,
real_t *restrict TransBtBinvBt,
real_t *restrict BtXbias,
real_t *restrict BeTBeChol,
real_t *restrict BiTBi,
real_t *restrict TransCtCinvCt,
real_t *restrict CtCw,
real_t *restrict CtUbias,
real_t *restrict B_plus_bias,
int nthreads
)
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
size_t lda = k_user+k+k_main;
int_t retval = 0;
size_t m_max = max2(m, m_u);
if (NA_as_zero_U && U == NULL) m_u = m_max;
if (NA_as_zero_X && Xfull == NULL) m = m_max;
if (U == NULL && (!nnz_U && U_csr_p == NULL) && !NA_as_zero_U) m_u = 0;
if (Xfull == NULL && (!nnz && Xcsr_p == NULL) && !NA_as_zero_X) m = 0;
bool user_bias = (biasA != NULL);
bool free_B_plus_bias = false;
int nthreads_restore = 1;
real_t *restrict weightR = NULL;
real_t *restrict buffer_CtUbias = NULL;
bool free_U_csr = false;
bool free_X_csr = false;
bool free_BtB = false;
bool free_BiTBi = false;
bool free_BtX = false;
int_t *restrict ret = (int_t*)malloc(m_max*sizeof(int_t));
if (ret == NULL) goto throw_oom;
if (user_bias && B_plus_bias == NULL)
{
free_B_plus_bias = true;
B_plus_bias = (real_t*)malloc((size_t)n*(size_t)(k_item+k+k_main+1)
* sizeof(real_t));
if (B_plus_bias == NULL) goto throw_oom;
append_ones_last_col(
B, n, k_item+k+k_main,
B_plus_bias
);
}
if (Xfull == NULL && (nnz || NA_as_zero_X) && Xcsr_p == NULL)
{
free_X_csr = true;
Xcsr_p = (size_t*)malloc(((size_t)m + (size_t)1) * sizeof(size_t));
Xcsr_i = (int_t*)malloc(nnz*sizeof(int_t));
Xcsr = (real_t*)malloc(nnz*sizeof(real_t));
if (Xcsr_p == NULL || Xcsr_i == NULL || Xcsr == NULL)
goto throw_oom;
if (weight != NULL) {
weightR = (real_t*)malloc(nnz*sizeof(real_t));
if (weightR == NULL) goto throw_oom;
}
coo_to_csr(
ixA, ixB, X,
weight,
m, n, nnz,
Xcsr_p, Xcsr_i, Xcsr,
weightR
);
}
else if (Xfull == NULL && Xcsr_p != NULL && weight != NULL) {
weightR = weight;
}
if (U == NULL && (nnz_U || NA_as_zero_U) && U_csr_p == NULL)
{
free_U_csr = true;
U_csr_p = (size_t*)malloc(((size_t)m_u + (size_t)1) * sizeof(size_t));
U_csr_i = (int_t*)malloc(nnz_U*sizeof(int_t));
U_csr = (real_t*)malloc(nnz_U*sizeof(real_t));
if (U_csr_p == NULL || U_csr_i == NULL || U_csr == NULL)
goto throw_oom;
coo_to_csr(
U_row, U_col, U_sp,
(real_t*)NULL,
m, p, nnz_U,
U_csr_p, U_csr_i, U_csr,
(real_t*)NULL
);
}
if (U == NULL && NA_as_zero_U && U_colmeans != NULL && CtUbias == NULL)
{
buffer_CtUbias = (real_t*)malloc((size_t)(k_user+k)*sizeof(real_t));
if (buffer_CtUbias == NULL) goto throw_oom;
CtUbias = buffer_CtUbias;
cblas_tgemv(CblasRowMajor, CblasTrans, p, k_user+k,
w_user, C, k_user+k,
U_colmeans, 1,
0., CtUbias, 1);
}
if (BtB == NULL && NA_as_zero_X)
{
free_BtB = true;
BtB =(real_t*)malloc((size_t)square(k+k_main+user_bias)*sizeof(real_t));
if (BtB == NULL) goto throw_oom;
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k+k_main+user_bias, max2(n, n_max),
1.,
(user_bias? B_plus_bias : B) + k_item,
k_item+k+k_main+user_bias,
0., BtB, k+k_main+user_bias);
}
if (add_implicit_features && BiTBi == NULL)
{
free_BiTBi = true;
BiTBi = (real_t*)malloc((size_t)square(k+k_main)*sizeof(real_t));
if (BiTBi == NULL) goto throw_oom;
cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
k+k_main, n,
w_implicit,
Bi, k+k_main,
0., BiTBi, k+k_main);
}
if (Xfull == NULL && NA_as_zero_X && BtXbias == NULL &&
(biasB != NULL || glob_mean != 0.))
{
BtXbias = (real_t*)calloc(k+k_main+user_bias, sizeof(real_t));
if (BtXbias == NULL) goto throw_oom;
free_BtX = true;
if (biasB != NULL)
{
if (glob_mean != 0. && n_max > n)
{
sum_by_cols((user_bias? B_plus_bias : B)
+ k_item
+ (size_t)n*(size_t)(k_item+k+k_main+user_bias),
BtXbias,
n_max - n, k+k_main+user_bias,
k_item+k+k_main+user_bias, nthreads);
if (user_bias)
BtXbias[k+k_main] = (real_t)(n_max-n);
cblas_tscal(k+k_main+user_bias, -glob_mean, BtXbias, 1);
}
for (size_t col = 0; col < (size_t)n; col++)
cblas_taxpy(k+k_main+user_bias,
-(biasB[col] + glob_mean),
(user_bias? B_plus_bias : B)
+ (size_t)k_item
+ col*(size_t)(k_item+k+k_main+user_bias), 1,
BtXbias, 1);
}
else if (glob_mean != 0.)
{
sum_by_cols((user_bias? B_plus_bias : B)
+ k_item
+ (size_t)n*(size_t)(k_item+k+k_main+user_bias),
BtXbias,
n_max, k+k_main+user_bias,
k_item+k+k_main+user_bias, nthreads);
if (user_bias)
BtXbias[k+k_main] = (real_t)n_max;
cblas_tscal(k+k_main+user_bias, -glob_mean, BtXbias, 1);
}
}
set_blas_threads(1, &nthreads_restore);
#pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
shared(A, B, C, Cb, biasA, biasB, glob_mean, U_colmeans, \
Bi, add_implicit_features, \
Xfull, weight, Xcsr, Xcsr_p, Xcsr_i, weightR, m, n, \
U, U_csr, U_csr_p, U_csr_i, p, Ub, pbin, m_u, m_ubin, \
NA_as_zero_X, NA_as_zero_U, nonneg, m_max, \
lam, lam_unique, l1_lam, l1_lam_unique, \
w_main, w_user, w_implicit, \
k, k_user, k_item, k_main, \
TransBtBinvBt, BtB, BeTBeChol, CtCw, TransCtCinvCt, \
B_plus_bias, BiTBi, BtXbias, CtUbias, \
scale_lam, scale_lam_sideinfo)
for (size_t_for ix = 0; ix < m_max; ix++)
ret[ix] = factors_collective_explicit_single(
A + ix*lda,
user_bias? (biasA + ix) : ((real_t*)NULL),
(U == NULL || ix >= (size_t)m_u)?
((real_t*)NULL) : (U + ix*(size_t)p),
(ix < (size_t)m_u)? p : 0,
(ix < (size_t)m_u && U_csr_p != NULL)?
(U_csr + U_csr_p[ix]) : ((real_t*)NULL),
(ix < (size_t)m_u && U_csr_p != NULL)?
(U_csr_i + U_csr_p[ix]) : ((int_t*)NULL),
(ix < (size_t)m_u && U_csr_p != NULL)?
(U_csr_p[ix+1] - U_csr_p[ix]) : ((size_t)0),
(Ub == NULL || ix >= (size_t)m_ubin)?
((real_t*)NULL) : (Ub + ix*(size_t)pbin),
(ix < (size_t)m_ubin)? pbin : 0,
NA_as_zero_U, NA_as_zero_X,
nonneg,
C, Cb,
glob_mean, biasB,
U_colmeans,
(ix < (size_t)m && Xcsr_p != NULL)?
(Xcsr + Xcsr_p[ix]) : ((real_t*)NULL),
(ix < (size_t)m && Xcsr_p != NULL)?
(Xcsr_i + Xcsr_p[ix]) : ((int_t*)NULL),
(ix < (size_t)m && Xcsr_p != NULL)?
(Xcsr_p[ix+1] - Xcsr_p[ix]) : ((size_t)0),
(Xfull == NULL || ix >= (size_t)m)?
((real_t*)NULL) : (Xfull + ix*(size_t)n),
(ix < (size_t)m)? n : 0,
(weight == NULL || ix >= (size_t)m)?
((real_t*)NULL)
:
((Xfull != NULL)?
(weight + ix*(size_t)n) : (weightR + Xcsr_p[ix])),
B,
Bi, add_implicit_features,
k, k_user, k_item, k_main,
lam, lam_unique,
l1_lam, l1_lam_unique,
scale_lam, scale_lam_sideinfo,
scale_bias_const, scaling_biasA,
w_main, w_user, w_implicit,
n_max, include_all_X,
BtB,
TransBtBinvBt,
BtXbias,
BeTBeChol,
BiTBi,
CtCw,
TransCtCinvCt,
CtUbias,
B_plus_bias
);
set_blas_threads(nthreads_restore, (int*)NULL);
for (size_t ix = 0; ix < m_max; ix++)
retval = max2(retval, ret[ix]);
if (retval == 1)
goto throw_oom;
else if (retval != 0)
goto cleanup;
cleanup:
if (free_U_csr) {
free(U_csr);
free(U_csr_i);
free(U_csr_p);
}
if (free_X_csr) {
free(Xcsr);
free(Xcsr_p);
free(Xcsr_i);
free(weightR);
}
if (free_B_plus_bias)
free(B_plus_bias);
if (free_BtB)
free(BtB);
if (free_BiTBi)
free(BiTBi);
if (free_BtX)
free(BtXbias);
free(buffer_CtUbias);
free(ret);
return retval;
throw_oom:
{
retval = 1;
goto cleanup;
}
}
/* Computes the latent factors for multiple users/rows at once under the
   implicit-feedback collective model.
   'A' (out) is filled with one row of k_user+k+k_main factors per user.
   'X' may be passed as COO (ixA/ixB/X) or CSR (Xcsr_p/Xcsr_i/Xcsr); the
   side-info matrix 'U' may be dense, COO, or CSR. Missing representations
   are converted here and freed before returning.
   Returns 0 on success, 1 on out-of-memory, other values on failures from
   the per-row solver. */
int_t factors_collective_implicit_multiple
(
    real_t *restrict A, int_t m,
    real_t *restrict U, int_t m_u, int_t p,
    bool NA_as_zero_U,
    bool nonneg,
    int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U,
    size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr,
    real_t *restrict X, int_t ixA[], int_t ixB[], size_t nnz,
    size_t *restrict Xcsr_p, int_t *restrict Xcsr_i, real_t *restrict Xcsr,
    real_t *restrict B, int_t n,
    real_t *restrict C,
    real_t *restrict U_colmeans,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t lam, real_t l1_lam, real_t alpha, real_t w_main, real_t w_user,
    real_t w_main_multiplier,
    bool apply_log_transf,
    real_t *restrict BeTBe,
    real_t *restrict BtB,
    real_t *restrict BeTBeChol,
    real_t *restrict CtUbias,
    int nthreads
)
{
    /* Older OpenMP (< 3.0) and MSVC require a signed loop counter in a
       'parallel for', hence the conditional 'long long ix' here. */
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long ix;
    #endif
    size_t lda = k_user+k+k_main;
    int_t retval = 0;
    /* factors are produced for every row that has either 'X' or 'U' data */
    m = max2(m, m_u);
    if (NA_as_zero_U && U == NULL) m_u = m;
    if (U == NULL && (!nnz_U && U_csr_p == NULL) && !NA_as_zero_U) m_u = 0;
    int nthreads_restore = 1;

    bool free_U_csr = false;
    bool free_X_csr = false;
    bool free_BtB = false;
    real_t *restrict buffer_CtUbias = NULL;
    /* per-row return codes, reduced to a single code at the end */
    int_t *restrict ret = (int_t*)malloc(m*sizeof(int_t));
    if (ret == NULL) goto throw_oom;

    /* convert 'X' from COO to CSR if not already given as CSR */
    if (Xcsr_p == NULL)
    {
        free_X_csr = true;
        Xcsr_p = (size_t*)malloc(((size_t)m + (size_t)1) * sizeof(size_t));
        Xcsr_i = (int_t*)malloc(nnz*sizeof(int_t));
        Xcsr = (real_t*)malloc(nnz*sizeof(real_t));
        if (Xcsr_p == NULL || Xcsr_i == NULL || Xcsr == NULL)
            goto throw_oom;
        coo_to_csr(
            ixA, ixB, X,
            (real_t*)NULL,
            m, n, nnz,
            Xcsr_p, Xcsr_i, Xcsr,
            (real_t*)NULL
        );
    }

    /* likewise for sparse 'U' */
    if (U == NULL && (nnz_U || NA_as_zero_U) && U_csr_p == NULL)
    {
        free_U_csr = true;
        U_csr_p = (size_t*)malloc(((size_t)m_u + (size_t)1) * sizeof(size_t));
        U_csr_i = (int_t*)malloc(nnz_U*sizeof(int_t));
        U_csr = (real_t*)malloc(nnz_U*sizeof(real_t));
        if (U_csr_p == NULL || U_csr_i == NULL || U_csr == NULL)
            goto throw_oom;
        /* Fix: 'U' has 'm_u' rows and 'U_csr_p' was allocated with 'm_u'+1
           entries. The previous code passed 'm' here, which at this point
           equals max2(m, m_u) and can exceed 'm_u', making coo_to_csr
           write past the end of 'U_csr_p'. */
        coo_to_csr(
            U_row, U_col, U_sp,
            (real_t*)NULL,
            m_u, p, nnz_U,
            U_csr_p, U_csr_i, U_csr,
            (real_t*)NULL
        );
    }

    /* precompute t(C)*U_colmeans (scaled by w_user) when means are to be
       added implicitly and the caller did not supply it */
    if (U == NULL && NA_as_zero_U && U_colmeans != NULL && CtUbias == NULL)
    {
        buffer_CtUbias = (real_t*)malloc((size_t)(k_user+k)*sizeof(real_t));
        if (buffer_CtUbias == NULL) goto throw_oom;
        CtUbias = buffer_CtUbias;
        cblas_tgemv(CblasRowMajor, CblasTrans, p, k_user+k,
                    w_user, C, k_user+k,
                    U_colmeans, 1,
                    0., CtUbias, 1);
    }

    /* precompute t(B)*B + diag(lam) once if it will be reused across rows */
    if (BtB == NULL && m > 1 && BeTBeChol == NULL)
    {
        free_BtB = true;
        BtB = (real_t*)malloc((size_t)square(k+k_main)*sizeof(real_t));
        if (BtB == NULL) goto throw_oom;
        cblas_tsyrk(CblasRowMajor, CblasUpper, CblasTrans,
                    k+k_main, n,
                    1., B + k_item, k_item+k+k_main,
                    0., BtB, k+k_main);
        add_to_diag(BtB, lam, k+k_main);
    }

    /* single-threaded BLAS while parallelizing over rows */
    set_blas_threads(1, &nthreads_restore);

    #pragma omp parallel for schedule(static) num_threads(nthreads) \
            shared(A, B, C, m, m_u, U_colmeans, n, \
                   U, U_csr, U_csr_i, U_csr_p, NA_as_zero_U, nonneg, \
                   Xcsr, Xcsr_i, Xcsr_p, \
                   lam, l1_lam, alpha, w_main, w_user, w_main_multiplier, \
                   k, k_user, k_item, k_main, \
                   BtB, BeTBe, BeTBeChol, CtUbias)
    for (size_t_for ix = 0; ix < (size_t)m; ix++)
        ret[ix] = factors_collective_implicit_single(
            A + ix*lda,
            /* rows beyond 'm_u' have no side info */
            (U == NULL || ix >= (size_t)m_u)?
                ((real_t*)NULL) : (U + ix*(size_t)p),
            (ix < (size_t)m_u)? p : 0,
            (ix < (size_t)m_u && U_csr_p != NULL)?
                (U_csr + U_csr_p[ix]) : ((real_t*)NULL),
            (ix < (size_t)m_u && U_csr_p != NULL)?
                (U_csr_i + U_csr_p[ix]) : ((int_t*)NULL),
            (ix < (size_t)m_u && U_csr_p != NULL)?
                (U_csr_p[ix+1] - U_csr_p[ix]) : ((size_t)0),
            NA_as_zero_U,
            nonneg,
            U_colmeans,
            B, n, C,
            Xcsr + Xcsr_p[ix],
            Xcsr_i + Xcsr_p[ix],
            Xcsr_p[ix+1] - Xcsr_p[ix],
            k, k_user, k_item, k_main,
            lam, l1_lam, alpha, w_main, w_user,
            w_main_multiplier,
            apply_log_transf,
            BeTBe,
            BtB,
            BeTBeChol,
            CtUbias
        );

    set_blas_threads(nthreads_restore, (int*)NULL);

    /* reduce per-row codes; 1 (OOM) dominates */
    for (size_t ix = 0; ix < (size_t)m; ix++)
        retval = max2(retval, ret[ix]);
    if (retval == 1) goto throw_oom;

    cleanup:
        if (free_U_csr) {
            free(U_csr);
            free(U_csr_i);
            free(U_csr_p);
        }
        if (free_X_csr) {
            free(Xcsr);
            free(Xcsr_p);
            free(Xcsr_i);
        }
        if (free_BtB) {
            free(BtB);
        }
        free(buffer_CtUbias);
        free(ret);
        return retval;
    throw_oom:
    {
        retval = 1;
        goto cleanup;
    }
}
/* Imputes (fills in) the NaN entries of the dense matrix 'X'/'Xfull'
   (row-major, dimensions m-by-n): first fits latent factors (and optionally
   biases) for every row through 'factors_collective_explicit_multiple',
   then overwrites each missing entry with the model's prediction
   A(row,:)*B(col,:) + glob_mean + biasA + biasB. Observed entries are left
   untouched.
   Returns 0 on success, 1 on out-of-memory, other non-zero codes from the
   factors calculation. */
int_t impute_X_collective_explicit
(
    int_t m, bool user_bias,
    real_t *restrict U, int_t m_u, int_t p,
    bool NA_as_zero_U,
    bool nonneg,
    int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U,
    size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr,
    real_t *restrict Ub, int_t m_ubin, int_t pbin,
    real_t *restrict C, real_t *restrict Cb,
    real_t glob_mean, real_t *restrict biasB,
    real_t *restrict U_colmeans,
    real_t *restrict Xfull, int_t n,
    real_t *restrict weight,
    real_t *restrict B,
    real_t *restrict Bi, bool add_implicit_features,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t lam, real_t *restrict lam_unique,
    real_t l1_lam, real_t *restrict l1_lam_unique,
    bool scale_lam, bool scale_lam_sideinfo,
    bool scale_bias_const, real_t scaling_biasA,
    real_t w_main, real_t w_user, real_t w_implicit,
    int_t n_max, bool include_all_X,
    real_t *restrict BtB,
    real_t *restrict TransBtBinvBt,
    real_t *restrict BeTBeChol,
    real_t *restrict BiTBi,
    real_t *restrict TransCtCinvCt,
    real_t *restrict CtCw,
    real_t *restrict CtUbias,
    real_t *restrict B_plus_bias,
    int nthreads
)
{
    /* older OpenMP (< 3.0) and MSVC need a signed loop counter */
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long row;
    #endif
    /* TODO: this function should first check which rows have missing values,
       and calculate the factors only for them. The imputation loop should
       also make a pre-check on the whole row to see if it has missing values.*/
    int_t retval = 0;
    size_t m_by_n = (size_t)m*(size_t)n;
    size_t lda = k_user + k + k_main;   /* row stride of 'A' */
    size_t ldb = k_item + k + k_main;   /* row stride of 'B' */
    size_t cnt_NA = 0;                  /* number of missing entries in 'X' */
    bool free_B_plus_bias = false;
    bool dont_produce_full_X = false;
    real_t *restrict A = (real_t*)malloc(  (size_t)max2(m, m_u)
                                         * (size_t)lda
                                         * sizeof(real_t));
    real_t *restrict biasA = NULL;
    if (user_bias) biasA = (real_t*)calloc((size_t)max2(m, m_u),sizeof(real_t));
    if (A == NULL || (biasA == NULL && user_bias))
        goto throw_oom;

    /* with user biases, 'B' needs an extra all-ones column; build it here
       if the caller did not pass a precomputed one */
    if (user_bias && B_plus_bias == NULL)
    {
        free_B_plus_bias = true;
        B_plus_bias = (real_t*)malloc(  (size_t)n*(size_t)(ldb + (size_t)1)
                                      * sizeof(real_t));
        if (B_plus_bias == NULL) goto throw_oom;
        append_ones_last_col(
            B, n, k_item+k+k_main,
            B_plus_bias
        );
    }

    for (size_t ix = 0; ix < m_by_n; ix++)
        cnt_NA += isnan(Xfull[ix]) != 0;
    /* few missing entries (<= 10%): predict them one-by-one; otherwise it
       is cheaper to predict the whole matrix with a single GEMM */
    dont_produce_full_X = (cnt_NA <= m_by_n / (size_t)10);
    if (cnt_NA == 0) goto cleanup;  /* nothing to impute */

    /* fit the per-row factors (and biases) from the observed entries */
    retval = factors_collective_explicit_multiple(
        A, biasA, m,
        U, m_u, p,
        NA_as_zero_U, false,
        nonneg,
        U_row, U_col, U_sp, nnz_U,
        U_csr_p, U_csr_i, U_csr,
        Ub, m_ubin, pbin,
        C, Cb,
        glob_mean, biasB,
        U_colmeans,
        (real_t*)NULL, (int_t*)NULL, (int_t*)NULL, (size_t)0,
        (size_t*)NULL, (int_t*)NULL, (real_t*)NULL,
        Xfull, n,
        weight,
        B,
        Bi, add_implicit_features,
        k, k_user, k_item, k_main,
        lam, lam_unique,
        l1_lam, l1_lam_unique,
        scale_lam, scale_lam_sideinfo,
        scale_bias_const, scaling_biasA,
        w_main, w_user, w_implicit,
        n_max, include_all_X,
        BtB,
        TransBtBinvBt,
        (real_t*)NULL,
        BeTBeChol,
        BiTBi,
        TransCtCinvCt,
        CtCw,
        CtUbias,
        B_plus_bias,
        nthreads
    );
    if (retval == 1)
        goto throw_oom;
    else if (retval != 0)
        goto cleanup;

    if (dont_produce_full_X)
    {
        /* per-entry dot products, only where 'X' is missing */
        #if !defined(_MSC_VER) || (_MSC_VER >= 1921)
        #pragma omp parallel for collapse(2) \
                schedule(dynamic) num_threads(nthreads) \
                shared(m, n, Xfull, k, k_user, k_item, k_main, lda, ldb, \
                       glob_mean, user_bias, biasA, biasB, A, B)
        #endif
        for (size_t_for row = 0; row < (size_t)m; row++)
            for (size_t col = 0; col < (size_t)n; col++)
                Xfull[col + row*(size_t)n]
                    =
                #ifndef _FOR_R
                    (!isnan(Xfull[col + row*(size_t)n]))?
                #else
                    (!isnan(Xfull[col + row*(size_t)n]) &&
                     !ISNAN(Xfull[col + row*(size_t)n]))?
                #endif
                        (Xfull[col + row*(size_t)n])
                            :
                        (
                            cblas_tdot(k+k_main,
                                       A + row*lda + (size_t)k_user, 1,
                                       B + col*ldb + (size_t)k_item, 1)
                            + glob_mean
                            + (user_bias? biasA[row] : 0.)
                            + ((biasB != NULL)? biasB[col] : 0.)
                        );
    }

    else
    {
        /* many missing entries: predict the full matrix with one GEMM,
           then copy predictions into the missing slots */
        size_t m_by_n = (size_t)m * (size_t)n;
        real_t *restrict Xpred = (real_t*)malloc(m_by_n*sizeof(real_t));
        if (Xpred == NULL) goto throw_oom;
        cblas_tgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                    m, n, k+k_main,
                    1., A + k_user, lda, B + k_item, ldb,
                    0., Xpred, n);
        #if !defined(_MSC_VER) || (_MSC_VER >= 1921)
        #pragma omp parallel for collapse(2) \
                schedule(dynamic) num_threads(nthreads) \
                shared(m, n, Xfull, Xpred, glob_mean, user_bias, biasA, biasB)
        #endif
        for (size_t_for row = 0; row < (size_t)m; row++)
            for (size_t col = 0; col < (size_t)n; col++)
                Xfull[col + row*(size_t)n]
                    =
                #ifndef _FOR_R
                    (!isnan(Xfull[col + row*(size_t)n]))?
                #else
                    (!isnan(Xfull[col + row*(size_t)n]) &&
                     !ISNAN(Xfull[col + row*(size_t)n]))?
                #endif
                        (Xfull[col + row*(size_t)n])
                            :
                        (Xpred[col + row*(size_t)n]
                            + glob_mean
                            + (user_bias? biasA[row] : 0.)
                            + ((biasB != NULL)? biasB[col] : 0.));
        free(Xpred);
    }

    cleanup:
        free(A);
        free(biasA);
        if (free_B_plus_bias)
            free(B_plus_bias);
        return retval;
    throw_oom:
    {
        retval = 1;
        goto cleanup;
    }
}
/* Ranks the top-N highest-predicted items for an existing user. The user's
   factors can be passed either directly ('a_vec' + 'a_bias') or as row
   'row_index' of the fitted matrix 'A' (with bias taken from 'biasA' when
   available). Delegates the actual ranking to 'topN'. */
int_t topN_old_collective_explicit
(
    real_t *restrict a_vec, real_t a_bias,
    real_t *restrict A, real_t *restrict biasA, int_t row_index,
    real_t *restrict B,
    real_t *restrict biasB,
    real_t glob_mean,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    int_t *restrict include_ix, int_t n_include,
    int_t *restrict exclude_ix, int_t n_exclude,
    int_t *restrict outp_ix, real_t *restrict outp_score,
    int_t n_top, int_t n, int_t n_max, bool include_all_X, int nthreads
)
{
    /* rank over all items when requested or when 'n' was not given */
    if (include_all_X || n == 0)
        n = n_max;

    /* resolve which factors/bias to use for this user */
    real_t *user_factors = a_vec;
    real_t user_bias_val = a_bias;
    if (a_vec == NULL)
    {
        user_factors = A + (size_t)row_index*(size_t)(k_user+k+k_main);
        user_bias_val = (biasA == NULL)? (0.) : (biasA[row_index]);
    }

    return topN(
        user_factors, k_user,
        B, k_item,
        biasB,
        glob_mean, user_bias_val,
        k, k_main,
        include_ix, n_include,
        exclude_ix, n_exclude,
        outp_ix, outp_score,
        n_top, n, nthreads
    );
}
/* Implicit-feedback counterpart of 'topN_old_collective_explicit':
   same ranking, but with no global mean and no user/item biases
   (all bias-related arguments are passed as zero/NULL). */
int_t topN_old_collective_implicit
(
    real_t *restrict a_vec,
    real_t *restrict A, int_t row_index,
    real_t *restrict B,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    int_t *restrict include_ix, int_t n_include,
    int_t *restrict exclude_ix, int_t n_exclude,
    int_t *restrict outp_ix, real_t *restrict outp_score,
    int_t n_top, int_t n, int nthreads
)
{
    return topN_old_collective_explicit(
        a_vec, 0.,
        A, (real_t*)NULL, row_index,
        B,
        (real_t*)NULL,
        0.,
        k, k_user, k_item, k_main,
        include_ix, n_include,
        exclude_ix, n_exclude,
        outp_ix, outp_score,
        n_top, n, n, false, nthreads
    );
}
/* Computes factors for a *new* user from its 'X' data and side info,
   then ranks the top-N highest-predicted items for that user.
   Returns 0 on success, 1 on out-of-memory, other non-zero codes from
   the factors calculation. */
int_t topN_new_collective_explicit
(
    /* inputs for the factors */
    bool user_bias,
    real_t *restrict u_vec, int_t p,
    real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec,
    real_t *restrict u_bin_vec, int_t pbin,
    bool NA_as_zero_U, bool NA_as_zero_X,
    bool nonneg,
    real_t *restrict C, real_t *restrict Cb,
    real_t glob_mean, real_t *restrict biasB,
    real_t *restrict U_colmeans,
    real_t *restrict Xa, int_t ixB[], size_t nnz,
    real_t *restrict Xa_dense, int_t n,
    real_t *restrict weight,
    real_t *restrict B,
    real_t *restrict Bi, bool add_implicit_features,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t lam, real_t *restrict lam_unique,
    real_t l1_lam, real_t *restrict l1_lam_unique,
    bool scale_lam, bool scale_lam_sideinfo,
    bool scale_bias_const, real_t scaling_biasA,
    real_t w_main, real_t w_user, real_t w_implicit,
    int_t n_max, bool include_all_X,
    real_t *restrict BtB,
    real_t *restrict TransBtBinvBt,
    real_t *restrict BtXbias,
    real_t *restrict BeTBeChol,
    real_t *restrict BiTBi,
    real_t *restrict CtCw,
    real_t *restrict TransCtCinvCt,
    real_t *restrict CtUbias,
    real_t *restrict B_plus_bias,
    /* inputs for topN */
    int_t *restrict include_ix, int_t n_include,
    int_t *restrict exclude_ix, int_t n_exclude,
    int_t *restrict outp_ix, real_t *restrict outp_score,
    int_t n_top, int nthreads
)
{
    int_t retval = 0;
    /* temporary factors vector for the new user */
    real_t *restrict a_vec = (real_t*)malloc(  (size_t)(k_user+k+k_main)
                                             * sizeof(real_t));
    real_t a_bias = 0.;
    if (a_vec == NULL) goto throw_oom;

    /* step 1: obtain the user's factors (and bias if requested) */
    retval = factors_collective_explicit_single(
        a_vec, user_bias? &a_bias : (real_t*)NULL,
        u_vec, p,
        u_vec_sp, u_vec_ixB, nnz_u_vec,
        u_bin_vec, pbin,
        NA_as_zero_U, NA_as_zero_X,
        nonneg,
        C, Cb,
        glob_mean, biasB,
        U_colmeans,
        Xa, ixB, nnz,
        Xa_dense, n,
        weight,
        B,
        Bi, add_implicit_features,
        k, k_user, k_item, k_main,
        lam, lam_unique,
        l1_lam, l1_lam_unique,
        scale_lam, scale_lam_sideinfo,
        scale_bias_const, scaling_biasA,
        w_main, w_user, w_implicit,
        n_max, include_all_X,
        BtB,
        TransBtBinvBt,
        BtXbias,
        BeTBeChol,
        BiTBi,
        CtCw,
        TransCtCinvCt,
        CtUbias,
        B_plus_bias
    );
    if (retval == 1)
        goto throw_oom;
    else if (retval != 0)
        goto cleanup;

    /* step 2: rank items according to those factors */
    retval = topN_old_collective_explicit(
        a_vec, a_bias,
        (real_t*)NULL, (real_t*)NULL, 0,
        B,
        biasB,
        glob_mean,
        k, k_user, k_item, k_main,
        include_ix, n_include,
        exclude_ix, n_exclude,
        outp_ix, outp_score,
        n_top, n, n_max, include_all_X, nthreads
    );

    cleanup:
        free(a_vec);
        return retval;
    throw_oom:
    {
        retval = 1;
        goto cleanup;
    }
}
/* Implicit-feedback version of 'topN_new_collective_explicit':
   computes factors for a new user from its 'X' data and side info,
   then ranks its top-N items (no biases/global mean in this model).
   Returns 0 on success, 1 on out-of-memory, other non-zero codes from
   the factors calculation. */
int_t topN_new_collective_implicit
(
    /* inputs for the factors */
    int_t n,
    real_t *restrict u_vec, int_t p,
    real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec,
    bool NA_as_zero_U,
    bool nonneg,
    real_t *restrict U_colmeans,
    real_t *restrict B, real_t *restrict C,
    real_t *restrict Xa, int_t ixB[], size_t nnz,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t lam, real_t l1_lam, real_t alpha, real_t w_main, real_t w_user,
    real_t w_main_multiplier,
    bool apply_log_transf,
    real_t *restrict BeTBe,
    real_t *restrict BtB,
    real_t *restrict BeTBeChol,
    real_t *restrict CtUbias,
    /* inputs for topN */
    int_t *restrict include_ix, int_t n_include,
    int_t *restrict exclude_ix, int_t n_exclude,
    int_t *restrict outp_ix, real_t *restrict outp_score,
    int_t n_top, int nthreads
)
{
    int_t retval = 0;
    /* temporary factors vector for the new user */
    real_t *restrict a_vec = (real_t*)malloc(  (size_t)(k_user+k+k_main)
                                             * sizeof(real_t));
    if (a_vec == NULL) goto throw_oom;

    /* step 1: obtain the user's factors */
    retval = factors_collective_implicit_single(
        a_vec,
        u_vec, p,
        u_vec_sp, u_vec_ixB, nnz_u_vec,
        NA_as_zero_U,
        nonneg,
        U_colmeans,
        B, n, C,
        Xa, ixB, nnz,
        k, k_user, k_item, k_main,
        lam, l1_lam, alpha, w_main, w_user,
        w_main_multiplier,
        apply_log_transf,
        BeTBe,
        BtB,
        BeTBeChol,
        CtUbias
    );
    if (retval == 1)
        goto throw_oom;
    else if (retval != 0)
        goto cleanup;

    /* step 2: rank items according to those factors */
    retval = topN_old_collective_implicit(
        a_vec,
        (real_t*)NULL, 0,
        B,
        k, k_user, k_item, k_main,
        include_ix, n_include,
        exclude_ix, n_exclude,
        outp_ix, outp_score,
        n_top, n, nthreads
    );

    cleanup:
        free(a_vec);
        return retval;
    throw_oom:
    {
        retval = 1;
        goto cleanup;
    }
}
/* Predicts the entries X[row[i], col[i]] for already-fitted factor matrices
   'A' and 'B', writing results into 'predicted'. Any prediction that comes
   out as NaN is replaced with the non-personalized fallback: global mean
   plus whichever of the row/column biases are available and in range.
   Always returns 0. */
int_t predict_X_old_collective_explicit
(
    int_t row[], int_t col[], real_t *restrict predicted, size_t n_predict,
    real_t *restrict A, real_t *restrict biasA,
    real_t *restrict B, real_t *restrict biasB,
    real_t glob_mean,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    int_t m, int_t n_max,
    int nthreads
)
{
    /* factor-based predictions for every requested (row, col) pair */
    predict_multiple(
        A, k_user,
        B, k_item,
        biasA, biasB,
        glob_mean,
        k, k_main,
        m, n_max,
        row, col, n_predict,
        predicted,
        nthreads
    );

    /* replace NaN outputs with glob_mean + biases (when applicable) */
    for (size_t ix = 0; ix < n_predict; ix++)
    {
        #ifdef _FOR_R
        bool is_missing = ISNAN(predicted[ix]) != 0;
        #else
        bool is_missing = isnan(predicted[ix]) != 0;
        #endif
        if (is_missing)
            predicted[ix]
                = glob_mean
                  + ((biasA != NULL && row[ix] < m)? biasA[row[ix]] : 0.)
                  + ((biasB != NULL && col[ix] < n_max)? biasB[col[ix]] : 0.);
    }
    return 0;
}
/* Implicit-feedback counterpart of 'predict_X_old_collective_explicit':
   same factor-based predictions, but with no biases and no global mean,
   and no NaN fallback pass. Always returns 0. */
int_t predict_X_old_collective_implicit
(
    int_t row[], int_t col[], real_t *restrict predicted, size_t n_predict,
    real_t *restrict A,
    real_t *restrict B,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    int_t m, int_t n,
    int nthreads
)
{
    predict_multiple(
        A, k_user,
        B, k_item,
        (real_t*)NULL, (real_t*)NULL,
        0.,
        k, k_main,
        m, n,
        row, col, n_predict,
        predicted,
        nthreads
    );
    return 0;
}
/* Predicts X[row[i], col[i]] for *new* users: first computes factors (and
   optionally biases) for 'm_new' users from their data and side info, then
   predicts through 'predict_X_old_collective_explicit', with a final NaN
   fallback to glob_mean + biases.
   Returns 0 on success, 1 on out-of-memory, other codes from the factors
   calculation. */
int_t predict_X_new_collective_explicit
(
    /* inputs for predictions */
    int_t m_new,
    int_t row[], int_t col[], real_t *restrict predicted, size_t n_predict,
    int nthreads,
    /* inputs for factors */
    bool user_bias,
    real_t *restrict U, int_t m_u, int_t p,
    bool NA_as_zero_U, bool NA_as_zero_X,
    bool nonneg,
    int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U,
    size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr,
    real_t *restrict Ub, int_t m_ubin, int_t pbin,
    real_t *restrict C, real_t *restrict Cb,
    real_t glob_mean, real_t *restrict biasB,
    real_t *restrict U_colmeans,
    real_t *restrict X, int_t ixA[], int_t ixB[], size_t nnz,
    size_t *restrict Xcsr_p, int_t *restrict Xcsr_i, real_t *restrict Xcsr,
    real_t *restrict Xfull, int_t n,
    real_t *restrict weight,
    real_t *restrict B,
    real_t *restrict Bi, bool add_implicit_features,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t lam, real_t *restrict lam_unique,
    real_t l1_lam, real_t *restrict l1_lam_unique,
    bool scale_lam, bool scale_lam_sideinfo,
    bool scale_bias_const, real_t scaling_biasA,
    real_t w_main, real_t w_user, real_t w_implicit,
    int_t n_max, bool include_all_X,
    real_t *restrict BtB,
    real_t *restrict TransBtBinvBt,
    real_t *restrict BtXbias,
    real_t *restrict BeTBeChol,
    real_t *restrict BiTBi,
    real_t *restrict TransCtCinvCt,
    real_t *restrict CtCw,
    real_t *restrict CtUbias,
    real_t *restrict B_plus_bias
)
{
    int_t retval = 0;
    /* factors are produced for every row with 'X' or side-info data */
    size_t m_max = max2(m_new, m_u);
    real_t *restrict biasA = NULL;
    real_t *restrict A = (real_t*)malloc(  m_max * (size_t)(k_user+k+k_main)
                                         * sizeof(real_t));
    if (A == NULL) goto throw_oom;
    if (user_bias) {
        biasA = (real_t*)malloc(m_max * sizeof(real_t));
        if (biasA == NULL) goto throw_oom;
    }

    /* step 1: factors for the new users */
    retval = factors_collective_explicit_multiple(
        A, biasA, m_new,
        U, m_u, p,
        NA_as_zero_U, NA_as_zero_X,
        nonneg,
        U_row, U_col, U_sp, nnz_U,
        U_csr_p, U_csr_i, U_csr,
        Ub, m_ubin, pbin,
        C, Cb,
        glob_mean, biasB,
        U_colmeans,
        X, ixA, ixB, nnz,
        Xcsr_p, Xcsr_i, Xcsr,
        Xfull, n,
        weight,
        B,
        Bi, add_implicit_features,
        k, k_user, k_item, k_main,
        lam, lam_unique,
        l1_lam, l1_lam_unique,
        scale_lam, scale_lam_sideinfo,
        scale_bias_const, scaling_biasA,
        w_main, w_user, w_implicit,
        n_max, include_all_X,
        BtB,
        TransBtBinvBt,
        BtXbias,
        BeTBeChol,
        BiTBi,
        TransCtCinvCt,
        CtCw,
        CtUbias,
        B_plus_bias,
        nthreads
    );
    if (retval != 0)
        goto cleanup;

    /* step 2: predictions from the freshly-computed factors */
    retval = predict_X_old_collective_explicit(
        row, col, predicted, n_predict,
        A, biasA,
        B, biasB,
        glob_mean,
        k, k_user, k_item, k_main,
        m_max, n_max,
        nthreads
    );
    if (retval != 0)
        goto cleanup;

    /* NOTE(review): 'predict_X_old_collective_explicit' already applies the
       same NaN fallback (with bound m_max rather than m_new); this second
       pass only differs for rows in [m_new, m_max) - confirm intended. */
    for (size_t ix = 0; ix < n_predict; ix++)
    {
        predicted[ix]
            =
        #ifdef _FOR_R
            (!ISNAN(predicted[ix]))?
        #else
            (!isnan(predicted[ix]))?
        #endif
                predicted[ix]
                    :
                (glob_mean
                    + ((biasA != NULL && row[ix] < m_new)? biasA[row[ix]] : 0.)
                    + ((biasB != NULL && col[ix] < n_max)? biasB[col[ix]] : 0.));
    }

    cleanup:
        free(A);
        free(biasA);
        return retval;
    throw_oom:
    {
        retval = 1;
        goto cleanup;
    }
}
/* Implicit-feedback counterpart of 'predict_X_new_collective_explicit':
   computes factors for 'm_new' new users, then predicts the requested
   (row, col) entries (no biases/global mean in this model).
   Returns 0 on success, 1 on out-of-memory, other codes from the factors
   calculation. */
int_t predict_X_new_collective_implicit
(
    /* inputs for predictions */
    int_t m_new,
    int_t row[], int_t col[], real_t *restrict predicted, size_t n_predict,
    int nthreads,
    /* inputs for factors */
    real_t *restrict U, int_t m_u, int_t p,
    bool NA_as_zero_U,
    bool nonneg,
    int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U,
    size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr,
    real_t *restrict X, int_t ixA[], int_t ixB[], size_t nnz,
    size_t *restrict Xcsr_p, int_t *restrict Xcsr_i, real_t *restrict Xcsr,
    real_t *restrict B, int_t n,
    real_t *restrict C,
    real_t *restrict U_colmeans,
    int_t k, int_t k_user, int_t k_item, int_t k_main,
    real_t lam, real_t l1_lam, real_t alpha, real_t w_main, real_t w_user,
    real_t w_main_multiplier,
    bool apply_log_transf,
    real_t *restrict BeTBe,
    real_t *restrict BtB,
    real_t *restrict BeTBeChol,
    real_t *restrict CtUbias
)
{
    int_t retval = 0;
    /* factors are produced for every row with 'X' or side-info data */
    size_t m_max = max2(m_new, m_u);
    real_t *restrict A = (real_t*)malloc(  m_max * (size_t)(k_user+k+k_main)
                                         * sizeof(real_t));
    if (A == NULL) goto throw_oom;

    /* step 1: factors for the new users */
    retval = factors_collective_implicit_multiple(
        A, m_new,
        U, m_u, p,
        NA_as_zero_U,
        nonneg,
        U_row, U_col, U_sp, nnz_U,
        U_csr_p, U_csr_i, U_csr,
        X, ixA, ixB, nnz,
        Xcsr_p, Xcsr_i, Xcsr,
        B, n,
        C,
        U_colmeans,
        k, k_user, k_item, k_main,
        lam, l1_lam, alpha, w_main, w_user,
        w_main_multiplier,
        apply_log_transf,
        BeTBe,
        BtB,
        BeTBeChol,
        CtUbias,
        nthreads
    );
    if (retval == 1)
        goto throw_oom;
    else if (retval != 0)
        goto cleanup;

    /* step 2: predictions from the freshly-computed factors */
    retval = predict_X_old_collective_implicit(
        row, col, predicted, n_predict,
        A,
        B,
        k, k_user, k_item, k_main,
        m_max, n,
        nthreads
    );

    cleanup:
        free(A);
        return retval;
    throw_oom:
    {
        retval = 1;
        goto cleanup;
    }
}
|
GB_binop__islt_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_int32)
// A.*B function (eWiseMult):         GB (_AemultB_01__islt_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__islt_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_int32)
// A*D function (colscale): GB (_AxD__islt_int32)
// D*A function (rowscale): GB (_DxB__islt_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int32)
// C=scalar+B GB (_bind1st__islt_int32)
// C=scalar+B' GB (_bind1st_tran__islt_int32)
// C=A+scalar GB (_bind2nd__islt_int32)
// C=A'+scalar GB (_bind2nd_tran__islt_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_INT32 || GxB_NO_ISLT_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the actual loop lives in the
// included template, which uses the GB_* macros defined above (GB_BINOP
// here is the ISLT int32 operator).
GrB_Info GB (_Cdense_ewise3_noaccum__islt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type was disabled at compile time via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; work is pre-sliced into
// B_ntasks tasks (B_ek_slicing) executed with B_nthreads threads.
GrB_Info GB (_Cdense_accumB__islt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator/type was disabled at compile time via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar, passed type-erased through
// 'p_bwork'.
GrB_Info GB (_Cdense_accumb__islt_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type was disabled at compile time via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; kept as emitted by the code generator
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// ISLT operator per entry via the included meta-template.
GrB_Info GB (_AxD__islt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    // this operator/type was disabled at compile time via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's values, written by the template
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// ISLT operator per entry via the included meta-template.
GrB_Info GB (_DxB__islt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type was disabled at compile time via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's values, written by the template
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the ISLT (int32) operator. Work is
// pre-partitioned into TaskList entries; the ek-slicing workspaces declared
// here are used by the template and released by GB_FREE_WORK.
GrB_Info GB (_AaddB__islt_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator/type was disabled at compile time via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (general case, method 01) with the
// ISLT (int32) operator; the included meta-template does the work.
GrB_Info GB (_AemultB_01__islt_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator/type was disabled at compile time via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 0 for ISLT, so the flipped-operator branch
// below is compiled out.
GrB_Info GB (_AemultB_02__islt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    // this operator/type was disabled at compile time via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; work is sliced along M.
GrB_Info GB (_AemultB_03__islt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    // this operator/type was disabled at compile time via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is held
// in bitmap form.
GrB_Info GB (_AemultB_bitmap__islt_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator/type was disabled at compile time via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]): apply the ISLT operator with the scalar 'x' bound
// as the first argument, over all 'anz' entries of B. Entries absent from
// the bitmap 'Bb' (when given) are skipped.
GrB_Info GB (_bind1st__islt_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type was disabled at compile time via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the type-erased arguments
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int32_t x = (*((int32_t *) x_input)) ;
    int64_t j ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (j = 0 ; j < anz ; j++)
    {
        if (!GBB (Bb, j)) continue ;
        Cx [j] = (x < Bx [j]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y): apply the ISLT operator with the scalar 'y' bound
// as the second argument, over all 'anz' entries of A. Entries absent from
// the bitmap 'Ab' (when given) are skipped.
GrB_Info GB (_bind2nd__islt_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type was disabled at compile time via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the type-erased arguments
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    int64_t j ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (j = 0 ; j < anz ; j++)
    {
        if (!GBB (Ab, j)) continue ;
        Cx [j] = (Ax [j] < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// per-entry kernel used by GB_unop_transpose.c: cij = (x < aij)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int32_t aij = Ax [pA] ; \
    Cx [pC] = (x < aij) ; \
}

// C = op (x, A'): transpose A and apply ISLT with 'x' bound as 1st argument
GrB_Info GB (_bind1st_tran__islt_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    // this operator/type was disabled at compile time via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// per-entry kernel used by GB_unop_transpose.c: cij = (aij < y)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int32_t aij = Ax [pA] ; \
    Cx [pC] = (aij < y) ; \
}

// C = op (A', y): transpose A and apply ISLT with 'y' bound as 2nd argument
GrB_Info GB (_bind2nd_tran__islt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type was disabled at compile time via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
TimeDiscretization.h | /**
* @file TimeDiscretization.h
* @author N. Fottner
* @date 13/05/19
*/
#pragma once
#include "autopas/AutoPasDecl.h"
#include "autopas/utils/ArrayMath.h"
/**
* Functions for updating velocities and positions as simulation time progresses.
*/
namespace TimeDiscretization {
/**
* Calculate and update the position for every particle using the Störmer-Verlet Algorithm.
* @param autopas
* @param particlePropertiesLibrary
* @param deltaT time step width
*/
template <class AutoPasTemplate, class ParticlePropertiesLibraryTemplate>
void calculatePositions(AutoPasTemplate &autopas, const ParticlePropertiesLibraryTemplate &particlePropertiesLibrary,
const double deltaT) {
// helper declarations for operations with vector
using autopas::utils::ArrayMath::add;
using autopas::utils::ArrayMath::mulScalar;
#ifdef AUTOPAS_OPENMP
#pragma omp parallel
#endif
for (auto iter = autopas.begin(autopas::IteratorBehavior::owned); iter.isValid(); ++iter) {
auto v = iter->getV();
auto m = particlePropertiesLibrary.getMass(iter->getTypeId());
auto f = iter->getF();
iter->setOldF(f);
iter->setF({0., 0., 0.});
v = mulScalar(v, deltaT);
f = mulScalar(f, (deltaT * deltaT / (2 * m)));
auto newR = add(v, f);
iter->addR(newR);
}
}
/**
* Calculate and update the velocity for every particle using the the Störmer-Verlet Algorithm.
* @param autopas
* @param particlePropertiesLibrary
* @param deltaT time step width
*/
template <class AutoPasTemplate, class ParticlePropertiesLibraryTemplate>
void calculateVelocities(AutoPasTemplate &autopas, const ParticlePropertiesLibraryTemplate &particlePropertiesLibrary,
const double deltaT) {
// helper declarations for operations with vector
using autopas::utils::ArrayMath::add;
using autopas::utils::ArrayMath::mulScalar;
#ifdef AUTOPAS_OPENMP
#pragma omp parallel
#endif
for (auto iter = autopas.begin(autopas::IteratorBehavior::owned); iter.isValid(); ++iter) {
auto m = particlePropertiesLibrary.getMass(iter->getTypeId());
auto force = iter->getF();
auto oldForce = iter->getOldf();
auto newV = mulScalar((add(force, oldForce)), deltaT / (2 * m));
iter->addV(newV);
}
}
/**
* Calculate and update the position for every DEM particle using the Störmer-Verlet Algorithm.
* @param autopas
* @param deltaT time step width
*/
template <class AutoPasTemplate, class ParticlePropertiesLibraryTemplate>
void calculatePositionsDEM(AutoPasTemplate &autopas, const ParticlePropertiesLibraryTemplate &particlePropertiesLibrary,
const double deltaT) {
// helper declarations for operations with vector
using autopas::utils::ArrayMath::add;
using autopas::utils::ArrayMath::mulScalar;
#ifdef AUTOPAS_OPENMP
#pragma omp parallel
#endif
for (auto iter = autopas.begin(autopas::IteratorBehavior::owned); iter.isValid(); ++iter) {
auto v = iter->getV();
auto m = iter->getMass();
auto f = iter->getF();
iter->setOldF(f);
iter->setF({0., 0., 0.});
if(m < 1000){
v = mulScalar(v, deltaT);
f = mulScalar(f, (deltaT * deltaT / (2 * m)));
auto newR = add(v, f);
iter->addR(newR);
}
}
}
/**
* Calculate and update the velocity for every DEM particle using the the Störmer-Verlet Algorithm.
* @param autopas
* @param particlePropertiesLibrary
* @param deltaT time step width
*/
template <class AutoPasTemplate, class ParticlePropertiesLibraryTemplate>
void calculateVelocitiesDEM(AutoPasTemplate &autopas, const ParticlePropertiesLibraryTemplate &particlePropertiesLibrary,
const double deltaT) {
// helper declarations for operations with vector
using autopas::utils::ArrayMath::add;
using autopas::utils::ArrayMath::mulScalar;
#ifdef AUTOPAS_OPENMP
#pragma omp parallel
#endif
for (auto iter = autopas.begin(autopas::IteratorBehavior::owned); iter.isValid(); ++iter) {
auto m = iter->getMass();
if(m < 1000){
auto force = iter->getF();
auto oldForce = iter->getOldf();
auto newV = mulScalar((add(force, oldForce)), deltaT / (2 * m));
iter->addV(newV);
}
}
}
} // namespace TimeDiscretization
|
State.h | //===-------- State.h - OpenMP State & ICV interface ------------- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#ifndef OMPTARGET_STATE_H
#define OMPTARGET_STATE_H
#include "Debug.h"
#include "Types.h"
#pragma omp begin declare target device_type(nohost)
namespace _OMP {
namespace state {
inline constexpr uint32_t SharedScratchpadSize = SHARED_SCRATCHPAD_SIZE;
/// Initialize the state machinery. Must be called by all threads.
void init(bool IsSPMD);
/// TODO
enum ValueKind {
VK_NThreads,
VK_Level,
VK_ActiveLevel,
VK_MaxActiveLevels,
VK_RunSched,
// ---
VK_RunSchedChunk,
VK_ParallelRegionFn,
VK_ParallelTeamSize,
};
/// TODO
void enterDataEnvironment(IdentTy *Ident);
/// TODO
void exitDataEnvironment();
/// TODO
struct DateEnvironmentRAII {
DateEnvironmentRAII(IdentTy *Ident) { enterDataEnvironment(Ident); }
~DateEnvironmentRAII() { exitDataEnvironment(); }
};
/// TODO
void resetStateForThread(uint32_t TId);
uint32_t &lookup32(ValueKind VK, bool IsReadonly, IdentTy *Ident);
void *&lookupPtr(ValueKind VK, bool IsReadonly);
/// A class without actual state used to provide a nice interface to lookup and
/// update ICV values we can declare in global scope.
template <typename Ty, ValueKind Kind> struct Value {
__attribute__((flatten, always_inline)) operator Ty() {
return lookup(/* IsReadonly */ true, /* IdentTy */ nullptr);
}
__attribute__((flatten, always_inline)) Value &operator=(const Ty &Other) {
set(Other, /* IdentTy */ nullptr);
return *this;
}
__attribute__((flatten, always_inline)) Value &operator++() {
inc(1, /* IdentTy */ nullptr);
return *this;
}
__attribute__((flatten, always_inline)) Value &operator--() {
inc(-1, /* IdentTy */ nullptr);
return *this;
}
private:
__attribute__((flatten, always_inline)) Ty &lookup(bool IsReadonly,
IdentTy *Ident) {
Ty &t = lookup32(Kind, IsReadonly, Ident);
return t;
}
__attribute__((flatten, always_inline)) Ty &inc(int UpdateVal,
IdentTy *Ident) {
return (lookup(/* IsReadonly */ false, Ident) += UpdateVal);
}
__attribute__((flatten, always_inline)) Ty &set(Ty UpdateVal,
IdentTy *Ident) {
return (lookup(/* IsReadonly */ false, Ident) = UpdateVal);
}
template <typename VTy, typename Ty2> friend struct ValueRAII;
};
/// A mookup class without actual state used to provide
/// a nice interface to lookup and update ICV values
/// we can declare in global scope.
template <typename Ty, ValueKind Kind> struct PtrValue {
__attribute__((flatten, always_inline)) operator Ty() {
return lookup(/* IsReadonly */ true, /* IdentTy */ nullptr);
}
__attribute__((flatten, always_inline)) PtrValue &operator=(const Ty Other) {
set(Other);
return *this;
}
private:
Ty &lookup(bool IsReadonly, IdentTy *) { return lookupPtr(Kind, IsReadonly); }
Ty &set(Ty UpdateVal) {
return (lookup(/* IsReadonly */ false, /* IdentTy */ nullptr) = UpdateVal);
}
template <typename VTy, typename Ty2> friend struct ValueRAII;
};
template <typename VTy, typename Ty> struct ValueRAII {
ValueRAII(VTy &V, Ty NewValue, Ty OldValue, bool Active, IdentTy *Ident)
: Ptr(Active ? &V.lookup(/* IsReadonly */ false, Ident) : nullptr),
Val(OldValue), Active(Active) {
if (!Active)
return;
ASSERT(*Ptr == OldValue &&
"ValueRAII initialization with wrong old value!");
*Ptr = NewValue;
}
~ValueRAII() {
if (Active)
*Ptr = Val;
}
private:
Ty *Ptr;
Ty Val;
bool Active;
};
/// TODO
inline state::Value<uint32_t, state::VK_RunSchedChunk> RunSchedChunk;
/// TODO
inline state::Value<uint32_t, state::VK_ParallelTeamSize> ParallelTeamSize;
/// TODO
inline state::PtrValue<ParallelRegionFnTy, state::VK_ParallelRegionFn>
ParallelRegionFn;
void runAndCheckState(void(Func(void)));
void assumeInitialState(bool IsSPMD);
} // namespace state
namespace icv {
/// TODO
inline state::Value<uint32_t, state::VK_NThreads> NThreads;
/// TODO
inline state::Value<uint32_t, state::VK_Level> Level;
/// The `active-level` describes which of the parallel level counted with the
/// `level-var` is active. There can only be one.
///
/// active-level-var is 1, if ActiveLevelVar is not 0, otherweise it is 0.
inline state::Value<uint32_t, state::VK_ActiveLevel> ActiveLevel;
/// TODO
inline state::Value<uint32_t, state::VK_MaxActiveLevels> MaxActiveLevels;
/// TODO
inline state::Value<uint32_t, state::VK_RunSched> RunSched;
} // namespace icv
namespace memory {
/// Alloca \p Size bytes in shared memory, if possible, for \p Reason.
///
/// Note: See the restrictions on __kmpc_alloc_shared for proper usage.
void *allocShared(uint64_t Size, const char *Reason);
/// Free \p Ptr, alloated via allocShared, for \p Reason.
///
/// Note: See the restrictions on __kmpc_free_shared for proper usage.
void freeShared(void *Ptr, uint64_t Bytes, const char *Reason);
/// Alloca \p Size bytes in global memory, if possible, for \p Reason.
void *allocGlobal(uint64_t Size, const char *Reason);
/// Return a pointer to the dynamic shared memory buffer.
void *getDynamicBuffer();
/// Free \p Ptr, alloated via allocGlobal, for \p Reason.
void freeGlobal(void *Ptr, const char *Reason);
} // namespace memory
} // namespace _OMP
#pragma omp end declare target
#endif
|
GB_binop__max_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__max_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__max_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__max_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint32)
// A*D function (colscale): GB (_AxD__max_uint32)
// D*A function (rowscale): GB (_DxB__max_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__max_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__max_uint32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint32)
// C=scalar+B GB (_bind1st__max_uint32)
// C=scalar+B' GB (_bind1st_tran__max_uint32)
// C=A+scalar GB (_bind2nd__max_uint32)
// C=A'+scalar GB (_bind2nd_tran__max_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT32 || GxB_NO_MAX_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__max_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__max_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__max_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__max_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__max_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IMAX (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__max_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IMAX (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__max_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
5490.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
// printf("Initializing Array\n");
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
{
A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
#pragma omp parallel for private(j) collapse(2) schedule(static, 16) num_threads(1)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
// printf("Kernal computation complete !!\n");
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
reduction-8.c | struct A { int t; };
struct B { char t; };
struct C { unsigned long long t; };
struct D { long t; };
void
add (struct B *x, struct B *y)
{
x->t += y->t;
}
void
zero (struct B *x)
{
x->t = 0;
}
void
orit (struct C *x, struct C *y)
{
y->t |= x->t;
}
#pragma omp declare reduction(+:struct A:omp_out.t += omp_in.t)
#pragma omp declare reduction(+:struct B:add (&omp_out, &omp_in)) initializer(zero (&omp_priv))
#pragma omp declare reduction(*:struct A:omp_out.t *= omp_in.t) initializer(omp_priv = { 1 })
#pragma omp declare reduction(|:struct C:orit (&omp_in, &omp_out))
#pragma omp declare reduction(&:struct D:omp_out.t = omp_out.t & omp_in.t) initializer(omp_priv = { ~0L })
#pragma omp declare reduction(maxb:short:omp_out = omp_in > omp_out ? omp_in : omp_out) initializer(omp_priv = -6)
struct B z[10];
__attribute__((noinline, noclone)) void
foo (struct A (*x)[3][2], struct A *y, struct D w[1][2])
{
struct C a[9] = {};
short b[5] = {};
int i;
#pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \
reduction(*:y[:3]) reduction(|:a[:4]) \
reduction(&:w[0:1][:2]) reduction(maxb:b)
for (i = 0; i < 128; i++)
{
x[i / 64][i % 3][(i / 4) & 1].t += i;
if ((i & 15) == 1)
y[0].t *= 3;
if ((i & 31) == 2)
y[1].t *= 7;
if ((i & 63) == 3)
y[2].t *= 17;
z[i / 32].t += (i & 3);
if (i < 4)
z[i].t += i;
a[i / 32].t |= 1ULL << (i & 30);
w[0][i & 1].t &= ~(1L << (i / 17 * 3));
if ((i % 79) > b[0])
b[0] = i % 79;
if ((i % 13) > b[1])
b[1] = i % 13;
if ((i % 23) > b[2])
b[2] = i % 23;
if ((i % 85) > b[3])
b[3] = i % 85;
if ((i % 192) > b[4])
b[4] = i % 192;
}
for (i = 0; i < 9; i++)
if (a[i].t != (i < 4 ? 0x55555555ULL : 0))
__builtin_abort ();
if (b[0] != 78 || b[1] != 12 || b[2] != 22 || b[3] != 84 || b[4] != 127)
__builtin_abort ();
}
int
main ()
{
struct A a[4][3][2] = {};
static int a2[4][3][2] = {{{ 0, 0 }, { 0, 0 }, { 0, 0 }},
{{ 312, 381 }, { 295, 356 }, { 337, 335 }},
{{ 1041, 975 }, { 1016, 1085 }, { 935, 1060 }},
{{ 0, 0 }, { 0, 0 }, { 0, 0 }}};
struct A y[5] = { { 0 }, { 1 }, { 1 }, { 1 }, { 0 } };
int y2[5] = { 0, 6561, 2401, 289, 0 };
char z2[10] = { 48, 49, 50, 51, 0, 0, 0, 0, 0, 0 };
struct D w[1][2] = { { { ~0L }, { ~0L } } };
foo (&a[1], y + 1, w);
int i, j, k;
for (i = 0; i < 4; i++)
for (j = 0; j < 3; j++)
for (k = 0; k < 2; k++)
if (a[i][j][k].t != a2[i][j][k])
__builtin_abort ();
for (i = 0; i < 5; i++)
if (y[i].t != y2[i])
__builtin_abort ();
for (i = 0; i < 10; i++)
if (z[i].t != z2[i])
__builtin_abort ();
if (w[0][0].t != ~0x249249L || w[0][1].t != ~0x249249L)
__builtin_abort ();
return 0;
}
|
GB_unop__identity_uint16_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_fc32)
// op(A') function: GB (_unop_tran__identity_uint16_fc32)
// C type: uint16_t
// A type: GxB_FC32_t
// cast: uint16_t cij = GB_cast_to_uint16_t ((double) crealf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint16_fc32)
(
uint16_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint16_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pr34610.c | /* PR gcov-profile/34610 */
/* { dg-do compile } */
/* { dg-options "-O2 -fprofile-arcs -fopenmp" } */
/* { dg-require-profiling "-fprofile-generate" } */
extern void bar (int);
extern void baz (int) __attribute__((noreturn));
/* Regression-test body for PR gcov-profile/34610: a mix of OpenMP
 * worksharing constructs — including an orphaned `omp for`, one guarded by
 * a bare `if`, and a loop whose body calls a noreturn function — that
 * -fprofile-arcs instrumentation must handle without ICEing.
 * NOTE: code is intentionally left exactly as-is; this is a compile-only
 * testcase (see the dg-do directive above). */
void
foo (int k)
{
int i;
/* orphaned worksharing loop: no enclosing parallel region here */
#pragma omp for schedule(dynamic)
for (i = 0; i < 10; ++i)
bar (i);
#pragma omp parallel for schedule(static)
for (i = 0; i < 10; ++i)
bar (i);
#pragma omp parallel for schedule(static, 4)
for (i = 0; i < 10; ++i)
bar (i);
/* worksharing loop as the sole statement of an `if`; baz never returns */
if (k)
#pragma omp for schedule(dynamic)
for (i = 0; i < 10; ++i)
baz (i);
/* plain parallel region (every thread runs the whole loop) */
#pragma omp parallel
for (i = 0; i < 10; ++i)
bar (i);
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y as a `struct timeval`.
 *
 * NOTE: *y is used as scratch space and is modified by the call.
 * Returns 1 if the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that x->tv_usec - y->tv_usec lands in [0, 1000000). */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_sec += carry;
    y->tv_usec -= 1000000 * carry;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int borrow = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_sec -= borrow;
    y->tv_usec += 1000000 * borrow;
  }
  /* After normalization tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
/* Default the problem sizes to zero: if the command line is missing or
 * incomplete, the compute guard below (Nt >= 1 && ... >= 9) skips the run
 * instead of reading indeterminate values.  (The original left all four
 * uninitialized when fewer than 4 arguments were given — undefined
 * behavior.)  Usage: prog Nx Ny Nz Nt */
int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
if (argc > 3) {
Nx = atoi(argv[1])+8;  /* +8: two 4-cell halo layers per dimension */
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* A[0]/A[1]: double-buffered wave field (time levels t and t+1);
 * roc2: per-cell coefficient array.  roc2 is allocated exactly once here —
 * the original malloc'd a one-element block and immediately leaked it. */
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 8;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
/* Seed deterministically so runs are reproducible.  Index 0 of each
 * dimension is deliberately left untouched, as in the original code. */
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* order-2 in time, order-4 in space (25-point) stencil coefficients */
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code — time-tiled loop nest generated by PLUTO;
 * the bounds below are machine-generated, do not hand-edit. */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,3);t1++) {
lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(0,ceild(3*t1,2)),ceild(24*t2-Nz+5,8)),3*t1-3*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(12*t1+Ny+15,8)),floord(24*t2+Ny+11,8)),floord(24*t1-24*t2+Nz+Ny+13,8));t3++) {
for (t4=max(max(max(max(0,ceild(3*t1-3*t2-14,16)),ceild(3*t1-30,32)),ceild(24*t2-Nz-115,128)),ceild(8*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(12*t1+Nx+15,128)),floord(24*t2+Nx+11,128)),floord(8*t3+Nx-5,128)),floord(24*t1-24*t2+Nz+Nx+13,128));t4++) {
for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),2*t3),Nt-1),3*t1+5),6*t2+4),32*t4+30);t5++) {
for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
lbv=max(128*t4,4*t5+4);
ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
free(A);          /* the original leaked the top-level pointer array */
free(tile_size);  /* ... and the tile-size list */
return 0;
}
|
sw-full-cs.c | /* $Id: sw-full-cs.c,v 1.15 2009/06/16 23:26:21 rumble Exp $ */
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <zlib.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include "../common/util.h"
#include "../common/fasta.h"
#include "../common/sw-full-common.h"
#include "../common/sw-full-cs.h"
#include "../common/time_counter.h"
/*
 * One cell of the colour-space Smith-Waterman matrix.  Each cell carries
 * four layers (from[0..3], one per possible letter-space translation of the
 * colour read): the running scores of the three incoming directions
 * (north = a read letter consumed, west = a genome letter consumed,
 * northwest = match/mismatch) plus the packed backtrace codes (built with
 * FROM_x below: low 2 bits = source layer, upper bits = direction).
 */
typedef struct swcell {
struct {
int score_n;
int score_w;
int score_nw;
int8_t back_n;
int8_t back_w;
int8_t back_nw;
} from[4];
} swcell;
#define FROM_A 0x00
#define FROM_B 0x01
#define FROM_C 0x02
#define FROM_D 0x03
#define FROM_NORTH_NORTH 0x01
#define FROM_NORTH_NORTHWEST 0x02
#define FROM_WEST_NORTHWEST 0x03
#define FROM_WEST_WEST 0x04
#define FROM_NORTHWEST_NORTH 0x05
#define FROM_NORTHWEST_NORTHWEST 0x06
#define FROM_NORTHWEST_WEST 0x07
#define FROM_x(_mat, _dir) (int8_t)(((_dir) << 2) | (_mat))
#define FROM_A_NORTH_NORTH FROM_x(FROM_A, FROM_NORTH_NORTH)
#define FROM_A_NORTH_NORTHWEST FROM_x(FROM_A, FROM_NORTH_NORTHWEST)
#define FROM_A_WEST_NORTHWEST FROM_x(FROM_A, FROM_WEST_NORTHWEST)
#define FROM_A_WEST_WEST FROM_x(FROM_A, FROM_WEST_WEST)
#define FROM_A_NORTHWEST_NORTH FROM_x(FROM_A, FROM_NORTHWEST_NORTH)
#define FROM_A_NORTHWEST_NORTHWEST FROM_x(FROM_A, FROM_NORTHWEST_NORTHWEST)
#define FROM_A_NORTHWEST_WEST FROM_x(FROM_A, FROM_NORTHWEST_WEST)
#define FROM_B_NORTH_NORTH FROM_x(FROM_B, FROM_NORTH_NORTH)
#define FROM_B_NORTH_NORTHWEST FROM_x(FROM_B, FROM_NORTH_NORTHWEST)
#define FROM_B_WEST_NORTHWEST FROM_x(FROM_B, FROM_WEST_NORTHWEST)
#define FROM_B_WEST_WEST FROM_x(FROM_B, FROM_WEST_WEST)
#define FROM_B_NORTHWEST_NORTH FROM_x(FROM_B, FROM_NORTHWEST_NORTH)
#define FROM_B_NORTHWEST_NORTHWEST FROM_x(FROM_B, FROM_NORTHWEST_NORTHWEST)
#define FROM_B_NORTHWEST_WEST FROM_x(FROM_B, FROM_NORTHWEST_WEST)
#define FROM_C_NORTH_NORTH FROM_x(FROM_C, FROM_NORTH_NORTH)
#define FROM_C_NORTH_NORTHWEST FROM_x(FROM_C, FROM_NORTH_NORTHWEST)
#define FROM_C_WEST_NORTHWEST FROM_x(FROM_C, FROM_WEST_NORTHWEST)
#define FROM_C_WEST_WEST FROM_x(FROM_C, FROM_WEST_WEST)
#define FROM_C_NORTHWEST_NORTH FROM_x(FROM_C, FROM_NORTHWEST_NORTH)
#define FROM_C_NORTHWEST_NORTHWEST FROM_x(FROM_C, FROM_NORTHWEST_NORTHWEST)
#define FROM_C_NORTHWEST_WEST FROM_x(FROM_C, FROM_NORTHWEST_WEST)
#define FROM_D_NORTH_NORTH FROM_x(FROM_D, FROM_NORTH_NORTH)
#define FROM_D_NORTH_NORTHWEST FROM_x(FROM_D, FROM_NORTH_NORTHWEST)
#define FROM_D_WEST_NORTHWEST FROM_x(FROM_D, FROM_WEST_NORTHWEST)
#define FROM_D_WEST_WEST FROM_x(FROM_D, FROM_WEST_WEST)
#define FROM_D_NORTHWEST_NORTH FROM_x(FROM_D, FROM_NORTHWEST_NORTH)
#define FROM_D_NORTHWEST_NORTHWEST FROM_x(FROM_D, FROM_NORTHWEST_NORTHWEST)
#define FROM_D_NORTHWEST_WEST FROM_x(FROM_D, FROM_NORTHWEST_WEST)
enum {
BACK_INSERTION = 1,
BACK_A_DELETION,
BACK_B_DELETION,
BACK_C_DELETION,
BACK_D_DELETION,
BACK_A_MATCH_MISMATCH,
BACK_B_MATCH_MISMATCH,
BACK_C_MATCH_MISMATCH,
BACK_D_MATCH_MISMATCH
};
/* Per-thread alignment state; everything here is made threadprivate by the
 * pragma at the end of this declaration block. */
static int initialised;
/* db: genome (database) letters; qr[k]: the read translated in layer k */
static int8_t *db, *qr[4];
static int dblen, qrlen;
/* affine gap penalties; a_* is charged on "west" moves (genome consumed)
 * and b_* on "north" moves (read consumed) — see full_sw() below */
static int a_gap_open, a_gap_ext, b_gap_open, b_gap_ext;
static int match, mismatch;
/* default colour-space crossover penalty (per-position scores may override) */
static int global_xover_penalty;
static struct swcell *swmatrix;          /* the DP matrix, (lena+1)*(lenb+1) cells */
static uint8_t *backtrace;               /* BACK_* codes, possibly OR'd with BT_CROSSOVER */
static char *dbalign, *qralign;          /* pretty-printed alignment output buffers */
static int anchor_width;
static int indel_taboo_len;              /* no indel may start within this many trailing read positions */
/* statistics */
static uint64_t swcells, swinvocs;
static time_counter sw_tc;
#pragma omp threadprivate(initialised,db,qr,dblen,qrlen,a_gap_open,a_gap_ext,b_gap_open,b_gap_ext,match,mismatch,global_xover_penalty,\
swmatrix,backtrace,dbalign,qralign,sw_tc,swcells,swinvocs,indel_taboo_len)
#define BT_CROSSOVER 0x80
#define BT_CLIPPED 0xf0
#define BT_ISCROSSOVER(_x) ((_x) & BT_CROSSOVER)
#define BT_TYPE(_x) ((_x) & 0x0f)
#ifdef DEBUG_CROSSOVERS
static int _glen;
static int _rlen;
#endif
#ifdef DEBUG_SW
/*
 * Debug dump of the four-layer score matrix: for each letter-space layer,
 * print a header row of genome (db) letters, then one row per read
 * position showing the best of the three incoming scores in every cell,
 * clamped at -99 so the %5d columns stay aligned.
 */
static void print_sw(int lena, int lenb) {
printf("len a %d, len b %d\n",lena,lenb);
int layer;
for (layer = 0; layer < 4; layer++) {
int row, col;
/* column header: the database (genome) letters */
printf(" %5s ","-");
for (col = 1; col < lena + 1; col++)
printf("%5c ",base_translate(db[col-1],false));
printf("\n");
/* one row per read position (row 0 is the boundary row) */
for (row = 0; row < lenb + 1; row++) {
if (row == 0)
printf(" - ");
else
printf("%5c ",base_translate(qr[layer][row-1],false));
for (col = 0; col < lena + 1; col++) {
swcell cell = swmatrix[row*(lena+1)+col];
int best = MAX(cell.from[layer].score_n, cell.from[layer].score_w);
best = MAX(best, cell.from[layer].score_nw);
if (best < -99)
best = -99;
printf("%5d ",best);
}
printf("\n");
}
}
}
#endif
/*
static void print_sw_backtrace(int lena, int lenb) {
int i,j;
printf(" %5s ","-");
for (j=1; j< lenb+1; j++) {
printf("%5c ",base_translate(qr[j-1],false));
}
printf("\n");
//rows
for (i=0; i<lena+1; i++) {
//cols
if (i==0) {
printf(" - ");
} else {
printf("%5c ",base_translate(db[i-1],false));
}
for (j=0; j<lenb+1; j++) {
swcell curr=swmatrix[j*(lena+1)+i];
int btrace[3]={0,0,0};
int maxscore=0;
maxscore=MAX(curr.score_north,curr.score_west);
maxscore=MAX(maxscore,curr.score_northwest);
if (curr.score_west==maxscore) {
btrace[0]=curr.back_west;
}
if (curr.score_northwest==maxscore) {
btrace[1]=curr.back_northwest;
}
if (curr.score_north==maxscore) {
btrace[2]=curr.back_north;
}
printf("%d/%d/%d ",btrace[0],btrace[1],btrace[2]);
1}
printf("\n");
}
}*/
/*
 * Reset one matrix cell (flat index idx) to its boundary value.
 *
 * Layer 0 is the reference translation of the read; layers 1-3 start out
 * already paying the crossover penalty.  For local alignment the cell is
 * seeded so a fresh alignment may begin here; otherwise it is effectively
 * minus infinity (-INT_MAX/2 leaves headroom so later subtracting gap
 * costs cannot overflow).  All backtrace codes are cleared.
 */
inline static void
init_cell(int idx, int local_alignment, int xover_penalty) {
int k;
for (k = 0; k < 4; k++) {
int seed = (k == 0) ? 0 : xover_penalty;
if (local_alignment) {
swmatrix[idx].from[k].score_nw = seed;
swmatrix[idx].from[k].score_n = seed - b_gap_open;
swmatrix[idx].from[k].score_w = seed - a_gap_open;
} else {
swmatrix[idx].from[k].score_nw = -INT_MAX/2;
swmatrix[idx].from[k].score_n = -INT_MAX/2;
swmatrix[idx].from[k].score_w = -INT_MAX/2;
}
swmatrix[idx].from[k].back_nw = 0;
swmatrix[idx].from[k].back_n = 0;
swmatrix[idx].from[k].back_w = 0;
}
}
/*
* Perform a full Smith-Waterman alignment. For the colour case, this means
* computing each possible letter space read string and doing a four layer
* scan.
*/
//lena - genome length
/*
 * Fill phase of the four-layer, anchor-banded Smith-Waterman.
 *
 * lena/lenb: genome/read lengths.  threshscore: minimum interesting score,
 * used only to size the band when no anchors are given.  On return
 * *iret/*jret/*kret hold the read index, genome index and layer of the
 * best-scoring cell; the best score is returned.  revcmpl flips tie-break
 * order so forward and reverse-complement alignments are symmetric.
 * crossover_score, if non-NULL, gives a per-read-position crossover
 * penalty overriding global_xover_penalty.
 */
static int
full_sw(int lena, int lenb, int threshscore, int *iret, int *jret,
int *kret, bool revcmpl,
struct anchor * anchors, int anchors_cnt, int local_alignment, int * crossover_score)
{
int i, j, k, l, max_i, max_j, max_k;
int score, ms, tmp, resetval, xover_penalty;
//int go, ge;
//int sw_band, ne_band;
int8_t tmp2;
struct anchor rectangle;
/* shut up gcc */
max_i = max_j = max_k = j = 0;
score = 0;
//go = gap_open;
//ge = gap_ext;
/* boundary row 0: always seeded as local-alignment starts */
for (j = 0; j < lena + 1; j++) {
init_cell(j, 1, global_xover_penalty);
}
//for (j = 0; j < lenb + 1; j++) {
//init_cell(j * (lena + 1));
//}
/*
* Figure out our band.
* We can actually skip computation of a significant number of
* cells, which could never be part of an alignment corresponding
* to our threshhold score.
*/
//sw_band = ((lenb * match - threshscore + match - 1) / match) + 1;
//ne_band = lena - (lenb - sw_band);
if (anchors != NULL && anchor_width >= 0) {
anchor_join(anchors, anchors_cnt, &rectangle);
anchor_widen(&rectangle, anchor_width);
} else {
/* no usable anchors: synthesize a corner-to-corner rectangle wide
 * enough for any alignment that could still reach threshscore */
struct anchor tmp_anchors[2];
tmp_anchors[0].x = 0;
tmp_anchors[0].y = (lenb * match - threshscore) / match;
tmp_anchors[0].length = 1;
tmp_anchors[0].width = 1;
tmp_anchors[1].x = lena - 1;
tmp_anchors[1].y = lenb - 1 - tmp_anchors[0].y;
tmp_anchors[1].length = 1;
tmp_anchors[1].width = 1;
anchor_join(tmp_anchors, 2, &rectangle);
}
for (i = 0; i < lenb; i++) {
/*
* computing row i of virtual matrix, stored in row i+1
*/
int x_min, x_max;
xover_penalty = (crossover_score == NULL? global_xover_penalty : crossover_score[i]);
anchor_get_x_range(&rectangle, lena, lenb, i, &x_min, &x_max);
/* seed the cell just left of this row's band */
if (!local_alignment) {
//x_max=MIN(lena,x_max); x_min=MAX(0,x_min-lenb/40);
//init_cell(i * (lena + 1) + x_max + 1, 0);
//init_cell((i + 1) * (lena + 1) + (x_min - 1) + 1, x_min == 0 ? 1 : 0);
init_cell((i + 1) * (lena + 1) + (x_min - 1) + 1, 0, xover_penalty);
} else {
init_cell((i + 1) * (lena + 1) + (x_min - 1) + 1, 1, xover_penalty);
}
//if (x_min > 0) {
//fprintf(stderr,"INIT cell %d , %d, %d\n",i+1, (x_min-1)+1,x_max);
//}
swcells += x_max - x_min + 1;
for (j = x_min; j <= x_max; j++) {
/*
* computing column j of virtual matrix, stored in column j+1
*/
struct swcell *cell_nw, *cell_n, *cell_w, *cell_cur;
cell_nw = &swmatrix[i * (lena + 1) + j];
cell_n = cell_nw + 1;
cell_w = cell_nw + (lena + 1);
cell_cur = cell_w + 1;
/* banding */
//if (i >= sw_band + j) {
//memset(cell_cur, 0, sizeof(*cell_cur));
//continue;
//}
//if (j >= ne_band + i) {
//memset(cell_cur, 0, sizeof(*cell_cur));
//break;
//}
for (k = 0; k < 4; k++) {
/* a fresh start in layer k != 0 costs the crossover penalty */
if (k != 0)
resetval = xover_penalty;
else
resetval = 0;
/*
* northwest
*/
if (db[j] == BASE_N || qr[k][i] == BASE_N)
ms = 0;
else
ms = (db[j] == qr[k][i]) ? match : mismatch;
/* the two if/else arms below differ only in the order the three
 * predecessors are tried, which flips tie-breaking for revcmpl */
if (!revcmpl) {
tmp = cell_nw->from[k].score_nw + ms;
tmp2 = FROM_x(k, FROM_NORTHWEST_NORTHWEST);
// end of an insertion: not in taboo zone
if (i < lenb - indel_taboo_len && cell_nw->from[k].score_n + ms > tmp) {
tmp = cell_nw->from[k].score_n + ms;
tmp2 = FROM_x(k, FROM_NORTHWEST_NORTH);
}
// end of a deletion
if (cell_nw->from[k].score_w + ms > tmp) {
tmp = cell_nw->from[k].score_w + ms;
tmp2 = FROM_x(k, FROM_NORTHWEST_WEST);
}
} else {
//end of a deletion
tmp = cell_nw->from[k].score_w + ms;
tmp2 = FROM_x(k, FROM_NORTHWEST_WEST);
//end of an insertion: not in taboo zone
if (i < lenb - indel_taboo_len && cell_nw->from[k].score_n + ms > tmp) {
tmp = cell_nw->from[k].score_n + ms;
tmp2 = FROM_x(k, FROM_NORTHWEST_NORTH);
}
if (cell_nw->from[k].score_nw + ms > tmp) {
tmp = cell_nw->from[k].score_nw + ms;
tmp2 = FROM_x(k, FROM_NORTHWEST_NORTHWEST);
}
}
/* check neighbours */
/* crossing over from another layer l pays xover_penalty */
for (l = 0; l < 4; l++) {
if (l == k)
continue;
if (!revcmpl) {
/* northwest */
if (cell_nw->from[l].score_nw + ms + xover_penalty > tmp) {
tmp = cell_nw->from[l].score_nw + ms + xover_penalty;
tmp2 = FROM_x(l, FROM_NORTHWEST_NORTHWEST);
}
/* north */ // end of insertion, not in taboo zone
if (i < lenb - indel_taboo_len && cell_nw->from[l].score_n + ms + xover_penalty > tmp) {
tmp = cell_nw->from[l].score_n + ms + xover_penalty;
tmp2 = FROM_x(l, FROM_NORTHWEST_NORTH);
}
/* west */
if (cell_nw->from[l].score_w + ms + xover_penalty > tmp) {
tmp = cell_nw->from[l].score_w + ms + xover_penalty;
tmp2 = FROM_x(l, FROM_NORTHWEST_WEST);
}
} else {
/* west */
if (cell_nw->from[l].score_w + ms + xover_penalty > tmp) {
tmp = cell_nw->from[l].score_w + ms + xover_penalty;
tmp2 = FROM_x(l, FROM_NORTHWEST_WEST);
}
/* north */ // end of insertion, not in taboo zone
if (i < lenb - indel_taboo_len && cell_nw->from[l].score_n + ms + xover_penalty > tmp) {
tmp = cell_nw->from[l].score_n + ms + xover_penalty;
tmp2 = FROM_x(l, FROM_NORTHWEST_NORTH);
}
/* northwest */
if (cell_nw->from[l].score_nw + ms + xover_penalty > tmp) {
tmp = cell_nw->from[l].score_nw + ms + xover_penalty;
tmp2 = FROM_x(l, FROM_NORTHWEST_NORTHWEST);
}
}
}
/* local alignment: never drop below the fresh-start value */
if (tmp <= resetval && local_alignment) {
tmp = resetval;
tmp2 = 0;
}
cell_cur->from[k].score_nw = tmp;
cell_cur->from[k].back_nw = tmp2;
/*
* north
*/
if (!revcmpl) {
// insertion start
tmp = cell_n->from[k].score_nw - b_gap_open - b_gap_ext;
tmp2 = FROM_x(k, FROM_NORTH_NORTHWEST);
if (!(i < lenb - indel_taboo_len) || cell_n->from[k].score_n - b_gap_ext > tmp) {
tmp = cell_n->from[k].score_n - b_gap_ext;
tmp2 = FROM_x(k, FROM_NORTH_NORTH);
}
} else {
tmp = cell_n->from[k].score_n - b_gap_ext;
tmp2 = FROM_x(k, FROM_NORTH_NORTH);
// insertion start
if (i < lenb - indel_taboo_len && cell_n->from[k].score_nw - b_gap_open - b_gap_ext > tmp) {
tmp = cell_n->from[k].score_nw - b_gap_open - b_gap_ext;
tmp2 = FROM_x(k, FROM_NORTH_NORTHWEST);
}
}
/* check neighbours */
for (l = 0; l < 4; l++) {
if (l == k)
continue;
if (!revcmpl) {
/* northwest */ // insertion start
if (i < lenb - indel_taboo_len && cell_n->from[l].score_nw - b_gap_open - b_gap_ext + xover_penalty > tmp) {
tmp = cell_n->from[l].score_nw - b_gap_open - b_gap_ext + xover_penalty;
tmp2 = FROM_x(l, FROM_NORTH_NORTHWEST);
}
/* north */
if (cell_n->from[l].score_n - b_gap_ext + xover_penalty > tmp) {
tmp = cell_n->from[l].score_n - b_gap_ext + xover_penalty;
tmp2 = FROM_x(l, FROM_NORTH_NORTH);
}
} else {
/* north */
if (cell_n->from[l].score_n - b_gap_ext + xover_penalty > tmp) {
tmp = cell_n->from[l].score_n - b_gap_ext + xover_penalty;
tmp2 = FROM_x(l, FROM_NORTH_NORTH);
}
/* northwest */ // insertion start
if (i < lenb - indel_taboo_len && cell_n->from[l].score_nw - b_gap_open - b_gap_ext + xover_penalty > tmp) {
tmp = cell_n->from[l].score_nw - b_gap_open - b_gap_ext + xover_penalty;
tmp2 = FROM_x(l, FROM_NORTH_NORTHWEST);
}
}
}
if (tmp <= resetval && local_alignment) {
tmp = resetval;
tmp2 = 0;
}
cell_cur->from[k].score_n = tmp;
cell_cur->from[k].back_n = tmp2;
/*
* west
*/
if (!revcmpl) {
// deletion start
tmp = cell_w->from[k].score_nw - a_gap_open - a_gap_ext;
tmp2 = FROM_x(k, FROM_WEST_NORTHWEST);
if (!(i < lenb - indel_taboo_len) || cell_w->from[k].score_w - a_gap_ext > tmp) {
tmp = cell_w->from[k].score_w - a_gap_ext;
tmp2 = FROM_x(k, FROM_WEST_WEST);
}
} else {
tmp = cell_w->from[k].score_w - a_gap_ext;
tmp2 = FROM_x(k, FROM_WEST_WEST);
// deletion start
if (i < lenb - indel_taboo_len && cell_w->from[k].score_nw - a_gap_open - a_gap_ext > tmp) {
tmp = cell_w->from[k].score_nw - a_gap_open - a_gap_ext;
tmp2 = FROM_x(k, FROM_WEST_NORTHWEST);
}
}
/*
* NB: It doesn't make sense to cross over on a
* genomic gap, so we won't.
*/
if (tmp <= resetval && local_alignment) {
tmp = resetval;
tmp2 = 0;
}
cell_cur->from[k].score_w = tmp;
cell_cur->from[k].back_w = tmp2;
/*
* max score
*/
/* global alignment only considers cells on the last read row */
if (local_alignment || i==lenb-1) {
if (!revcmpl) {
if (cell_cur->from[k].score_nw > score) {
score = cell_cur->from[k].score_nw;
max_i = i, max_j = j, max_k = k;
}
if (cell_cur->from[k].score_n > score) {
score = cell_cur->from[k].score_n;
max_i = i, max_j = j, max_k = k;
}
if (cell_cur->from[k].score_w > score) {
score = cell_cur->from[k].score_w;
max_i = i, max_j = j, max_k = k;
}
} else {
if (cell_cur->from[k].score_w > score) {
score = cell_cur->from[k].score_w;
max_i = i, max_j = j, max_k = k;
}
if (cell_cur->from[k].score_n > score) {
score = cell_cur->from[k].score_n;
max_i = i, max_j = j, max_k = k;
}
if (cell_cur->from[k].score_nw > score) {
score = cell_cur->from[k].score_nw;
max_i = i, max_j = j, max_k = k;
}
}
}
#ifdef DEBUG_SW
fprintf(stderr, "i:%d j:%d k:%d score_nw:%d [%u,%s] score_n:%d [%u,%s] score_w:%d [%u,%s] xover_penalty:%d\n", i+1, j+1, k,
cell_cur->from[k].score_nw, cell_cur->from[k].back_nw & 0x3,
(cell_cur->from[k].back_nw >> 2 == 0 ? "!" :
(cell_cur->from[k].back_nw >> 2 == FROM_NORTHWEST_NORTH ? "n" :
(cell_cur->from[k].back_nw >> 2 == FROM_NORTHWEST_NORTHWEST ? "nw" : "w"))),
cell_cur->from[k].score_n, cell_cur->from[k].back_n & 0x3,
(cell_cur->from[k].back_n >> 2 == 0 ? "!" :
(cell_cur->from[k].back_n >> 2 == FROM_NORTH_NORTH ? "n" : "nw")),
cell_cur->from[k].score_w, cell_cur->from[k].back_w & 0x3,
(cell_cur->from[k].back_w >> 2 == 0 ? "!" :
(cell_cur->from[k].back_w >> 2 == FROM_WEST_NORTHWEST ? "nw" : "w")),
xover_penalty);
#endif
}
}
/* cells of the next row's band that extend past this row's band must be
 * re-initialized before they are read as "west" neighbours */
if (i+1 < lenb) {
int next_x_min, next_x_max;
anchor_get_x_range(&rectangle, lena, lenb, i+1, &next_x_min, &next_x_max);
for (j = x_max + 1; j <= next_x_max; j++) {
//fprintf(stderr,"Init cell %d , , %d\n",i+1,j+1);
init_cell((i + 1) * (lena + 1) + (j + 1), local_alignment, xover_penalty); // still xover on i-th color
}
}
}
#ifdef DEBUG_SW
fprintf(stderr, "max_i:%d max_j:%d max_k:%d\n", max_i+1, max_j+1, max_k);
#endif
*iret = max_i;
*jret = max_j;
*kret = max_k;
//print_sw(lena,lenb);
return (score);
}
/*
* Fill in the backtrace in order to do a pretty printout.
*
* Returns the beginning matrix cell (i, j) in 'sfr->read_start' and
* 'sfr->genome_start'.
*
* The return value is the first valid offset in the backtrace buffer.
*/
/*
 * Walk the FROM_* chain from the best cell (i, j, layer k) back to the
 * start of the alignment, filling the global `backtrace` buffer from its
 * far end so the result reads left-to-right.  Updates the per-alignment
 * counters in *sfr (matches/mismatches/insertions/deletions/crossovers)
 * and records the start coordinates; returns the first valid offset in
 * `backtrace`.
 */
static int
do_backtrace(int lena, int i, int j, int k, struct sw_full_results *sfr)
{
struct swcell *cell;
int off, from, fromscore;
/* fill the buffer from its end; off walks backwards */
off = (dblen + qrlen) - 1;
/* pick the best-scoring incoming direction of the final cell */
cell = &swmatrix[(i + 1) * (lena + 1) + j + 1];
from = cell->from[k].back_nw;
fromscore = cell->from[k].score_nw;
if (cell->from[k].score_w > fromscore) {
from = cell->from[k].back_w;
fromscore = cell->from[k].score_w;
}
if (cell->from[k].score_n > fromscore)
from = cell->from[k].back_n;
/* back code 0 means "alignment start" — it must not be the final cell */
if (from == 0) {
int l, base;
fprintf(stderr, "Assertion failed.\nQr:");
for (l = 1, base = qr[0][0]; l < qrlen; l++) {
fprintf(stderr, "%d", lstocs(base, qr[0][l], false));
base = qr[0][l];
}
fprintf(stderr, "\n");
}
assert(from != 0);
/* fill out the backtrace */
while (i >= 0 && j >= 0) {
//printf("i %d, j %d\n",i,j);
assert(off >= 0);
cell = NULL;
/* common operations first */
/* step 1: move i/j and bump the edit counters by direction class */
switch (from) {
case FROM_A_NORTH_NORTH:
case FROM_A_NORTH_NORTHWEST:
case FROM_B_NORTH_NORTH:
case FROM_B_NORTH_NORTHWEST:
case FROM_C_NORTH_NORTH:
case FROM_C_NORTH_NORTHWEST:
case FROM_D_NORTH_NORTH:
case FROM_D_NORTH_NORTHWEST:
sfr->deletions++;
sfr->read_start = i--;
break;
case FROM_A_WEST_WEST:
case FROM_A_WEST_NORTHWEST:
case FROM_B_WEST_WEST:
case FROM_B_WEST_NORTHWEST:
case FROM_C_WEST_WEST:
case FROM_C_WEST_NORTHWEST:
case FROM_D_WEST_WEST:
case FROM_D_WEST_NORTHWEST:
sfr->insertions++;
sfr->genome_start = j--;
break;
case FROM_A_NORTHWEST_NORTH:
case FROM_A_NORTHWEST_NORTHWEST:
case FROM_A_NORTHWEST_WEST:
case FROM_B_NORTHWEST_NORTH:
case FROM_B_NORTHWEST_NORTHWEST:
case FROM_B_NORTHWEST_WEST:
case FROM_C_NORTHWEST_NORTH:
case FROM_C_NORTHWEST_NORTHWEST:
case FROM_C_NORTHWEST_WEST:
case FROM_D_NORTHWEST_NORTH:
case FROM_D_NORTHWEST_NORTHWEST:
case FROM_D_NORTHWEST_WEST:
if (db[j] == qr[k][i] || db[j] == BASE_N || qr[k][i] == BASE_N)
sfr->matches++;
else
sfr->mismatches++;
sfr->read_start = i--;
sfr->genome_start = j--;
break;
default:
fprintf(stderr, "INTERNAL ERROR: from = %d\n", from);
assert(0);
}
/* handle match/mismatch and north */
/* step 2: record the BACK_* op (layer-specific) for pretty_print */
switch (from) {
case FROM_A_NORTH_NORTH:
case FROM_A_NORTH_NORTHWEST:
case FROM_B_NORTH_NORTH:
case FROM_B_NORTH_NORTHWEST:
case FROM_C_NORTH_NORTH:
case FROM_C_NORTH_NORTHWEST:
case FROM_D_NORTH_NORTH:
case FROM_D_NORTH_NORTHWEST:
switch(k) {
case 0:
backtrace[off]= BACK_A_DELETION;
break;
case 1:
backtrace[off]= BACK_B_DELETION;
break;
case 2:
backtrace[off]= BACK_C_DELETION;
break;
case 3:
backtrace[off]= BACK_D_DELETION;
break;
default:
fprintf(stderr, "INTERNAL ERROR: k = %d\n", k);
assert(0);
}
break;
case FROM_A_WEST_WEST:
case FROM_A_WEST_NORTHWEST:
case FROM_B_WEST_WEST:
case FROM_B_WEST_NORTHWEST:
case FROM_C_WEST_WEST:
case FROM_C_WEST_NORTHWEST:
case FROM_D_WEST_WEST:
case FROM_D_WEST_NORTHWEST:
/* doesn't make sense to cross over on a genomic gap */
backtrace[off] = BACK_INSERTION;
break;
case FROM_A_NORTHWEST_NORTH:
case FROM_A_NORTHWEST_NORTHWEST:
case FROM_A_NORTHWEST_WEST:
case FROM_B_NORTHWEST_NORTH:
case FROM_B_NORTHWEST_NORTHWEST:
case FROM_B_NORTHWEST_WEST:
case FROM_C_NORTHWEST_NORTH:
case FROM_C_NORTHWEST_NORTHWEST:
case FROM_C_NORTHWEST_WEST:
case FROM_D_NORTHWEST_NORTH:
case FROM_D_NORTHWEST_NORTHWEST:
case FROM_D_NORTHWEST_WEST:
switch(k) {
case 0:
backtrace[off] = BACK_A_MATCH_MISMATCH;
break;
case 1:
backtrace[off] = BACK_B_MATCH_MISMATCH;
break;
case 2:
backtrace[off] = BACK_C_MATCH_MISMATCH;
break;
case 3:
backtrace[off] = BACK_D_MATCH_MISMATCH;
break;
default:
fprintf(stderr, "INTERNAL ERROR: k = %d\n", k);
assert(0);
}
break;
default:
fprintf(stderr, "INTERNAL ERROR: from = %d\n", from);
assert(0);
}
/* set k */
/* step 3: if the predecessor lives in another layer, flag a crossover */
switch (from) {
case FROM_A_NORTH_NORTH:
case FROM_A_NORTH_NORTHWEST:
case FROM_A_WEST_WEST:
case FROM_A_WEST_NORTHWEST:
case FROM_A_NORTHWEST_NORTH:
case FROM_A_NORTHWEST_NORTHWEST:
case FROM_A_NORTHWEST_WEST:
if (k != 0) {
backtrace[off] |= BT_CROSSOVER;
sfr->crossovers++;
k = 0;
}
break;
case FROM_B_NORTH_NORTH:
case FROM_B_NORTH_NORTHWEST:
case FROM_B_WEST_WEST:
case FROM_B_WEST_NORTHWEST:
case FROM_B_NORTHWEST_NORTH:
case FROM_B_NORTHWEST_NORTHWEST:
case FROM_B_NORTHWEST_WEST:
if (k != 1) {
backtrace[off] |= BT_CROSSOVER;
sfr->crossovers++;
k = 1;
}
break;
case FROM_C_NORTH_NORTH:
case FROM_C_NORTH_NORTHWEST:
case FROM_C_WEST_WEST:
case FROM_C_WEST_NORTHWEST:
case FROM_C_NORTHWEST_NORTH:
case FROM_C_NORTHWEST_NORTHWEST:
case FROM_C_NORTHWEST_WEST:
if (k != 2) {
backtrace[off] |= BT_CROSSOVER;
sfr->crossovers++;
k = 2;
}
break;
case FROM_D_NORTH_NORTH:
case FROM_D_NORTH_NORTHWEST:
case FROM_D_WEST_WEST:
case FROM_D_WEST_NORTHWEST:
case FROM_D_NORTHWEST_NORTH:
case FROM_D_NORTHWEST_NORTHWEST:
case FROM_D_NORTHWEST_WEST:
if (k != 3) {
backtrace[off] |= BT_CROSSOVER;
sfr->crossovers++;
k = 3;
}
break;
default:
fprintf(stderr, "INTERNAL ERROR: from = %d\n", from);
assert(0);
}
/*
* Continue backtrace (nb: i,j and k have already been changed).
*/
/* step 4: load the predecessor's back code for the next iteration */
cell = &swmatrix[(i + 1) * (lena + 1) + j + 1];
switch (from) {
case FROM_A_NORTH_NORTH:
case FROM_B_NORTH_NORTH:
case FROM_C_NORTH_NORTH:
case FROM_D_NORTH_NORTH:
from = cell->from[k].back_n;
break;
case FROM_A_NORTH_NORTHWEST:
case FROM_B_NORTH_NORTHWEST:
case FROM_C_NORTH_NORTHWEST:
case FROM_D_NORTH_NORTHWEST:
from = cell->from[k].back_nw;
break;
case FROM_A_WEST_WEST:
case FROM_B_WEST_WEST:
case FROM_C_WEST_WEST:
case FROM_D_WEST_WEST:
from = cell->from[k].back_w;
break;
case FROM_A_WEST_NORTHWEST:
case FROM_B_WEST_NORTHWEST:
case FROM_C_WEST_NORTHWEST:
case FROM_D_WEST_NORTHWEST:
from = cell->from[k].back_nw;
break;
case FROM_A_NORTHWEST_NORTH:
case FROM_B_NORTHWEST_NORTH:
case FROM_C_NORTHWEST_NORTH:
case FROM_D_NORTHWEST_NORTH:
from = cell->from[k].back_n;
break;
case FROM_A_NORTHWEST_NORTHWEST:
case FROM_B_NORTHWEST_NORTHWEST:
case FROM_C_NORTHWEST_NORTHWEST:
case FROM_D_NORTHWEST_NORTHWEST:
from = cell->from[k].back_nw;
break;
case FROM_A_NORTHWEST_WEST:
case FROM_B_NORTHWEST_WEST:
case FROM_C_NORTHWEST_WEST:
case FROM_D_NORTHWEST_WEST:
from = cell->from[k].back_w;
break;
default:
fprintf(stderr, "INTERNAL ERROR: from = %d\n", from);
assert(0);
}
off--;
if (from == 0)
break;
}
off++;
/* if the alignment begins in a non-reference layer, that start is itself
 * a crossover */
if (k != 0) {
backtrace[off] |= BT_CROSSOVER;
sfr->crossovers++;
}
return (off);
}
/*
* Pretty print our alignment of 'db' and 'qr' in 'dbalign' and 'qralign'.
*
* i, j represent the beginning cell in the matrix.
* k is the first valid offset in the backtrace buffer.
*/
static void
pretty_print(int i, int j, int k)
{
char *d, *q;
int l;
d = dbalign;
q = qralign;
for (l = k; l < (dblen + qrlen); l++) {
#ifdef DEBUG_CROSSOVERS
int a;
if (BT_ISCROSSOVER(backtrace[l])
&& (BT_TYPE(backtrace[l]) == BACK_A_DELETION
|| BT_TYPE(backtrace[l]) == BACK_B_DELETION
|| BT_TYPE(backtrace[l]) == BACK_C_DELETION
|| BT_TYPE(backtrace[l]) == BACK_D_DELETION)) {
fprintf(stderr, "sw-full-cs: crossover in \"deletion\" (really, insertion):\n");
fprintf(stderr, "db:");
for (a = 0; a < _glen; a++)
fprintf(stderr, "%c", base_translate(db[a], false));
fprintf(stderr, "\n");
fprintf(stderr, "qr[0]:");
for (a = 0; a < _rlen; a++)
fprintf(stderr, "%c", base_translate(qr[0][a], false));
fprintf(stderr, "\n");
}
#endif
switch (BT_TYPE(backtrace[l])) {
case BACK_A_DELETION:
*d++ = '-';
if (BT_ISCROSSOVER(backtrace[l]))
*q++ = (char)tolower((int)base_translate(qr[0][i++], false));
else
*q++ = base_translate(qr[0][i++], false);
break;
case BACK_B_DELETION:
*d++ = '-';
if (BT_ISCROSSOVER(backtrace[l]))
*q++ = (char)tolower((int)base_translate(qr[1][i++], false));
else
*q++ = base_translate(qr[1][i++], false);
break;
case BACK_C_DELETION:
*d++ = '-';
if (BT_ISCROSSOVER(backtrace[l]))
*q++ = (char)tolower((int)base_translate(qr[2][i++], false));
else
*q++ = base_translate(qr[2][i++], false);
break;
case BACK_D_DELETION:
*d++ = '-';
if (BT_ISCROSSOVER(backtrace[l]))
*q++ = (char)tolower((int)base_translate(qr[3][i++], false));
else
*q++ = base_translate(qr[3][i++], false);
break;
case BACK_INSERTION:
*d++ = base_translate(db[j++], false);
*q++ = '-';
break;
case BACK_A_MATCH_MISMATCH:
*d++ = base_translate(db[j++], false);
if (BT_ISCROSSOVER(backtrace[l]))
*q++ = (char)tolower((int)base_translate(qr[0][i++], false));
else
*q++ = base_translate(qr[0][i++], false);
break;
case BACK_B_MATCH_MISMATCH:
*d++ = base_translate(db[j++], false);
if (BT_ISCROSSOVER(backtrace[l]))
*q++ = (char)tolower((int)base_translate(qr[1][i++], false));
else
*q++ = base_translate(qr[1][i++], false);
break;
case BACK_C_MATCH_MISMATCH:
*d++ = base_translate(db[j++], false);
if (BT_ISCROSSOVER(backtrace[l]))
*q++ = (char)tolower((int)base_translate(qr[2][i++], false));
else
*q++ = base_translate(qr[2][i++], false);
break;
case BACK_D_MATCH_MISMATCH:
*d++ = base_translate(db[j++], false);
if (BT_ISCROSSOVER(backtrace[l]))
*q++ = (char)tolower((int)base_translate(qr[3][i++], false));
else
*q++ = base_translate(qr[3][i++], false);
break;
default:
fprintf(stderr, "INTERNAL ERROR: backtrace[l] = 0x%x\n", backtrace[l]);
assert(0);
}
if ((BT_TYPE(backtrace[l]) == BACK_A_MATCH_MISMATCH || BT_TYPE(backtrace[l]) == BACK_B_MATCH_MISMATCH
|| BT_TYPE(backtrace[l]) == BACK_C_MATCH_MISMATCH || BT_TYPE(backtrace[l]) == BACK_D_MATCH_MISMATCH)
&& (*(q-1) == 'n' || *(q-1) == 'N')) {
if (BT_ISCROSSOVER(backtrace[l]))
*(q-1) = (char)tolower(*(d-1));
else
*(q-1) = *(d-1);
}
}
*d = *q = '\0';
}
int
sw_full_cs_cleanup(void) {
free(db);
int i;
for (i=0; i<4; i++) {
free(qr[i]);
}
free(swmatrix);
free(backtrace);
free(dbalign);
free(qralign);
return 0;
}
/*
 * Allocate the Smith-Waterman working buffers and record the scoring
 * parameters for later sw_full_cs() calls.
 *
 * Returns 0 on success, 1 on allocation failure.  On failure all
 * buffers allocated so far are released (the previous version leaked
 * them and left stale pointers behind).
 */
int
sw_full_cs_setup(int _dblen, int _qrlen, int _a_gap_open, int _a_gap_ext, int _b_gap_open, int _b_gap_ext,
		 int _match, int _mismatch, int _global_xover_penalty, bool reset_stats,
		 int _anchor_width, int _indel_taboo_len)
{
	int i;

	/* Start from a clean slate so the failure path can free() safely. */
	db = NULL;
	swmatrix = NULL;
	backtrace = NULL;
	dbalign = NULL;
	qralign = NULL;
	for (i = 0; i < 4; i++)
		qr[i] = NULL;

	dblen = _dblen;
	db = (int8_t *)malloc(dblen * sizeof(db[0]));
	if (db == NULL)
		goto fail;

	qrlen = _qrlen;
	for (i = 0; i < 4; i++) {
		/*
		 * NB: element size.  The original used sizeof(qr[0]) -- the
		 * size of a pointer -- over-allocating each row.
		 */
		qr[i] = (int8_t *)malloc(qrlen * sizeof(qr[i][0]));
		if (qr[i] == NULL)
			goto fail;
	}

	swmatrix = (struct swcell *)malloc((dblen + 1) * (qrlen + 1) *
	    sizeof(swmatrix[0]));
	if (swmatrix == NULL)
		goto fail;

	backtrace = (uint8_t *)malloc((dblen + qrlen) * sizeof(backtrace[0]));
	if (backtrace == NULL)
		goto fail;

	dbalign = (char *)malloc((dblen + qrlen + 1) * sizeof(dbalign[0]));
	if (dbalign == NULL)
		goto fail;

	qralign = (char *)malloc((dblen + qrlen + 1) * sizeof(qralign[0]));
	if (qralign == NULL)
		goto fail;

	/* Gap penalties are stored negated; they are added during the DP. */
	a_gap_open = -(_a_gap_open);
	a_gap_ext = -(_a_gap_ext);
	b_gap_open = -(_b_gap_open);
	b_gap_ext = -(_b_gap_ext);
	match = _match;
	mismatch = _mismatch;
	global_xover_penalty = _global_xover_penalty;

	if (reset_stats) {
		swcells = swinvocs = 0;
		sw_tc.type = DEF_FAST_TIME_COUNTER;
		sw_tc.counter = 0;
	}

	anchor_width = _anchor_width;
	indel_taboo_len = _indel_taboo_len;

	initialised = 1;
	return (0);

fail:
	/* Undo partial allocations; free(NULL) is a defined no-op. */
	free(db);
	db = NULL;
	for (i = 0; i < 4; i++) {
		free(qr[i]);
		qr[i] = NULL;
	}
	free(swmatrix);
	swmatrix = NULL;
	free(backtrace);
	backtrace = NULL;
	free(dbalign);
	dbalign = NULL;
	free(qralign);
	qralign = NULL;
	return (1);
}
/*
 * Report cumulative Smith-Waterman statistics.  Each out-parameter is
 * optional; pass NULL to skip a value.
 */
void
sw_full_cs_stats(uint64_t *invocs, uint64_t *cells, double *secs)
{
	if (invocs)
		*invocs = swinvocs;
	if (cells)
		*cells = swcells;
	if (secs)
		*secs = time_counter_get_secs(&sw_tc);
}
/*
 * Run a full colour-space Smith-Waterman alignment of 'read' against the
 * genome window [goff, goff+glen) and, when the score reaches
 * 'threshscore', fill 'sfr' with the score, mapped coordinates and the
 * pretty-printed alignment strings (heap copies via xstrdup).
 *
 * genome_ls/goff/glen: packed genome and window to align against.
 * read/rlen/initbp:    packed colour-space read; initbp is its initial
 *                      base (must be 0..3) used to seed translation.
 * sfr:                 results; may be NULL, in which case a zeroed
 *                      scratch struct is used and results discarded.
 * Remaining arguments are passed through to full_sw().
 *
 * Requires a prior successful sw_full_cs_setup() (aborts otherwise).
 */
void
sw_full_cs(uint32_t *genome_ls, int goff, int glen, uint32_t *read, int rlen,
    int initbp, int threshscore, struct sw_full_results *sfr, bool revcmpl, bool is_rna,
    struct anchor * anchors, int anchors_cnt, int local_alignment, int * crossover_score)
{
	struct sw_full_results scratch;
	int i, j, k;

	//llint before = rdtsc(), after;
	TIME_COUNTER_START(sw_tc);

	if (!initialised)
		abort();

	swinvocs++;

	/* The window/read must fit in the buffers sized at setup time. */
	assert(glen > 0 && glen <= dblen);
	assert(rlen > 0 && rlen <= qrlen);

	/* Results are optional: fall back to a zeroed scratch struct. */
	if (sfr == NULL) {
		sfr = &scratch;
		memset(sfr, 0, sizeof(*sfr));
	}

	memset(backtrace, 0, (dblen + qrlen) * sizeof(backtrace[0]));

	dbalign[0] = qralign[0] = '\0';

	/* Unpack the genome window into the letter-space buffer 'db'. */
	for (i = 0; i < glen; i++)
		db[i] = (int8_t)EXTRACT(genome_ls, goff + i);

	/*
	 * Generate each possible letter space sequence from the colour space
	 * read. qr[0] corresponds to initbp, which is given initial preference.
	 */
	assert(initbp >= 0 && initbp <= 3);
	for (i = 0; i < 4; i++) {
		int letter = (i + initbp) % 4;

		for (j = 0; j < rlen; j++) {
			int base = EXTRACT(read, j);

			if (base == BASE_N || base == BASE_X) {
				qr[i][j] = BASE_N;
				/* An N/X resets the frame to this row's start letter. */
				letter = (i + initbp) % 4;
			} else {
				/* Translate colour to letter given the previous letter. */
				qr[i][j] = (int8_t)cstols(letter, base, is_rna);
				letter = qr[i][j];
			}
		}
	}

#ifdef DEBUG_SW
	fprintf(stderr, "db: ");
	for (j = 0; j < glen; j++)
		fprintf(stderr, "%c", base_translate(db[j], false));
	fprintf(stderr, "\n");
	for (i = 0; i < 4; i++) {
		fprintf(stderr, "qr[%u]: ", i);
		for (j = 0; j < rlen; j++)
			fprintf(stderr, "%c", base_translate(qr[i][j], false));
		fprintf(stderr, "\n");
	}
#endif
#ifdef DEBUG_CROSSOVERS
	_glen = glen;
	_rlen = rlen;
#endif

	/*
	 * full_sw fills i, j, k with the backtrace starting point
	 * (presumably the best cell and qr row -- confirm in full_sw).
	 */
	sfr->score = full_sw(glen, rlen, threshscore, &i, &j, &k, revcmpl, anchors, anchors_cnt, local_alignment, crossover_score);
	if (sfr->score >= 0 && sfr->score >= threshscore) {
		/* Backtrace, then render using the start cell it computed. */
		k = do_backtrace(glen, i, j, k, sfr);
		pretty_print(sfr->read_start, sfr->genome_start, k);
		sfr->gmapped = j - sfr->genome_start + 1;
		sfr->genome_start += goff;	/* convert window- to genome-relative */
		sfr->rmapped = i - sfr->read_start + 1;
		sfr->dbalign = xstrdup(dbalign);
		sfr->qralign = xstrdup(qralign);
	} else {
		/* Below threshold: report no alignment. */
		sfr->score = 0;
	}

#ifdef DEBUG_SW
	fprintf(stderr, "reported alignment:\n\t%s\n\t%s\n", sfr->dbalign, sfr->qralign);
#endif

	//swcells += (glen * rlen);
	//after = rdtsc();
	//swticks += MAX(after - before, 0);
	TIME_COUNTER_STOP(sw_tc);
}
|
GB_unaryop__ainv_bool_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_bool_bool
// op(A') function: GB_tran__ainv_bool_bool
// C type: bool
// A type: bool
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op (cast (Ax [p])) for all anz entries, split statically
// across nthreads OpenMP threads.  Returns GrB_NO_VALUE when this
// operator/type pair is compiled out (GB_DISABLE), so the caller can
// fall back to the generic kernel.
GrB_Info GB_unop__ainv_bool_bool
(
    bool *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply.  The work is done
// by the included template, driven by the GB_* macros defined above;
// GB_PHASE_2_OF_2 selects the phase that fills in the values of C.
GrB_Info GB_tran__ainv_bool_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__ne_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_int8)
// A*D function (colscale): GB (_AxD__ne_int8)
// D*A function (rowscale): GB (_DxB__ne_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_int8)
// C=scalar+B GB (_bind1st__ne_int8)
// C=scalar+B' GB (_bind1st_tran__ne_int8)
// C=A+scalar GB (_bind2nd__ne_int8)
// C=A'+scalar GB (_bind2nd_tran__ne_int8)
// C type: bool
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT8 || GxB_NO_NE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (here: Cx [p] = (Ax [p] != Bx [p])) with C, A and B all
// dense; the loop lives in the included template.
GrB_Info GB (_Cdense_ewise3_noaccum__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  The template
// is compiled out ("#if 0") for this operator, so the function is a
// no-op stub that reports success.
GrB_Info GB (_Cdense_accumB__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  Likewise compiled
// out ("#if 0") for this operator, leaving a successful no-op stub.
GrB_Info GB (_Cdense_accumb__ne_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;   // the template writes results here
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;   // the template writes results here
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A
// and B.  The GB_WERK slicing workspaces declared here are used by the
// template and released by GB_FREE_WORK.
GrB_Info GB (_AaddB__ne_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the
// patterns of A and B; the method body is the included template.
GrB_Info GB (_AemultB_01__ne_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP selects at compile time whether a flipped application
// of the operator is needed for flipxy (NE is commutative, so the
// non-flipped branch is compiled here).
GrB_Info GB (_AemultB_02__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; the method body is the included template.
GrB_Info GB (_AemultB_03__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap; the
// method body is the included template.
GrB_Info GB (_AemultB_bitmap__ne_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for every entry present in B.  Bb is B's
// bitmap (may be NULL for a full matrix; the GBB macro handles both),
// and entries absent from the bitmap are skipped.
GrB_Info GB (_bind1st__ne_int8)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int8_t bij = Bx [p] ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for every entry present in A.  Ab is A's
// bitmap (may be NULL for a full matrix; the GBB macro handles both),
// and entries absent from the bitmap are skipped.
GrB_Info GB (_bind2nd__ne_int8)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = Ax [p] ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x != aij) via the
// GB_CAST_OP macro redefined just above.
GrB_Info GB (_bind1st_tran__ne_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij != y) via the
// GB_CAST_OP macro redefined just above.
GrB_Info GB (_bind2nd_tran__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
bondarenko_2004.c | #include <stdlib.h>
#include "bondarenko_2004.h"
// Expose basic model metadata through the cell model struct; each field
// is filled in only when the corresponding request flag is set.
GET_CELL_MODEL_DATA(init_cell_model_data) {
    if(get_neq) {
        cell_model->number_of_ode_equations = NEQ;
    }
    if(get_initial_v) {
        cell_model->initial_v = INITIAL_V;
    }
}
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
sv[0] = -82.4202f; // V millivolt
sv[1] = 0.115001f; // Cai msvromolar
sv[2] = 0.115001f; // Cass msvromolar
sv[3] = 1299.5f; // CaJSR msvromolar
sv[4] = 1299.5f; // CaNSR msvromolar
sv[5] = 0.0f; // P_RyR dimensionless
sv[6] = 11.2684f; // LTRPN_Ca msvromolar
sv[7] = 125.29f; // HTRPN_Ca msvromolar
sv[8] = 0.149102e-4f; // P_O1 dimensionless
sv[9] = 0.951726e-10f; // P_O2 dimensionless
sv[10] = 0.16774e-3f; // P_C2 dimensionless
sv[11] = 0.930308e-18f; // O dimensionless
sv[12] = 0.124216e-3f; // C2 dimensionless
sv[13] = 0.578679e-8f; // C3 dimensionless
sv[14] = 0.119816e-12f; // C4 dimensionless
sv[15] = 0.497923e-18f; // I1 dimensionless
sv[16] = 0.345847e-13f; // I2 dimensionless
sv[17] = 0.185106e-13f; // I3 dimensionless
sv[18] = 14237.1f; // Nai msvromolar
sv[19] = 0.020752f; // C_Na2 dimensionless
sv[20] = 0.279132e-3f; // C_Na1 dimensionless
sv[21] = 0.713483e-6f; // O_Na dimensionless
sv[22] = 0.153176e-3f; // IF_Na dimensionless
sv[23] = 0.673345e-6f; // I1_Na dimensionless
sv[24] = 0.155787e-8f; // I2_Na dimensionless
sv[25] = 0.0113879f; // sv_Na2 dimensionless
sv[26] = 0.34278f; // sv_Na3 dimensionless
sv[27] = 143720.0f; // Ki msvromolar
sv[28] = 0.265563e-2f; // ato_f dimensionless
sv[29] = 0.999977f; // ito_f dimensionless
sv[30] = 0.417069e-3f; // ato_s dimensionless
sv[31] = 0.998543f; // ito_s dimensionless
sv[32] = 0.262753e-3f; // nKs dimensionless
sv[33] = 0.417069e-3f; // aur dimensionless
sv[34] = 0.998543f; // iur dimensionless
sv[35] = 0.417069e-3f; // aKss dimensionless
sv[36] = 1.0f; // iKss dimensionless
sv[37] = 0.641229e-3f; // C_K2 dimensionless
sv[38] = 0.992513e-3f; // C_K1 dimensionless
sv[39] = 0.175298e-3f; // O_K dimensionless
sv[40] = 0.319129e-4f; // I_K dimensionless
}
// Advance each requested cell by num_steps explicit-Euler steps of
// size dt.  Cells are independent, so the outer loop is parallelised
// with OpenMP.  A NULL cells_to_solve means "solve cells 0..n-1 in
// order"; otherwise it maps loop index to state-vector id.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    #pragma omp parallel for
    for (int i = 0; i < num_cells_to_solve; i++) {
        // Declared in-loop, so each thread gets its own copy.
        uint32_t sv_id = cells_to_solve ? cells_to_solve[i] : (uint32_t)i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
// Take a single forward-Euler step of length dt for one cell: snapshot
// the current state, evaluate the right-hand side, then update sv in
// place.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    real state[NEQ], deriv[NEQ];
    int eq;

    for(eq = 0; eq < NEQ; eq++)
        state[eq] = sv[eq];

    RHS_cpu(state, deriv, stim_current);

    for(eq = 0; eq < NEQ; eq++)
        sv[eq] = dt * deriv[eq] + state[eq];
}
void RHS_cpu(const real *sv, real *rDY_, real stim_current) {
// State variables
const real V_old_ = sv[0]; // initial value = -82.4202 millivolt
const real Cai_old_ = sv[1]; // initial value = 0.115001 micromolar
const real Cass_old_ = sv[2]; // initial value = 0.115001 micromolar
const real CaJSR_old_ = sv[3]; // initial value = 1299.5 micromolar
const real CaNSR_old_ = sv[4]; // initial value = 1299.5 micromolar
const real P_RyR_old_ = sv[5]; // initial value = 0 dimensionless
const real LTRPN_Ca_old_ = sv[6]; // initial value = 11.2684 micromolar
const real HTRPN_Ca_old_ = sv[7]; // initial value = 125.29 micromolar
const real P_O1_old_ = sv[8]; // initial value = 0.149102e-4 dimensionless
const real P_O2_old_ = sv[9]; // initial value = 0.951726e-10 dimensionless
const real P_C2_old_ = sv[10]; // initial value = 0.16774e-3 dimensionless
const real O_old_ = sv[11]; // initial value = 0.930308e-18 dimensionless
const real C2_old_ = sv[12]; // initial value = 0.124216e-3 dimensionless
const real C3_old_ = sv[13]; // initial value = 0.578679e-8 dimensionless
const real C4_old_ = sv[14]; // initial value = 0.119816e-12 dimensionless
const real I1_old_ = sv[15]; // initial value = 0.497923e-18 dimensionless
const real I2_old_ = sv[16]; // initial value = 0.345847e-13 dimensionless
const real I3_old_ = sv[17]; // initial value = 0.185106e-13 dimensionless
const real Nai_old_ = sv[18]; // initial value = 14237.1 micromolar
const real C_Na2_old_ = sv[19]; // initial value = 0.020752 dimensionless
const real C_Na1_old_ = sv[20]; // initial value = 0.279132e-3 dimensionless
const real O_Na_old_ = sv[21]; // initial value = 0.713483e-6 dimensionless
const real IF_Na_old_ = sv[22]; // initial value = 0.153176e-3 dimensionless
const real I1_Na_old_ = sv[23]; // initial value = 0.673345e-6 dimensionless
const real I2_Na_old_ = sv[24]; // initial value = 0.155787e-8 dimensionless
const real IC_Na2_old_ = sv[25]; // initial value = 0.0113879 dimensionless
const real IC_Na3_old_ = sv[26]; // initial value = 0.34278 dimensionless
const real Ki_old_ = sv[27]; // initial value = 143720 micromolar
const real ato_f_old_ = sv[28]; // initial value = 0.265563e-2 dimensionless
const real ito_f_old_ = sv[29]; // initial value = 0.999977 dimensionless
const real ato_s_old_ = sv[30]; // initial value = 0.417069e-3 dimensionless
const real ito_s_old_ = sv[31]; // initial value = 0.998543 dimensionless
const real nKs_old_ = sv[32]; // initial value = 0.262753e-3 dimensionless
const real aur_old_ = sv[33]; // initial value = 0.417069e-3 dimensionless
const real iur_old_ = sv[34]; // initial value = 0.998543 dimensionless
const real aKss_old_ = sv[35]; // initial value = 0.417069e-3 dimensionless
const real iKss_old_ = sv[36]; // initial value = 1 dimensionless
const real C_K2_old_ = sv[37]; // initial value = 0.641229e-3 dimensionless
const real C_K1_old_ = sv[38]; // initial value = 0.992513e-3 dimensionless
const real O_K_old_ = sv[39]; // initial value = 0.175298e-3 dimensionless
const real I_K_old_ = sv[40]; // initial value = 0.319129e-4 dimensionless
// Parameters
const real Acap = 1.534e-4f; // cm2
const real Cm = 1.0f; // microF_per_cm2
const real Vmyo = 25.84e-6f; // microlitre
const real F = 96.5f; // coulomb_per_millimole
const real VJSR = 0.12e-6f; // microlitre
const real Vss = 1.485e-9f; // microlitre
const real VNSR = 2.098e-6f; // microlitre
const real CMDN_tot = 50.0f; // micromolar
const real Km_CMDN = 0.238f; // micromolar
const real CSQN_tot = 15000.0f; // micromolar
const real Km_CSQN = 800.0f; // micromolar
const real v1 = 4.5f; // per_millisecond
const real tau_tr = 20.0f; // millisecond
const real tau_xfer = 8.0f; // millisecond
const real v2 = 1.74e-5f; // per_millisecond
const real v3 = 0.45f; // micromolar_per_millisecond
const real Km_up = 0.5f; // micromolar
const real k_plus_htrpn = 0.00237f; // per_micromolar_millisecond
const real HTRPN_tot = 140.0f; // micromolar
const real k_plus_ltrpn = 0.0327f; // per_micromolar_millisecond
const real LTRPN_tot = 70.0f; // micromolar
const real k_minus_htrpn = 3.2e-5f; // per_millisecond
const real k_minus_ltrpn = 0.0196f; // per_millisecond
const real i_CaL_max = 7.0f; // picoA_per_picoF
const real k_plus_a = 0.006075f; // micromolar4_per_millisecond
const real n = 4.0f; // dimensionless
const real k_minus_b = 0.965f; // per_millisecond
const real k_minus_c = 0.0008f; // per_millisecond
const real k_minus_a = 0.07125f; // per_millisecond
const real k_plus_b = 0.00405f; // micromolar3_per_millisecond
const real m = 3.0f; // dimensionless
const real k_plus_c = 0.009f; // per_millisecond
const real g_CaL = 0.1729f; // milliS_per_microF
const real E_CaL = 63.0f; // millivolt
const real Kpcb = 0.0005f; // per_millisecond
const real Kpc_max = 0.23324f; // per_millisecond
const real Kpc_half = 20.0f; // micromolar
const real i_pCa_max = 1.0f; // picoA_per_picoF
const real Km_pCa = 0.5f; // micromolar
const real k_NaCa = 292.8f; // picoA_per_picoF
const real K_mNa = 87500.0f; // micromolar
const real Nao = 140000.0f; // micromolar
const real K_mCa = 1380.0f; // micromolar
const real Cao = 1800.0f; // micromolar
const real k_sat = 0.1f; // dimensionless
const real eta = 0.35f; // dimensionless
const real R = 8.314f; // joule_per_mole_kelvin
const real T = 298.0f; // kelvin
const real g_Cab = 0.000367f; // milliS_per_microF
const real g_Na = 13.0f; // milliS_per_microF
const real Ko = 5400.0f; // micromolar
const real g_Nab = 0.0026f; // milliS_per_microF
const real g_Kto_f = 0.4067f; // milliS_per_microF
const real g_Kto_s = 0.0f; // milliS_per_microF
const real g_Ks = 0.00575f; // milliS_per_microF
const real g_Kur = 0.16f; // milliS_per_microF
const real g_Kss = 0.05f; // milliS_per_microF
const real g_Kr = 0.078f; // milliS_per_microF
const real kf = 0.023761f; // per_millisecond
const real kb = 0.036778f; // per_millisecond
const real i_NaK_max = 0.88f; // picoA_per_picoF
const real Km_Nai = 21000.0f; // micromolar
const real Km_Ko = 1500.0f; // micromolar
const real g_ClCa = 10.0f; // milliS_per_microF
const real Km_Cl = 10.0f; // micromolar
const real E_Cl = -40.0f; // millivolt
// Algebraic Equations
real calc_i_stim = stim_current; //0
real calc_Bi = powf((1.0f+((CMDN_tot*Km_CMDN)/powf((Km_CMDN+Cai_old_),2.0f))),(-1.0f)); //6
real calc_Bss = powf((1.0f+((CMDN_tot*Km_CMDN)/powf((Km_CMDN+Cass_old_),2.0f))),(-1.0f)); //7
real calc_BJSR = powf((1.0f+((CSQN_tot*Km_CSQN)/powf((Km_CSQN+CaJSR_old_),2.0f))),(-1.0f)); //8
real calc_J_rel = (v1*(P_O1_old_+P_O2_old_)*(CaJSR_old_-Cass_old_)*P_RyR_old_); //9
real calc_J_tr = ((CaNSR_old_-CaJSR_old_)/tau_tr); //10
real calc_J_xfer = ((Cass_old_-Cai_old_)/tau_xfer); //11
real calc_J_leak = (v2*(CaNSR_old_-Cai_old_)); //12
real calc_J_up = ((v3*powf(Cai_old_,2.0f))/(powf(Km_up,2.0f)+powf(Cai_old_,2.0f))); //13
real calc_J_trpn = (((k_plus_htrpn*Cai_old_*(HTRPN_tot-HTRPN_Ca_old_))+(k_plus_ltrpn*Cai_old_*(LTRPN_tot-LTRPN_Ca_old_)))-((k_minus_htrpn*HTRPN_Ca_old_)+(k_minus_ltrpn*LTRPN_Ca_old_))); //14
real calc_P_C1 = (1.0f-(P_C2_old_+P_O1_old_+P_O2_old_)); //19
real calc_i_CaL = (g_CaL*O_old_*(V_old_-E_CaL)); //22
real calc_C1 = (1.0f-(O_old_+C2_old_+C3_old_+C4_old_+I1_old_+I2_old_+I3_old_)); //24
real calc_alpha = ((0.4f*expf(((V_old_+12.0f)/10.0f))*((1.0f+(0.7f*expf(((-powf((V_old_+40.0f),2.0f))/10.0f))))-(0.75f*expf(((-powf((V_old_+20.0f),2.0f))/400.0f)))))/(1.0f+(0.12f*expf(((V_old_+12.0f)/10.0f))))); //31
real calc_beta = (0.05f*expf(((-(V_old_+12.0f))/13.0f))); //32
real calc_gamma = ((Kpc_max*Cass_old_)/(Kpc_half+Cass_old_)); //33
real calc_Kpcf = (13.0f*(1.0f-expf(((-powf((V_old_+14.5f),2.0f))/100.0f)))); //34
real calc_i_pCa = ((i_pCa_max*powf(Cai_old_,2.0f))/(powf(Km_pCa,2.0f)+powf(Cai_old_,2.0f))); //35
real calc_i_NaCa = (((((((k_NaCa*1.0f)/(powf(K_mNa,3.0)+powf(Nao,3.0)))*1.0f)/(K_mCa+Cao))*1.0f)/(1.0f+(k_sat*expf((((eta-1.0f)*V_old_*F)/(R*T))))))*((expf(((eta*V_old_*F)/(R*T)))*powf(Nai_old_,3.0)*Cao)-(expf((((eta-1.0f)*V_old_*F)/(R*T)))*powf(Nao,3.0)*Cai_old_))); //36
real calc_E_CaN = (((R*T)/(2.0f*F))*logf((Cao/Cai_old_))); //38
real calc_E_Na = (((R*T)/F)*logf((((0.9f*Nao)+(0.1f*Ko))/((0.9f*Nai_old_)+(0.1f*Ki_old_))))); //41
real calc_C_Na3 = (1.0f-(O_Na_old_+C_Na1_old_+C_Na2_old_+IF_Na_old_+I1_Na_old_+I2_Na_old_+IC_Na2_old_+IC_Na3_old_)); //42
real calc_alpha_Na11 = (3.802f/((0.1027f*expf(((-(V_old_+2.5f))/17.0f)))+(0.2f*expf(((-(V_old_+2.5f))/150.0f))))); //51
real calc_alpha_Na12 = (3.802f/((0.1027f*expf(((-(V_old_+2.5f))/15.0f)))+(0.23f*expf(((-(V_old_+2.5f))/150.0f))))); //52
real calc_alpha_Na13 = (3.802f/((0.1027f*expf(((-(V_old_+2.5f))/12.0f)))+(0.25f*expf(((-(V_old_+2.5f))/150.0f))))); //53
real calc_beta_Na11 = (0.1917f*expf(((-(V_old_+2.5f))/20.3f))); //54
real calc_beta_Na12 = (0.2f*expf(((-(V_old_-2.5f))/20.3f))); //55
real calc_beta_Na13 = (0.22f*expf(((-(V_old_-7.5f))/20.3f))); //56
real calc_alpha_Na3 = (7e-7f*expf(((-(V_old_+7.0f))/7.7f))); //57
real calc_beta_Na3 = (0.00854f+(0.00002f*V_old_)); //58
real calc_alpha_Na2 = (1.0f/((0.188495f*expf(((-(V_old_+7.0f))/16.6f)))+0.393956f)); //59
real calc_E_K = (((R*T)/F)*logf((Ko/Ki_old_))); //68
real calc_alpha_a = (0.18064f*expf((0.03577f*(V_old_+ 30.0f)))); //71
real calc_beta_a = (0.3956f*expf(((-0.06237f)*(V_old_+ 30.0f)))); //72
real calc_alpha_i = ((0.000152f*expf(((-(V_old_+13.5f))/7.0f)))/((0.067083f*expf(((-(V_old_+33.5f))/7.0f)))+1.0f)); //73
real calc_beta_i = ((0.00095f*expf(((V_old_+33.5f)/7.0f)))/((0.051335f*expf(((V_old_+33.5f)/7.0f)))+1.0f)); //74
real calc_ass = (1.0f/(1.0f+expf(((-(V_old_+22.5f))/7.7f)))); //78
real calc_iss = (1.0f/(1.0f+expf(((V_old_+45.2f)/5.7f)))); //79
real calc_tau_ta_s = ((0.493f*expf(((-0.0629f)*V_old_)))+2.058f); //80
real calc_tau_ti_s = (270.0f+(1050.0f/(1.0f+expf(((V_old_+45.2f)/5.7f))))); //81
real calc_alpha_n = (V_old_ != -26.5f)?((0.00000481333f*(V_old_+26.5f))/(1.0f-expf(((-0.128f)*(V_old_+26.5f))))): 0.000037604f; //85
real calc_beta_n = (0.0000953333f*expf(((-0.038f)*(V_old_+26.5f)))); //86
real calc_tau_aur = ((0.493f*expf(((-0.0629f)*V_old_)))+2.058f); //90
real calc_tau_iur = (1200.0f-(170.0f/(1.0f+expf(((V_old_+45.2f)/5.7f))))); //91
real calc_tau_Kss = ((39.3f*expf(((-0.0862f)*V_old_)))+13.17f); //95
real calc_i_Kr = (g_Kr*O_K_old_*(V_old_-(((R*T)/F)*logf((((0.98f*Ko)+(0.02f*Nao))/((0.98f*Ki_old_)+(0.02f*Nai_old_))))))); //96
real calc_C_K0 = (1.0f-(C_K1_old_+C_K2_old_+O_K_old_+I_K_old_)); //97
real calc_alpha_a0 = (0.022348f*expf((0.01176f*V_old_))); //102
real calc_beta_a0 = (0.047002f*expf(((-0.0631f)*V_old_))); //103
real calc_alpha_a1 = (0.013733f*expf((0.038198f*V_old_))); //104
real calc_beta_a1 = (0.0000689f*expf(((-0.04178f)*V_old_))); //105
real calc_alpha_i_duplicated_rapid_delayed_rectifier_potassium_current = (0.090821f*expf((0.023391f*(V_old_+5.0f)))); //106
real calc_beta_i_duplicated_rapid_delayed_rectifier_potassium_current = (0.006497f*expf(((-0.03268f)*(V_old_+5.0f)))); //107
real calc_sigma = ((1.0f/7.0f)*(expf((Nao/67300.0f))-1.0f)); //110
real calc_O_ClCa = (0.2f/(1.0f+expf(((-(V_old_-46.7f))/7.8f)))); //112
real calc_beta_Na2 = ((calc_alpha_Na13*calc_alpha_Na2*calc_alpha_Na3)/(calc_beta_Na13*calc_beta_Na3)); //60
real calc_alpha_Na4 = (calc_alpha_Na2/1000.0f); //61
real calc_beta_Na4 = calc_alpha_Na3; //62
real calc_alpha_Na5 = (calc_alpha_Na2/95000.0f); //63
real calc_beta_Na5 = (calc_alpha_Na3/50.0f); //64
real calc_i_Nab = (g_Nab*(V_old_-calc_E_Na)); //65
real calc_i_Kto_s = (g_Kto_s*ato_s_old_*ito_s_old_*(V_old_-calc_E_K)); //75
real calc_i_K1 = ((((0.2938f*Ko)/(Ko+210.0f))*(V_old_-calc_E_K))/(1.0f+expf((0.0896f*(V_old_-calc_E_K))))); //82
real calc_i_Ks = (g_Ks*powf(nKs_old_,2.0f)*(V_old_-calc_E_K)); //83
real calc_i_Kur = (g_Kur*aur_old_*iur_old_*(V_old_-calc_E_K)); //87
real calc_i_Kss = (g_Kss*aKss_old_*iKss_old_*(V_old_-calc_E_K)); //92
real calc_i_Cab = (g_Cab*(V_old_-calc_E_CaN)); //37
real calc_i_Na = (g_Na*O_Na_old_*(V_old_-calc_E_Na)); //40
real calc_i_Kto_f = (g_Kto_f*powf(ato_f_old_,3.0)*ito_f_old_*(V_old_-calc_E_K)); //67
real calc_f_NaK = (1.0f/(1.0f+(0.1245f*expf((((-0.1f)*V_old_*F)/(R*T))))+(0.0365f*calc_sigma*expf((((-V_old_)*F)/(R*T)))))); //109
real calc_i_ClCa = (((g_ClCa*calc_O_ClCa*Cai_old_)/(Cai_old_+Km_Cl))*(V_old_-E_Cl)); //111
real calc_i_NaK = ((((i_NaK_max*calc_f_NaK*1.0f)/(1.0f+powf((Km_Nai/Nai_old_),1.5)))*Ko)/(Ko+Km_Ko)); //108
// Differential Equations
real d_dt_V = (-(calc_i_CaL+calc_i_pCa+calc_i_NaCa+calc_i_Cab+calc_i_Na+calc_i_Nab+calc_i_NaK+calc_i_Kto_f+calc_i_Kto_s+calc_i_K1+calc_i_Ks+calc_i_Kur+calc_i_Kss+calc_i_Kr+calc_i_ClCa+calc_i_stim)); // 1
real d_dt_Cai = (calc_Bi*((calc_J_leak+calc_J_xfer)-(calc_J_up+calc_J_trpn+((((calc_i_Cab+calc_i_pCa)-(2.0f*calc_i_NaCa))*Acap*Cm)/(2.0f*Vmyo*F))))); // 2
real d_dt_Cass = (calc_Bss*(((calc_J_rel*VJSR)/Vss)-(((calc_J_xfer*Vmyo)/Vss)+((calc_i_CaL*Acap*Cm)/(2.0f*Vss*F))))); // 3
real d_dt_CaJSR = (calc_BJSR*(calc_J_tr-calc_J_rel)); // 4
real d_dt_CaNSR = ((((calc_J_up-calc_J_leak)*Vmyo)/VNSR)-((calc_J_tr*VJSR)/VNSR)); // 5
real d_dt_P_RyR = (((-0.04f)*P_RyR_old_)-(((0.1f*calc_i_CaL)/i_CaL_max)*expf(((-powf((V_old_-5.0f),2.0f))/648.0f)))); // 15
real d_dt_LTRPN_Ca = ((k_plus_ltrpn*Cai_old_*(LTRPN_tot-LTRPN_Ca_old_))-(k_minus_ltrpn*LTRPN_Ca_old_)); // 16
real d_dt_HTRPN_Ca = ((k_plus_htrpn*Cai_old_*(HTRPN_tot-HTRPN_Ca_old_))-(k_minus_htrpn*HTRPN_Ca_old_)); // 17
real d_dt_P_O1 = (((k_plus_a*powf(Cass_old_,n)*calc_P_C1)+(k_minus_b*P_O2_old_)+(k_minus_c*P_C2_old_))-((k_minus_a*P_O1_old_)+(k_plus_b*powf(Cass_old_,m)*P_O1_old_)+(k_plus_c*P_O1_old_))); // 18
real d_dt_P_O2 = ((k_plus_b*powf(Cass_old_,m)*P_O1_old_)-(k_minus_b*P_O2_old_)); // 20
real d_dt_P_C2 = ((k_plus_c*P_O1_old_)-(k_minus_c*P_C2_old_)); // 21
real d_dt_O = (((calc_alpha*C4_old_)+(Kpcb*I1_old_)+(0.001f*((calc_alpha*I2_old_)-(calc_Kpcf*O_old_))))-((4.0f*calc_beta*O_old_)+(calc_gamma*O_old_))); // 23
real d_dt_C2 = (((4.0f*calc_alpha*calc_C1)+(2.0f*calc_beta*C3_old_))-((calc_beta*C2_old_)+(3.0f*calc_alpha*C2_old_))); // 25
real d_dt_C3 = (((3.0f*calc_alpha*C2_old_)+(3.0f*calc_beta*C4_old_))-((2.0f*calc_beta*C3_old_)+(2.0f*calc_alpha*C3_old_))); // 26
real d_dt_C4 = (((2.0f*calc_alpha*C3_old_)+(4.0f*calc_beta*O_old_)+(0.01f*((4.0f*Kpcb*calc_beta*I1_old_)-(calc_alpha*calc_gamma*C4_old_)))+(0.002f*((4.0f*calc_beta*I2_old_)-(calc_Kpcf*C4_old_)))+(4.0f*calc_beta*Kpcb*I3_old_))-((3.0f*calc_beta*C4_old_)+(calc_alpha*C4_old_)+(1.0f*calc_gamma*calc_Kpcf*C4_old_))); // 27
real d_dt_I1 = (((calc_gamma*O_old_)+(0.001f*((calc_alpha*I3_old_)-(calc_Kpcf*I1_old_)))+(0.01f*((calc_alpha*calc_gamma*C4_old_)-(4.0f*calc_beta*Kpcb*I1_old_))))-(Kpcb*I1_old_)); // 28
real d_dt_I2 = (((0.001f*((calc_Kpcf*O_old_)-(calc_alpha*I2_old_)))+(Kpcb*I3_old_)+(0.002f*((calc_Kpcf*C4_old_)-(4.0f*calc_beta*I2_old_))))-(calc_gamma*I2_old_)); // 29
real d_dt_I3 = (((0.001f*((calc_Kpcf*I1_old_)-(calc_alpha*I3_old_)))+(calc_gamma*I2_old_)+(1.0f*calc_gamma*calc_Kpcf*C4_old_))-((4.0f*calc_beta*Kpcb*I3_old_)+(Kpcb*I3_old_))); // 30
real d_dt_Nai = (((-(calc_i_Na+calc_i_Nab+(3.0f*calc_i_NaK)+(3.0f*calc_i_NaCa)))*Acap*Cm)/(Vmyo*F)); // 39
real d_dt_C_Na2 = (((calc_alpha_Na11*calc_C_Na3)+(calc_beta_Na12*C_Na1_old_)+(calc_alpha_Na3*IC_Na2_old_))-((calc_beta_Na11*C_Na2_old_)+(calc_alpha_Na12*C_Na2_old_)+(calc_beta_Na3*C_Na2_old_))); // 43
real d_dt_C_Na1 = (((calc_alpha_Na12*C_Na2_old_)+(calc_beta_Na13*O_Na_old_)+(calc_alpha_Na3*IF_Na_old_))-((calc_beta_Na12*C_Na1_old_)+(calc_alpha_Na13*C_Na1_old_)+(calc_beta_Na3*C_Na1_old_))); // 44
real d_dt_O_Na = (((calc_alpha_Na13*C_Na1_old_)+(calc_beta_Na2*IF_Na_old_))-((calc_beta_Na13*O_Na_old_)+(calc_alpha_Na2*O_Na_old_))); // 45
real d_dt_IF_Na = (((calc_alpha_Na2*O_Na_old_)+(calc_beta_Na3*C_Na1_old_)+(calc_beta_Na4*I1_Na_old_)+(calc_alpha_Na12*IC_Na2_old_))-((calc_beta_Na2*IF_Na_old_)+(calc_alpha_Na3*IF_Na_old_)+(calc_alpha_Na4*IF_Na_old_)+(calc_beta_Na12*IF_Na_old_))); // 46
real d_dt_I1_Na = (((calc_alpha_Na4*IF_Na_old_)+(calc_beta_Na5*I2_Na_old_))-((calc_beta_Na4*I1_Na_old_)+(calc_alpha_Na5*I1_Na_old_))); // 47
real d_dt_I2_Na = ((calc_alpha_Na5*I1_Na_old_)-(calc_beta_Na5*I2_Na_old_)); // 48
real d_dt_IC_Na2 = (((calc_alpha_Na11*IC_Na3_old_)+(calc_beta_Na12*IF_Na_old_)+(calc_beta_Na3*C_Na2_old_))-((calc_beta_Na11*IC_Na2_old_)+(calc_alpha_Na12*IC_Na2_old_)+(calc_alpha_Na3*IC_Na2_old_))); // 49
real d_dt_IC_Na3 = (((calc_beta_Na11*IC_Na2_old_)+(calc_beta_Na3*calc_C_Na3))-((calc_alpha_Na11*IC_Na3_old_)+(calc_alpha_Na3*IC_Na3_old_))); // 50
real d_dt_Ki = (((-((calc_i_Kto_f+calc_i_Kto_s+calc_i_K1+calc_i_Ks+calc_i_Kss+calc_i_Kur+calc_i_Kr)-(2.0f*calc_i_NaK)))*Acap*Cm)/(Vmyo*F)); // 66
real d_dt_ato_f = ((calc_alpha_a*(1.0f-ato_f_old_))-(calc_beta_a*ato_f_old_)); // 69
real d_dt_ito_f = ((calc_alpha_i*(1.0f-ito_f_old_))-(calc_beta_i*ito_f_old_)); // 70
real d_dt_ato_s = ((calc_ass-ato_s_old_)/calc_tau_ta_s); // 76
real d_dt_ito_s = ((calc_iss-ito_s_old_)/calc_tau_ti_s); // 77
real d_dt_nKs = ((calc_alpha_n*(1.0f-nKs_old_))-(calc_beta_n*nKs_old_)); // 84
real d_dt_aur = ((calc_ass-aur_old_)/calc_tau_aur); // 88
real d_dt_iur = ((calc_iss-iur_old_)/calc_tau_iur); // 89
real d_dt_aKss = ((calc_ass-aKss_old_)/calc_tau_Kss); // 93
real d_dt_iKss = 0.0f; // 94
real d_dt_C_K2 = (((kf*C_K1_old_)+(calc_beta_a1*O_K_old_))-((kb*C_K2_old_)+(calc_alpha_a1*C_K2_old_))); // 98
real d_dt_C_K1 = (((calc_alpha_a0*calc_C_K0)+(kb*C_K2_old_))-((calc_beta_a0*C_K1_old_)+(kf*C_K1_old_))); // 99
real d_dt_O_K = (((calc_alpha_a1*C_K2_old_)+(calc_beta_i_duplicated_rapid_delayed_rectifier_potassium_current*I_K_old_))-((calc_beta_a1*O_K_old_)+(calc_alpha_i_duplicated_rapid_delayed_rectifier_potassium_current*O_K_old_))); // 100
real d_dt_I_K = ((calc_alpha_i_duplicated_rapid_delayed_rectifier_potassium_current*O_K_old_)-(calc_beta_i_duplicated_rapid_delayed_rectifier_potassium_current*I_K_old_)); // 101
rDY_[0] = d_dt_V;
rDY_[1] = d_dt_Cai;
rDY_[2] = d_dt_Cass;
rDY_[3] = d_dt_CaJSR;
rDY_[4] = d_dt_CaNSR;
rDY_[5] = d_dt_P_RyR;
rDY_[6] = d_dt_LTRPN_Ca;
rDY_[7] = d_dt_HTRPN_Ca;
rDY_[8] = d_dt_P_O1;
rDY_[9] = d_dt_P_O2;
rDY_[10] = d_dt_P_C2;
rDY_[11] = d_dt_O;
rDY_[12] = d_dt_C2;
rDY_[13] = d_dt_C3;
rDY_[14] = d_dt_C4;
rDY_[15] = d_dt_I1;
rDY_[16] = d_dt_I2;
rDY_[17] = d_dt_I3;
rDY_[18] = d_dt_Nai;
rDY_[19] = d_dt_C_Na2;
rDY_[20] = d_dt_C_Na1;
rDY_[21] = d_dt_O_Na;
rDY_[22] = d_dt_IF_Na;
rDY_[23] = d_dt_I1_Na;
rDY_[24] = d_dt_I2_Na;
rDY_[25] = d_dt_IC_Na2;
rDY_[26] = d_dt_IC_Na3;
rDY_[27] = d_dt_Ki;
rDY_[28] = d_dt_ato_f;
rDY_[29] = d_dt_ito_f;
rDY_[30] = d_dt_ato_s;
rDY_[31] = d_dt_ito_s;
rDY_[32] = d_dt_nKs;
rDY_[33] = d_dt_aur;
rDY_[34] = d_dt_iur;
rDY_[35] = d_dt_aKss;
rDY_[36] = d_dt_iKss;
rDY_[37] = d_dt_C_K2;
rDY_[38] = d_dt_C_K1;
rDY_[39] = d_dt_O_K;
rDY_[40] = d_dt_I_K;
} |
rotth_omp45.c | /*
* rotth.c
*
* does rotation of complex array tvar through real angle thetb
*
*/
#ifdef _OPENMP
#include <omp.h>
#endif
#include <stdio.h>
#include "light.h"
#include "pf3dbench.h"
#include "util.h"
#include "runparm.h"
#include "pf3dbenchvars.h"
#define tvar3D(a,b,c) tvar[CELTNDX3(a,b,c)]
#define thetb3D(a,b,c) thetb[CELTNDX3(a,b,c)]
/* Rotate the whole complex field tvar by the per-cell angles in thetb,
 * offloaded as a single OpenMP "target teams" region.  thetb is mapped
 * to the device read-only, tvar both ways, for the duration of this call.
 * Globals referenced: num_teams (team count), ngtot (total cell count),
 * plus whatever rotth_omp45() uses per plane (nxl, nyl). */
void rotth_mult_omp45(int nzl, rcomplex * restrict tvar, real * restrict thetb)
{
#pragma omp target teams num_teams(num_teams) map(to:thetb[0:ngtot]) map(tofrom:tvar[0:ngtot])
{
int iz;
/* NOTE(review): only this inner directive is #ifdef-guarded; the outer
   "target teams" above is not.  Harmless (non-OpenMP compilers ignore
   unknown pragmas) but inconsistent -- confirm intent. */
#ifdef _OPENMP
#pragma omp distribute private(iz)
#endif
/* distribute whole z-planes across teams; the per-plane ix/iy
   parallel loops live in rotth_omp45() */
for (iz= 0; iz < nzl; iz++) {
rotth_omp45(tvar, thetb, iz);
}
}
}
/* Same rotation as rotth_mult_omp45(), but with NO map clauses on the
 * target region: assumes tvar/thetb were already placed on the device by
 * a prior rotth_premap() call (enter data).  Teams take whole z-planes;
 * threads within a team share the collapsed iy/ix plane loop. */
void rotth_mult_omp45_pre(int nzl, rcomplex * restrict tvar, real * restrict thetb)
{
int ix, iy, iz;
#pragma omp target teams num_teams(num_teams)
{
#pragma omp distribute private(iz)
for (iz= 0; iz < nzl; iz++) {
#ifdef _OPENMP
#pragma omp parallel for COLLAPSE(2) private(ix, iy)
#endif
for (iy=0; iy<nyl; iy++) {
for (ix=0; ix<nxl; ix++) {
/* multiply by e^(i*theta) = cos(theta) + i*sin(theta) */
tvar3D(ix,iy,iz)= tvar3D(ix,iy,iz)*( COS(thetb3D(ix,iy,iz))+IREAL*SIN(thetb3D(ix,iy,iz)) );
}
}
}
}
/* No need to bring data back with:
#pragma omp target update from(tvar[0:ngtot])
That will happen when the unmap call is made.
*/
}
/* Copy thetb and tvar (ngtot cells each) to the device and keep them
 * mapped until rotth_unmap() is called; lets the *_pre kernels run with
 * no per-call data transfer. */
void rotth_premap(rcomplex * restrict tvar, real * restrict thetb)
{
#pragma omp target enter data map(to:thetb[0:ngtot],tvar[0:ngtot])
}
/* End the device data region started by rotth_premap(): copy the rotated
 * tvar back to the host and release thetb without a transfer. */
void rotth_unmap(rcomplex * restrict tvar, real * restrict thetb)
{
#pragma omp target exit data map(from:tvar[0:ngtot]) map(release:thetb[0:ngtot])
}
/* Rotate one z-plane (index iz) of tvar by the matching angles in thetb:
 * tvar *= cos(theta) + i*sin(theta) for every (ix,iy) cell.  Intended to
 * be called from inside a device "distribute" loop (see rotth_mult_omp45);
 * the collapsed iy/ix loop is shared among the team's threads.
 * Globals referenced: nxl, nyl (local plane extents). */
void rotth_omp45(rcomplex * restrict tvar, real * restrict thetb, int iz)
{
int ix, iy;
#ifdef _OPENMP
#pragma omp parallel for COLLAPSE(2) private(ix, iy)
#endif
for (iy=0; iy<nyl; iy++) {
for (ix=0; ix<nxl; ix++) {
tvar3D(ix,iy,iz)= tvar3D(ix,iy,iz)*( COS(thetb3D(ix,iy,iz))+IREAL*SIN(thetb3D(ix,iy,iz)) );
}
}
}
/* Rotation over the inclusive z-range [izlo, izhi] with all three loops
 * collapsed into one "distribute parallel for" work pool.  Like the other
 * *_pre variants this assumes tvar/thetb are already device-resident via
 * rotth_premap().  The nzl parameter is unused here (range comes from
 * izlo/izhi) -- kept, presumably, for signature parity with the other
 * entry points; confirm before removing. */
void rotth_omp45_pre3D(int nzl, rcomplex * restrict tvar, real * restrict thetb,
int izlo, int izhi)
{
/* #pragma omp target teams num_teams(num_teams) map(to:thetb[0:ngtot]) map(tofrom:tvar[0:ngtot]) */
#pragma omp target teams num_teams(num_teams) map(to:izlo,izhi)
{
int ix, iy, iz;
#pragma omp distribute parallel for COLLAPSE(3)
for (iz= izlo; iz <= izhi; iz++) {
for (iy=0; iy<nyl; iy++) {
for (ix=0; ix<nxl; ix++) {
tvar3D(ix,iy,iz)= tvar3D(ix,iy,iz)*( COS(thetb3D(ix,iy,iz))+IREAL*SIN(thetb3D(ix,iy,iz)) );
}
}
}
}
/* No need to bring data back with:
#pragma omp target update from(tvar[0:ngtot])
That will happen when the unmap call is made.
*/
}
|
stream_malloc.c | // Copyright 2009-2021 NTESS. Under the terms
// of Contract DE-NA0003525 with NTESS, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2021, NTESS
// All rights reserved.
//
// Portions are copyright of other developers:
// See the file CONTRIBUTORS.TXT in the top level directory
// the distribution for more information.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.
#include <stdio.h>
#include <stdlib.h>
#include "ariel.h"
/*
 * STREAM-style triad micro-benchmark for the SST/Ariel simulator.
 * Each of the three arrays is preceded by an ariel_malloc_flag() call so
 * the simulator can identify/track the allocation that follows.
 * Fixes vs. original: malloc results are checked, main returns a status,
 * and the "Perfoming" typo in the progress message is corrected.
 */
int main(int argc, char* argv[]) {
    const int LENGTH = 2000;

    printf("Allocating arrays of size %d elements.\n", LENGTH);

    /* Flag IDs 0/1/2 tag the next allocation for the simulator. */
    ariel_malloc_flag(0, 1, 1);
    double* a = (double*) malloc(sizeof(double) * LENGTH);
    ariel_malloc_flag(1, 1, 1);
    double* b = (double*) malloc(sizeof(double) * LENGTH);
    ariel_malloc_flag(2, 1, 1);
    double* c = (double*) malloc(sizeof(double) * LENGTH);

    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "Error: failed to allocate benchmark arrays.\n");
        free(a);   /* free(NULL) is a no-op, so unconditional frees are safe */
        free(b);
        free(c);
        return EXIT_FAILURE;
    }

    printf("Done allocating arrays.\n");

    int i;
    /* Initialize so the triad result is analytically known. */
    for(i = 0; i < LENGTH; ++i) {
        a[i] = i;
        b[i] = LENGTH - i;
        c[i] = 0;
    }

    printf("Performing the fast_c compute loop...\n");

    /* Triad: c = 2a + 1.5b; iterations are independent, i is the loop
       variable and therefore implicitly private. */
#pragma omp parallel for
    for(i = 0; i < LENGTH; ++i) {
        //printf("issuing a write to: %llu (fast_c)\n", ((unsigned long long int) &fast_c[i]));
        c[i] = 2.0 * a[i] + 1.5 * b[i];
    }

    /* Serial reduction used as a checksum of the parallel loop. */
    double sum = 0;
    for(i = 0; i < LENGTH; ++i) {
        sum += c[i];
    }

    printf("Sum of arrays is: %f\n", sum);

    printf("Freeing arrays...\n");
    free(a);
    free(b);
    free(c);

    printf("Done.\n");
    return 0;
}
|
NetCDFMesh.h | /**
* @file
* This file is part of PUMGen
*
* For conditions of distribution and use, please see the copyright
* notice in the file 'COPYING' at the root directory of this package
* and the copyright notice at https://github.com/SeisSol/PUMGen
*
* @copyright 2017 Technical University of Munich
* @author Sebastian Rettenberger <sebastian.rettenberger@tum.de>
*/
#ifndef NETCDF_MESH_H
#define NETCDF_MESH_H
#include <mpi.h>
#include <netcdf.h>
#include <netcdf_par.h>
#include <PCU.h>
#include <apfConvert.h>
#include <apfMDS.h>
#include <apfMesh2.h>
#include <gmi_null.h>
#include "utils/logger.h"
#include "MeshInput.h"
#include "NetCDFPartition.h"
#include "ParallelVertexFilter.h"
/**
* Read PUMGen generated mesh files
*/
/**
 * Reads a PUMGen-partitioned netCDF mesh in parallel and builds an APF
 * (MDS) tetrahedral mesh from it.
 *
 * Fixes vs. original:
 *  - the two element-copy loops used `#pragma omp parallel` without `for`,
 *    so every thread redundantly executed the whole loop (a data race of
 *    identical writes); now proper worksharing loops.
 *  - `elementsLocal` was never freed (memory leak); now released.
 */
class NetCDFMesh : public MeshInput {
  public:
  /**
   * @param meshFile path of the PUMGen netCDF mesh file
   * @param comm     communicator over which the partitions are distributed
   */
  NetCDFMesh(const char* meshFile, MPI_Comm comm = MPI_COMM_WORLD) {
    int rank = 0;
    int nProcs = 1;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &nProcs);

    gmi_register_null();
    gmi_model* model = gmi_load(".null");
    m_mesh = apf::makeEmptyMdsMesh(model, 3, false);

    int ncFile;
    checkNcError(nc_open_par(meshFile, NC_MPIIO, comm, MPI_INFO_NULL, &ncFile));

    // Get number of partitions
    int ncDimPart;
    checkNcError(nc_inq_dimid(ncFile, "partitions", &ncDimPart));
    size_t nPartitions;
    checkNcError(nc_inq_dimlen(ncFile, ncDimPart, &nPartitions));

    // Local partitions: ceil(nPartitions / nProcs) per rank; trailing ranks
    // may own fewer (possibly zero) partitions.
    unsigned int nMaxLocalPart = (nPartitions + nProcs - 1) / nProcs;
    unsigned int nLocalPart = nMaxLocalPart;
    if (nPartitions < (rank + 1) * nMaxLocalPart)
      nLocalPart = std::max(0, static_cast<int>(nPartitions - rank * nMaxLocalPart));

    // Ranks without partitions drop out of the I/O communicator.
    MPI_Comm commIO;
    MPI_Comm_split(MPI_COMM_WORLD, (nLocalPart > 0 ? 0 : MPI_UNDEFINED), 0, &commIO);

    // Reopen netCDF file with correct communicator
    checkNcError(nc_close(ncFile));
    if (nLocalPart > 0)
      checkNcError(nc_open_par(meshFile, NC_MPIIO, commIO, MPI_INFO_NULL, &ncFile));

    PCU_Switch_Comm(commIO);

    unsigned int nElements = 0;
    unsigned int nVertices = 0;
    int* elements = 0L;
    double* vertices = 0L;
    int* boundaries = 0L;
    int* groups = 0L;

    if (nLocalPart > 0) {
      // Look up netCDF variables and switch them to collective access.
      int ncVarElemSize;
      checkNcError(nc_inq_varid(ncFile, "element_size", &ncVarElemSize));
      collectiveAccess(ncFile, ncVarElemSize);

      int ncVarElemVertices;
      checkNcError(nc_inq_varid(ncFile, "element_vertices", &ncVarElemVertices));
      collectiveAccess(ncFile, ncVarElemVertices);

      int ncVarElemBoundaries;
      checkNcError(nc_inq_varid(ncFile, "element_boundaries", &ncVarElemBoundaries));
      collectiveAccess(ncFile, ncVarElemBoundaries);

      int ncVarElemGroup;
      bool useGroups = true;
      if (nc_inq_varid(ncFile, "element_group", &ncVarElemGroup) != NC_NOERR) {
        useGroups = false;
        logWarning() << "No group found, using group 0 for all elements";
      } else {
        collectiveAccess(ncFile, ncVarElemGroup);
      }

      int ncVarVrtxSize;
      checkNcError(nc_inq_varid(ncFile, "vertex_size", &ncVarVrtxSize));
      collectiveAccess(ncFile, ncVarVrtxSize);

      int ncVarVrtxCoords;
      checkNcError(nc_inq_varid(ncFile, "vertex_coordinates", &ncVarVrtxCoords));
      collectiveAccess(ncFile, ncVarVrtxCoords);

      Partition* partitions = new Partition[nLocalPart];

      // Read elements.  All ranks loop nMaxLocalPart times so every rank
      // participates in each collective read; ranks with fewer partitions
      // wrap around (j = i % nLocalPart) and simply re-read one of theirs.
      logInfo(rank) << "Reading netCDF file";
      for (unsigned int i = 0; i < nMaxLocalPart; i++) {
        unsigned int j = i % nLocalPart;
        size_t start[3] = {j + rank * nMaxLocalPart, 0, 0};

        // Element size
        unsigned int size;
        checkNcError(nc_get_var1_uint(ncFile, ncVarElemSize, start, &size));
        partitions[j].setElemSize(size);

        size_t count[3] = {1, size, 4};

        // Elements (4 vertex indices per tetrahedron)
        checkNcError(
            nc_get_vara_int(ncFile, ncVarElemVertices, start, count, partitions[j].elements()));

        // Boundaries and group
        checkNcError(
            nc_get_vara_int(ncFile, ncVarElemBoundaries, start, count, partitions[j].boundaries()));
        if (useGroups)
          checkNcError(
              nc_get_vara_int(ncFile, ncVarElemGroup, start, count, partitions[j].groups()));

        // Vertex size
        checkNcError(nc_get_var1_uint(ncFile, ncVarVrtxSize, start, &size));
        partitions[j].setVrtxSize(size);

        // Vertices (3 coordinates each)
        count[1] = size;
        count[2] = 3;
        checkNcError(
            nc_get_vara_double(ncFile, ncVarVrtxCoords, start, count, partitions[j].vertices()));
      }

      checkNcError(nc_close(ncFile));

      for (unsigned int i = 0; i < nLocalPart; i++) {
        nElements += partitions[i].nElements();
        nVertices += partitions[i].nVertices();
      }

      // Concatenate all local partitions into flat buffers.
      unsigned int* elementsLocal = new unsigned int[nElements * 4];
      elements = new int[nElements * 4];
      vertices = new double[nVertices * 3];
      boundaries = new int[nElements * 4];
      groups = new int[nElements];

      unsigned int elementOffset = 0;
      unsigned int vertexOffset = 0;
      for (unsigned int i = 0; i < nLocalPart; i++) {
        // Shift partition-local vertex ids into the concatenated numbering.
        // FIX: was `#pragma omp parallel` (no `for`) -- every thread ran the
        // whole loop, racing with identical writes.
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (unsigned int j = 0; j < partitions[i].nElements() * 4; j++)
          elementsLocal[elementOffset * 4 + j] = partitions[i].elements()[j] + vertexOffset;

        memcpy(&vertices[vertexOffset * 3], partitions[i].vertices(),
               partitions[i].nVertices() * 3 * sizeof(double));

        partitions[i].convertBoundary();
        memcpy(&boundaries[elementOffset * 4], partitions[i].boundaries(),
               partitions[i].nElements() * 4 * sizeof(int));

        memcpy(&groups[elementOffset], partitions[i].groups(),
               partitions[i].nElements() * sizeof(int));

        elementOffset += partitions[i].nElements();
        vertexOffset += partitions[i].nVertices();
      }

      // Deduplicate vertices shared between partitions/ranks.
      logInfo(rank) << "Running vertex filter";
      ParallelVertexFilter filter(commIO);
      filter.filter(nVertices, vertices);

      // Create filtered vertex list
      delete[] vertices;
      nVertices = filter.numLocalVertices();
      vertices = new double[nVertices * 3];
      memcpy(vertices, filter.localVertices(), nVertices * 3 * sizeof(double));

      logInfo(rank) << "Converting local to global vertex identifier";
      // FIX: was `#pragma omp parallel` (no `for`) as well.
#ifdef _OPENMP
#pragma omp parallel for
#endif
      for (unsigned int i = 0; i < nElements * 4; i++)
        elements[i] = filter.globalIds()[elementsLocal[i]];

      delete[] elementsLocal; // FIX: was leaked
      delete[] partitions;
    }

    logInfo(rank) << "Constructing the mesh";
    apf::GlobalToVert vertMap;
    apf::construct(m_mesh, elements, nElements, apf::Mesh::TET, vertMap);
    delete[] elements;

    apf::alignMdsRemotes(m_mesh);
    apf::deriveMdsModel(m_mesh);

    logInfo(rank) << "Set coordinates in APF";
    apf::setCoords(m_mesh, vertices, nVertices, vertMap);
    delete[] vertices;

    // Tag boundary faces; a boundary value of 0 means "no condition".
    apf::MeshTag* boundaryTag = m_mesh->createIntTag("boundary condition", 1);
    apf::MeshIterator* it = m_mesh->begin(3);
    unsigned int i = 0;
    while (apf::MeshEntity* element = m_mesh->iterate(it)) {
      apf::Adjacent adjacent;
      m_mesh->getAdjacent(element, 2, adjacent);

      for (unsigned int j = 0; j < 4; j++) {
        if (!boundaries[i * 4 + j])
          continue;

        m_mesh->setIntTag(adjacent[j], boundaryTag, &boundaries[i * 4 + j]);
      }

      i++;
    }
    m_mesh->end(it);
    delete[] boundaries;

    // Tag each tetrahedron with its material group.
    apf::MeshTag* groupTag = m_mesh->createIntTag("group", 1);
    it = m_mesh->begin(3);
    i = 0;
    while (apf::MeshEntity* element = m_mesh->iterate(it)) {
      m_mesh->setIntTag(element, groupTag, &groups[i]);
      i++;
    }
    m_mesh->end(it);
    delete[] groups;

    PCU_Switch_Comm(MPI_COMM_WORLD);
  }

  private:
  /**
   * Switch to collective access for a netCDF variable
   */
  static void collectiveAccess(int ncFile, int ncVar) {
    checkNcError(nc_var_par_access(ncFile, ncVar, NC_COLLECTIVE));
  }

  /** Abort with a readable message if a netCDF call failed. */
  static void checkNcError(int error) {
    if (error != NC_NOERR)
      logError() << "Error while reading netCDF file:" << nc_strerror(error);
  }
};
#endif // NETCDF_MESH_H
|
TestQuantArray0.h | #ifndef __TESTQUANTARRAY0__
#define __TESTQUANTARRAY0__
#include "../Headers/Common.h"
#include "../Headers/Timer.h"
#include "../Headers/Range.h"
#include "../Headers/Ref.h"
#include "../Headers/Thread.h"
#include "../Headers/NDArrayQuantCPU.h"
#include "../Headers/NDArrayApproxCPU.h"
#include "../Headers/OpsQuant.h"
#include "../Tests/Utils.h"
/**
 * Runs the quantized MNIST CNN (Conv1-3 + FC1 + FC_Logits) over the test
 * set using NUMTHREADS replicated compute graphs evaluated in parallel,
 * and prints per-sample predictions plus the final accuracy.
 *
 * Weights/biases/activation scales are read as (S, Z, values...) triples
 * from text files under WEIGHTFOLDER, one file per layer and kind.
 *
 * Fixes vs. original: lutfile is passed by const reference (matching
 * testFashionMNIST) and the "Accuray" typo in the final output is fixed.
 *
 * @param lutfile path of the approximate-multiplier LUT to load
 * @return 0 on success (exits on file-open failure)
 */
int testMNIST(const string &lutfile = "../Utils/LUT_HEAM.txt")
{
    // Input quantization: q = Z_input + pixel / S_input (pixels in [0,1]).
    float S_input = 1.0 / 255.0;
    int Z_input = 0;
    const size_t NUMTHREADS = 20;
    const string WEIGHTFOLDER = "../Weights/MNIST/";

    NDArray::_loadLUT(lutfile);

    unordered_map<string, float> map_S_weights;
    unordered_map<string, int> map_Z_weights;
    unordered_map<string, vector<Scalar>> map_Q_weights;
    unordered_map<string, NDArray> map_weights;
    unordered_map<string, float> map_S_biases;
    unordered_map<string, int> map_Z_biases;
    unordered_map<string, vector<int>> map_Q_biases;
    unordered_map<string, float> map_S_activations;
    unordered_map<string, int> map_Z_activations;

    // Layer topology (sizes/shapes must match the exported weight files).
    vector<string> namesLayers = {"Conv1", "Conv2", "Conv3", "FC1", "FC_Logits"};
    vector<size_t> sizesWeights = {5*5*1*16, 5*5*16*32, 5*5*32*64, 1024*256, 256*10};
    vector<size_t> sizesBiases = {16, 32, 64, 256, 10};
    vector<vector<size_t>> shapesWeights = {{5*5*1, 16}, {5*5*16, 32}, {5*5*32, 64}, {1024, 256}, {256, 10}};

    // Load per-layer quantized weights: scale S, zero point Z, then values.
    for(size_t idx = 0; idx < namesLayers.size(); idx++)
    {
        float S_weights;
        int Z_weights;
        vector<Scalar> Q_weights(sizesWeights[idx]);
        ifstream fin_weights(WEIGHTFOLDER + namesLayers[idx] + "_weights.txt");
        if(!fin_weights)
        {
            cerr << "ERROR: failed to open the file. " << endl;
            exit(1);
        }
        fin_weights >> S_weights;
        fin_weights >> Z_weights;
        for(size_t jdx = 0; jdx < sizesWeights[idx]; jdx++)
        {
            int tmp;
            fin_weights >> tmp;
            Q_weights[jdx] = tmp;
        }
        fin_weights.close();
        NDArray weights(S_weights, Z_weights, shapesWeights[idx], Q_weights);
        map_S_weights[namesLayers[idx]] = S_weights;
        map_Z_weights[namesLayers[idx]] = Z_weights;
        map_Q_weights[namesLayers[idx]] = Q_weights;
        map_weights[namesLayers[idx]] = weights;
    }

    // Load per-layer quantized biases (same S/Z-then-values file layout).
    for(size_t idx = 0; idx < namesLayers.size(); idx++)
    {
        float S_biases;
        int Z_biases;
        vector<int> Q_biases(sizesBiases[idx]);
        ifstream fin_biases(WEIGHTFOLDER + namesLayers[idx] + "_biases.txt");
        if(!fin_biases)
        {
            cerr << "ERROR: failed to open the file. " << endl;
            exit(1);
        }
        fin_biases >> S_biases;
        fin_biases >> Z_biases;
        for(size_t jdx = 0; jdx < sizesBiases[idx]; jdx++)
        {
            int tmp;
            fin_biases >> tmp;
            Q_biases[jdx] = tmp;
        }
        fin_biases.close();
        map_S_biases[namesLayers[idx]] = S_biases;
        map_Z_biases[namesLayers[idx]] = Z_biases;
        map_Q_biases[namesLayers[idx]] = Q_biases;
    }

    // Load per-layer activation quantization parameters (S and Z only).
    for(size_t idx = 0; idx < namesLayers.size(); idx++)
    {
        float S_activations;
        int Z_activations;
        ifstream fin_activations(WEIGHTFOLDER + namesLayers[idx] + "_activations.txt");
        if(!fin_activations)
        {
            cerr << "ERROR: failed to open the file. " << endl;
            exit(1);
        }
        fin_activations >> S_activations;
        fin_activations >> Z_activations;
        fin_activations.close();
        map_S_activations[namesLayers[idx]] = S_activations;
        map_Z_activations[namesLayers[idx]] = Z_activations;
    }

    // One independent input placeholder per worker thread (28x28x1 image).
    vector<Ref<NDArray>> image_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        image_pool.push_back(new NDArray(S_input, Z_input, {28, 28, 1}));
    }
    vector<Ref<Var>> input_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        input_pool.push_back(new Var(image_pool[idx]));
    }

    // Conv1: 5x5 conv + ReLU, then 2x2 max-pool; replicated per thread.
    cout << "Building Conv1: S = " << map_S_weights["Conv1"] << "; Z = " << map_Z_weights["Conv1"] << "; S_act = " << map_S_activations["Conv1"] << "; Z_act = " << map_Z_activations["Conv1"] << endl;
    vector<Ref<Var>> weightsConv1_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        weightsConv1_pool.push_back(new Var(map_weights["Conv1"]));
    }
    vector<Ref<vector<int>>> biasesConv1_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        biasesConv1_pool.push_back(new vector<int>(map_Q_biases["Conv1"]));
    }
    vector<Ref<Var>> preconv1_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        preconv1_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv1"], map_Z_activations["Conv1"], input_pool[idx], weightsConv1_pool[idx], biasesConv1_pool[idx], {5, 5}, {1, 1}, true));
    }
    vector<Ref<Var>> conv1_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        conv1_pool.push_back(MaxPool<NDArray>(preconv1_pool[idx], {2, 2}, {2, 2}, true));
    }

    // Conv2: same pattern, fed by Conv1's pooled output.
    cout << "Building Conv2: S = " << map_S_weights["Conv2"] << "; Z = " << map_Z_weights["Conv2"] << "; S_act = " << map_S_activations["Conv2"] << "; Z_act = " << map_Z_activations["Conv2"] << endl;
    vector<Ref<Var>> weightsConv2_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        weightsConv2_pool.push_back(new Var(map_weights["Conv2"]));
    }
    vector<Ref<vector<int>>> biasesConv2_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        biasesConv2_pool.push_back(new vector<int>(map_Q_biases["Conv2"]));
    }
    vector<Ref<Var>> preconv2_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        preconv2_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv2"], map_Z_activations["Conv2"], conv1_pool[idx], weightsConv2_pool[idx], biasesConv2_pool[idx], {5, 5}, {1, 1}, true));
    }
    vector<Ref<Var>> conv2_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        conv2_pool.push_back(MaxPool<NDArray>(preconv2_pool[idx], {2, 2}, {2, 2}, true));
    }

    // Conv3: same pattern, fed by Conv2's pooled output.
    cout << "Building Conv3: S = " << map_S_weights["Conv3"] << "; Z = " << map_Z_weights["Conv3"] << "; S_act = " << map_S_activations["Conv3"] << "; Z_act = " << map_Z_activations["Conv3"] << endl;
    vector<Ref<Var>> weightsConv3_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        weightsConv3_pool.push_back(new Var(map_weights["Conv3"]));
    }
    vector<Ref<vector<int>>> biasesConv3_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        biasesConv3_pool.push_back(new vector<int>(map_Q_biases["Conv3"]));
    }
    vector<Ref<Var>> preconv3_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        preconv3_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv3"], map_Z_activations["Conv3"], conv2_pool[idx], weightsConv3_pool[idx], biasesConv3_pool[idx], {5, 5}, {1, 1}, true));
    }
    vector<Ref<Var>> conv3_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        conv3_pool.push_back(MaxPool<NDArray>(preconv3_pool[idx], {2, 2}, {2, 2}, true));
    }

    // Flatten Conv3 output into a vector for the fully-connected layers.
    vector<Ref<Var>> flatten_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        flatten_pool.push_back(Flatten<NDArray>(conv3_pool[idx]));
    }

    // FC1: fully-connected (multiply-add) + ReLU.
    cout << "Building FC1: S = " << map_S_weights["FC1"] << "; Z = " << map_Z_weights["FC1"] << "; S_act = " << map_S_activations["FC1"] << "; Z_act = " << map_Z_activations["FC1"] << endl;
    vector<Ref<Var>> weightsFC1_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        weightsFC1_pool.push_back(new Var(map_weights["FC1"]));
    }
    vector<Ref<vector<int>>> biasesFC1_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        biasesFC1_pool.push_back(new vector<int>(map_Q_biases["FC1"]));
    }
    vector<Ref<Var>> fc1_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        fc1_pool.push_back(MADReLU<NDArray>(map_S_activations["FC1"], map_Z_activations["FC1"], flatten_pool[idx], weightsFC1_pool[idx], biasesFC1_pool[idx]));
    }

    // FC_Logits: final fully-connected layer, no activation (raw logits).
    cout << "Building FC2: S = " << map_S_weights["FC_Logits"] << "; Z = " << map_Z_weights["FC_Logits"] << "; S_act = " << map_S_activations["FC_Logits"] << "; Z_act = " << map_Z_activations["FC_Logits"] << endl;
    vector<Ref<Var>> weightsFC2_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        weightsFC2_pool.push_back(new Var(map_weights["FC_Logits"]));
    }
    vector<Ref<vector<int>>> biasesFC2_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        biasesFC2_pool.push_back(new vector<int>(map_Q_biases["FC_Logits"]));
    }
    vector<Ref<Var>> fc2_pool;
    for(size_t idx = 0; idx < NUMTHREADS; idx++)
    {
        fc2_pool.push_back(MAD<NDArray>(map_S_activations["FC_Logits"], map_Z_activations["FC_Logits"], fc1_pool[idx], weightsFC2_pool[idx], biasesFC2_pool[idx]));
    }

    // Evaluate the test set in batches of NUMTHREADS samples.
    // NOTE(review): assumes TestSize is a multiple of NUMTHREADS (10000/20
    // here) and that getImages()/getLabels() return >= TestSize samples.
    vector<vector<float>> images = getImages();
    vector<unsigned> labels = getLabels();
    float count = 0;
    size_t TestSize = 10000;
    for(size_t idx = 0; idx < TestSize; idx+=NUMTHREADS)
    {
        // Quantize each image and load it into the thread's placeholder.
        // Negative quantized values are clamped to 0 (ReLU-style floor).
        for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
        {
            vector<Scalar> Q_image(784);
            for(size_t kdx = 0; kdx < 784; kdx++)
            {
                float tmp = images[idx+jdx][kdx];
                Q_image[kdx] = (Z_input + tmp / S_input > 0) ? round(Z_input + tmp / S_input) : 0;
            }
            image_pool[jdx]->set(Q_image);
        }
        // Each thread evaluates its own replicated graph; idx+1 is passed
        // as the evaluation step -- presumably a cache-invalidation tag.
        #pragma omp parallel for num_threads(NUMTHREADS)
        for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
        {
            fc2_pool[jdx]->evaluate(idx+1);
        }
        // Argmax over the logits gives the predicted class.
        for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
        {
            size_t label = fc2_pool[jdx]->value().posmax();
            if(label == labels[idx+jdx])
            {
                count++;
            }
            //fc2_pool[jdx]->value().eval().print();
            cout << "Sample No." << (idx+jdx+1) << " " << "; Label: " << labels[idx+jdx] << ", Predicted: " << label << " -> " << ((label == labels[idx+jdx]) ? "Right" : "Wrong") << endl;
        }
    }
    cout << "Accuracy: " << (count / TestSize) << endl;
    return 0;
}
// Runs the quantized 3-conv / 2-FC network on the FashionMNIST test set and
// prints one line per sample plus the final accuracy.
// lutfile: path to the approximate-multiplier LUT loaded into NDArray.
// Returns 0 on completion; calls exit(1) if any weight/bias/activation file
// is missing. NUMTHREADS full copies of the compute graph are built so that
// NUMTHREADS samples can be evaluated independently per OpenMP batch.
int testFashionMNIST(const string &lutfile = "../Utils/LUT_HEAM.txt")
{
// Input quantization: real_pixel = S_input * (q - Z_input), pixels in [0, 255].
float S_input = 1.0 / 255.0;
int Z_input = 0;
const size_t NUMTHREADS = 20;
const string WEIGHTFOLDER = "../Weights/FashionMNIST/";
NDArray::_loadLUT(lutfile);
// Per-layer quantization metadata keyed by layer name:
// S_* = scale, Z_* = zero point, Q_* = raw quantized integer values.
unordered_map<string, float> map_S_weights;
unordered_map<string, int> map_Z_weights;
unordered_map<string, vector<Scalar>> map_Q_weights;
unordered_map<string, NDArray> map_weights;
unordered_map<string, float> map_S_biases;
unordered_map<string, int> map_Z_biases;
unordered_map<string, vector<int>> map_Q_biases;
unordered_map<string, float> map_S_activations;
unordered_map<string, int> map_Z_activations;
// Static network description; the four tables below are index-aligned.
// Weight shapes are (kernel_h * kernel_w * in_ch, out_ch) for convs and
// (in_features, out_features) for FC layers.
vector<string> namesLayers = {"Conv1", "Conv2", "Conv3", "FC1", "FC_Logits"};
vector<size_t> sizesWeights = {5*5*1*32, 5*5*32*64, 5*5*64*128, 2048*512, 512*10};
vector<size_t> sizesBiases = {32, 64, 128, 512, 10};
vector<vector<size_t>> shapesWeights = {{5*5*1, 32}, {5*5*32, 64}, {5*5*64, 128}, {2048, 512}, {512, 10}};
// Load quantized weights. File layout: scale, zero point, then the
// flattened integer weight values in row-major order.
for(size_t idx = 0; idx < namesLayers.size(); idx++)
{
float S_weights;
int Z_weights;
vector<Scalar> Q_weights(sizesWeights[idx]);
ifstream fin_weights(WEIGHTFOLDER + namesLayers[idx] + "_weights.txt");
if(!fin_weights)
{
cerr << "ERROR: failed to open the file. " << endl;
exit(1);
}
fin_weights >> S_weights;
fin_weights >> Z_weights;
for(size_t jdx = 0; jdx < sizesWeights[idx]; jdx++)
{
int tmp;
fin_weights >> tmp;
Q_weights[jdx] = tmp;
}
fin_weights.close();
NDArray weights(S_weights, Z_weights, shapesWeights[idx], Q_weights);
map_S_weights[namesLayers[idx]] = S_weights;
map_Z_weights[namesLayers[idx]] = Z_weights;
map_Q_weights[namesLayers[idx]] = Q_weights;
map_weights[namesLayers[idx]] = weights;
}
// Load quantized biases (same file layout: scale, zero point, values).
for(size_t idx = 0; idx < namesLayers.size(); idx++)
{
float S_biases;
int Z_biases;
vector<int> Q_biases(sizesBiases[idx]);
ifstream fin_biases(WEIGHTFOLDER + namesLayers[idx] + "_biases.txt");
if(!fin_biases)
{
cerr << "ERROR: failed to open the file. " << endl;
exit(1);
}
fin_biases >> S_biases;
fin_biases >> Z_biases;
for(size_t jdx = 0; jdx < sizesBiases[idx]; jdx++)
{
int tmp;
fin_biases >> tmp;
Q_biases[jdx] = tmp;
}
fin_biases.close();
map_S_biases[namesLayers[idx]] = S_biases;
map_Z_biases[namesLayers[idx]] = Z_biases;
map_Q_biases[namesLayers[idx]] = Q_biases;
}
// Load per-layer activation quantization parameters (scale and zero point only).
for(size_t idx = 0; idx < namesLayers.size(); idx++)
{
float S_activations;
int Z_activations;
ifstream fin_activations(WEIGHTFOLDER + namesLayers[idx] + "_activations.txt");
if(!fin_activations)
{
cerr << "ERROR: failed to open the file. " << endl;
exit(1);
}
fin_activations >> S_activations;
fin_activations >> Z_activations;
fin_activations.close();
map_S_activations[namesLayers[idx]] = S_activations;
map_Z_activations[namesLayers[idx]] = Z_activations;
}
// One input buffer and one complete copy of the graph per worker slot,
// so the OpenMP loop below can evaluate NUMTHREADS samples independently.
vector<Ref<NDArray>> image_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
image_pool.push_back(new NDArray(S_input, Z_input, {28, 28, 1}));
}
vector<Ref<Var>> input_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
input_pool.push_back(new Var(image_pool[idx]));
}
// Conv1: 5x5 conv + ReLU, then 2x2/2 max pool (28x28 -> 14x14).
cout << "Building Conv1: S = " << map_S_weights["Conv1"] << "; Z = " << map_Z_weights["Conv1"] << "; S_act = " << map_S_activations["Conv1"] << "; Z_act = " << map_Z_activations["Conv1"] << endl;
vector<Ref<Var>> weightsConv1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv1_pool.push_back(new Var(map_weights["Conv1"]));
}
vector<Ref<vector<int>>> biasesConv1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv1_pool.push_back(new vector<int>(map_Q_biases["Conv1"]));
}
vector<Ref<Var>> preconv1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
preconv1_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv1"], map_Z_activations["Conv1"], input_pool[idx], weightsConv1_pool[idx], biasesConv1_pool[idx], {5, 5}, {1, 1}, true));
}
vector<Ref<Var>> conv1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv1_pool.push_back(MaxPool<NDArray>(preconv1_pool[idx], {2, 2}, {2, 2}, true));
}
// Conv2: 5x5 conv + ReLU, then 2x2/2 max pool (14x14 -> 7x7).
cout << "Building Conv2: S = " << map_S_weights["Conv2"] << "; Z = " << map_Z_weights["Conv2"] << "; S_act = " << map_S_activations["Conv2"] << "; Z_act = " << map_Z_activations["Conv2"] << endl;
vector<Ref<Var>> weightsConv2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv2_pool.push_back(new Var(map_weights["Conv2"]));
}
vector<Ref<vector<int>>> biasesConv2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv2_pool.push_back(new vector<int>(map_Q_biases["Conv2"]));
}
vector<Ref<Var>> preconv2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
preconv2_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv2"], map_Z_activations["Conv2"], conv1_pool[idx], weightsConv2_pool[idx], biasesConv2_pool[idx], {5, 5}, {1, 1}, true));
}
vector<Ref<Var>> conv2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv2_pool.push_back(MaxPool<NDArray>(preconv2_pool[idx], {2, 2}, {2, 2}, true));
}
// Conv3: 5x5 conv + ReLU, then 2x2/2 max pool (7x7 -> 4x4; 4*4*128 = 2048,
// matching FC1's input width above).
cout << "Building Conv3: S = " << map_S_weights["Conv3"] << "; Z = " << map_Z_weights["Conv3"] << "; S_act = " << map_S_activations["Conv3"] << "; Z_act = " << map_Z_activations["Conv3"] << endl;
vector<Ref<Var>> weightsConv3_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv3_pool.push_back(new Var(map_weights["Conv3"]));
}
vector<Ref<vector<int>>> biasesConv3_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv3_pool.push_back(new vector<int>(map_Q_biases["Conv3"]));
}
vector<Ref<Var>> preconv3_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
preconv3_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv3"], map_Z_activations["Conv3"], conv2_pool[idx], weightsConv3_pool[idx], biasesConv3_pool[idx], {5, 5}, {1, 1}, true));
}
vector<Ref<Var>> conv3_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv3_pool.push_back(MaxPool<NDArray>(preconv3_pool[idx], {2, 2}, {2, 2}, true));
}
vector<Ref<Var>> flatten_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
flatten_pool.push_back(Flatten<NDArray>(conv3_pool[idx]));
}
// FC1: 2048 -> 512 with fused ReLU.
cout << "Building FC1: S = " << map_S_weights["FC1"] << "; Z = " << map_Z_weights["FC1"] << "; S_act = " << map_S_activations["FC1"] << "; Z_act = " << map_Z_activations["FC1"] << endl;
vector<Ref<Var>> weightsFC1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsFC1_pool.push_back(new Var(map_weights["FC1"]));
}
vector<Ref<vector<int>>> biasesFC1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesFC1_pool.push_back(new vector<int>(map_Q_biases["FC1"]));
}
vector<Ref<Var>> fc1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
fc1_pool.push_back(MADReLU<NDArray>(map_S_activations["FC1"], map_Z_activations["FC1"], flatten_pool[idx], weightsFC1_pool[idx], biasesFC1_pool[idx]));
}
// FC2 (logits): 512 -> 10, no activation (plain MAD).
cout << "Building FC2: S = " << map_S_weights["FC_Logits"] << "; Z = " << map_Z_weights["FC_Logits"] << "; S_act = " << map_S_activations["FC_Logits"] << "; Z_act = " << map_Z_activations["FC_Logits"] << endl;
vector<Ref<Var>> weightsFC2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsFC2_pool.push_back(new Var(map_weights["FC_Logits"]));
}
vector<Ref<vector<int>>> biasesFC2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesFC2_pool.push_back(new vector<int>(map_Q_biases["FC_Logits"]));
}
vector<Ref<Var>> fc2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
fc2_pool.push_back(MAD<NDArray>(map_S_activations["FC_Logits"], map_Z_activations["FC_Logits"], fc1_pool[idx], weightsFC2_pool[idx], biasesFC2_pool[idx]));
}
vector<vector<float>> images = getImagesFashion();
vector<unsigned> labels = getLabelsFashion();
float count = 0;
size_t TestSize = 10000;
// Evaluate the test set in batches of NUMTHREADS samples.
// NOTE(review): assumes NUMTHREADS evenly divides TestSize (20 | 10000 here);
// a remainder batch would index past the end of images/labels.
for(size_t idx = 0; idx < TestSize; idx+=NUMTHREADS)
{
// Quantize each 28x28 image: q = round(Z_input + pixel / S_input),
// clamped below at 0, and load it into that slot's input buffer.
for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
{
vector<Scalar> Q_image(784);
for(size_t kdx = 0; kdx < 784; kdx++)
{
float tmp = images[idx+jdx][kdx];
Q_image[kdx] = (Z_input + tmp / S_input > 0) ? round(Z_input + tmp / S_input) : 0;
}
image_pool[jdx]->set(Q_image);
}
// Run all NUMTHREADS graph copies concurrently; each thread touches only
// its own pool slot. NOTE(review): evaluate()'s argument appears to be an
// evaluation tick/epoch for node caching — confirm against Var::evaluate.
#pragma omp parallel for num_threads(NUMTHREADS)
for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
{
fc2_pool[jdx]->evaluate(idx+1);
}
// Score the batch serially: predicted class = argmax of the logits.
for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
{
size_t label = fc2_pool[jdx]->value().posmax();
if(label == labels[idx+jdx])
{
count++;
}
//fc2_pool[jdx]->value().eval().print();
cout << "Sample No." << (idx+jdx+1) << " " << "; Label: " << labels[idx+jdx] << ", Predicted: " << label << " -> " << ((label == labels[idx+jdx]) ? "Right" : "Wrong") << endl;
}
}
// NOTE(review): "Accuray" is a typo in the output label.
cout << "Accuray: " << (count / TestSize) << endl;
return 0;
}
// Runs the quantized 3-conv / 2-FC network on the CIFAR10 test set and, in
// the same pass, a float32 reference implementation built from the
// dequantized weights. Prints per-sample predictions from both models and
// both final accuracies (quantized `count` vs. float reference `_count`).
// lutfile: path to the approximate-multiplier LUT loaded into NDArray.
// Returns 0 on completion; calls exit(1) if any parameter file is missing.
int testCIFAR10(const string &lutfile = "../Utils/LUT_HEAM.txt")
{
// Input quantization parameters: real = S_input * (q - Z_input).
float S_input = 1.0 / 255.0;
int Z_input = 0;
const size_t NUMTHREADS = 20;
const string WEIGHTFOLDER = "../Weights/CIFAR10/";
NDArray::_loadLUT(lutfile);
// Per-layer quantization metadata keyed by layer name:
// S_* = scale, Z_* = zero point, Q_* = raw quantized integer values.
unordered_map<string, float> map_S_weights;
unordered_map<string, int> map_Z_weights;
unordered_map<string, vector<Scalar>> map_Q_weights;
unordered_map<string, NDArray> map_weights;
unordered_map<string, float> map_S_biases;
unordered_map<string, int> map_Z_biases;
unordered_map<string, vector<int>> map_Q_biases;
unordered_map<string, float> map_S_activations;
unordered_map<string, int> map_Z_activations;
// Static network description; the four tables below are index-aligned.
// Same topology as the MNIST/FashionMNIST variants but with 3 input channels.
vector<string> namesLayers = {"Conv1", "Conv2", "Conv3", "FC1", "FC_Logits"};
vector<size_t> sizesWeights = {5*5*3*32, 5*5*32*64, 5*5*64*128, 2048*512, 512*10};
vector<size_t> sizesBiases = {32, 64, 128, 512, 10};
vector<vector<size_t>> shapesWeights = {{5*5*3, 32}, {5*5*32, 64}, {5*5*64, 128}, {2048, 512}, {512, 10}};
// Load quantized weights. File layout: scale, zero point, then the
// flattened integer weight values.
for(size_t idx = 0; idx < namesLayers.size(); idx++)
{
float S_weights;
int Z_weights;
vector<Scalar> Q_weights(sizesWeights[idx]);
ifstream fin_weights(WEIGHTFOLDER + namesLayers[idx] + "_weights.txt");
if(!fin_weights)
{
cerr << "ERROR: failed to open the file. " << endl;
exit(1);
}
fin_weights >> S_weights;
fin_weights >> Z_weights;
for(size_t jdx = 0; jdx < sizesWeights[idx]; jdx++)
{
int tmp;
fin_weights >> tmp;
Q_weights[jdx] = tmp;
}
fin_weights.close();
NDArray weights(S_weights, Z_weights, shapesWeights[idx], Q_weights);
map_S_weights[namesLayers[idx]] = S_weights;
map_Z_weights[namesLayers[idx]] = Z_weights;
map_Q_weights[namesLayers[idx]] = Q_weights;
map_weights[namesLayers[idx]] = weights;
}
// Load quantized biases (same file layout: scale, zero point, values).
for(size_t idx = 0; idx < namesLayers.size(); idx++)
{
float S_biases;
int Z_biases;
vector<int> Q_biases(sizesBiases[idx]);
ifstream fin_biases(WEIGHTFOLDER + namesLayers[idx] + "_biases.txt");
if(!fin_biases)
{
cerr << "ERROR: failed to open the file. " << endl;
exit(1);
}
fin_biases >> S_biases;
fin_biases >> Z_biases;
for(size_t jdx = 0; jdx < sizesBiases[idx]; jdx++)
{
int tmp;
fin_biases >> tmp;
Q_biases[jdx] = tmp;
}
fin_biases.close();
map_S_biases[namesLayers[idx]] = S_biases;
map_Z_biases[namesLayers[idx]] = Z_biases;
map_Q_biases[namesLayers[idx]] = Q_biases;
}
// Load per-layer activation quantization parameters (scale and zero point only).
for(size_t idx = 0; idx < namesLayers.size(); idx++)
{
float S_activations;
int Z_activations;
ifstream fin_activations(WEIGHTFOLDER + namesLayers[idx] + "_activations.txt");
if(!fin_activations)
{
cerr << "ERROR: failed to open the file. " << endl;
exit(1);
}
fin_activations >> S_activations;
fin_activations >> Z_activations;
fin_activations.close();
map_S_activations[namesLayers[idx]] = S_activations;
map_Z_activations[namesLayers[idx]] = Z_activations;
}
// One input buffer and one complete copy of the graph per worker slot.
// NOTE(review): input is {28, 28, 3}, not CIFAR10's native 32x32x3 — the
// data files presumably hold pre-resized images; confirm getImagesCIFAR10().
vector<Ref<NDArray>> image_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
image_pool.push_back(new NDArray(S_input, Z_input, {28, 28, 3}));
}
vector<Ref<Var>> input_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
input_pool.push_back(new Var(image_pool[idx]));
}
// Conv1: 5x5 conv + ReLU, then 2x2/2 max pool (28x28 -> 14x14).
cout << "Building Conv1: S = " << map_S_weights["Conv1"] << "; Z = " << map_Z_weights["Conv1"] << "; S_act = " << map_S_activations["Conv1"] << "; Z_act = " << map_Z_activations["Conv1"] << endl;
vector<Ref<Var>> weightsConv1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv1_pool.push_back(new Var(map_weights["Conv1"]));
}
vector<Ref<vector<int>>> biasesConv1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv1_pool.push_back(new vector<int>(map_Q_biases["Conv1"]));
}
vector<Ref<Var>> preconv1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
preconv1_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv1"], map_Z_activations["Conv1"], input_pool[idx], weightsConv1_pool[idx], biasesConv1_pool[idx], {5, 5}, {1, 1}, true));
}
vector<Ref<Var>> conv1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv1_pool.push_back(MaxPool<NDArray>(preconv1_pool[idx], {2, 2}, {2, 2}, true));
}
// Conv2: 5x5 conv + ReLU, then 2x2/2 max pool (14x14 -> 7x7).
cout << "Building Conv2: S = " << map_S_weights["Conv2"] << "; Z = " << map_Z_weights["Conv2"] << "; S_act = " << map_S_activations["Conv2"] << "; Z_act = " << map_Z_activations["Conv2"] << endl;
vector<Ref<Var>> weightsConv2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv2_pool.push_back(new Var(map_weights["Conv2"]));
}
vector<Ref<vector<int>>> biasesConv2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv2_pool.push_back(new vector<int>(map_Q_biases["Conv2"]));
}
vector<Ref<Var>> preconv2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
preconv2_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv2"], map_Z_activations["Conv2"], conv1_pool[idx], weightsConv2_pool[idx], biasesConv2_pool[idx], {5, 5}, {1, 1}, true));
}
vector<Ref<Var>> conv2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv2_pool.push_back(MaxPool<NDArray>(preconv2_pool[idx], {2, 2}, {2, 2}, true));
}
// Conv3: 5x5 conv + ReLU, then 2x2/2 max pool (7x7 -> 4x4; 4*4*128 = 2048).
cout << "Building Conv3: S = " << map_S_weights["Conv3"] << "; Z = " << map_Z_weights["Conv3"] << "; S_act = " << map_S_activations["Conv3"] << "; Z_act = " << map_Z_activations["Conv3"] << endl;
vector<Ref<Var>> weightsConv3_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv3_pool.push_back(new Var(map_weights["Conv3"]));
}
vector<Ref<vector<int>>> biasesConv3_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv3_pool.push_back(new vector<int>(map_Q_biases["Conv3"]));
}
vector<Ref<Var>> preconv3_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
preconv3_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv3"], map_Z_activations["Conv3"], conv2_pool[idx], weightsConv3_pool[idx], biasesConv3_pool[idx], {5, 5}, {1, 1}, true));
}
vector<Ref<Var>> conv3_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv3_pool.push_back(MaxPool<NDArray>(preconv3_pool[idx], {2, 2}, {2, 2}, true));
}
vector<Ref<Var>> flatten_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
flatten_pool.push_back(Flatten<NDArray>(conv3_pool[idx]));
}
// FC1: 2048 -> 512 with fused ReLU.
cout << "Building FC1: S = " << map_S_weights["FC1"] << "; Z = " << map_Z_weights["FC1"] << "; S_act = " << map_S_activations["FC1"] << "; Z_act = " << map_Z_activations["FC1"] << endl;
vector<Ref<Var>> weightsFC1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsFC1_pool.push_back(new Var(map_weights["FC1"]));
}
vector<Ref<vector<int>>> biasesFC1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesFC1_pool.push_back(new vector<int>(map_Q_biases["FC1"]));
}
vector<Ref<Var>> fc1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
fc1_pool.push_back(MADReLU<NDArray>(map_S_activations["FC1"], map_Z_activations["FC1"], flatten_pool[idx], weightsFC1_pool[idx], biasesFC1_pool[idx]));
}
// ---- Float32 reference model ----
// Dequantize every layer's weights (via value().eval()) and biases
// (real_bias = S_biases * q) into NDArrayFloatCPU buffers. These are used
// inside the evaluation loop to run an exact float forward pass next to the
// quantized/approximate one, as a ground-truth comparison.
Ref<Var> weightsConv1 = new Var(map_weights["Conv1"]);
Ref<vector<int>> biasesConv1 = new vector<int>(map_Q_biases["Conv1"]);
cout << "Weight Conv1: " << endl;
weightsConv1->value().print();
weightsConv1->value().eval().print();
NDArrayFloatCPU<float> _weightsConv1 = weightsConv1->value().eval();
cout << "Bias Conv1: " << endl;
print(*biasesConv1);
NDArrayFloatCPU<float> _biasesConv1(sizesBiases[0]);
for(size_t idx = 0; idx < _biasesConv1.size(); idx++)
{
_biasesConv1[idx] = map_S_biases["Conv1"] * (*biasesConv1)[idx];
}
Ref<Var> weightsConv2 = new Var(map_weights["Conv2"]);
Ref<vector<int>> biasesConv2 = new vector<int>(map_Q_biases["Conv2"]);
cout << "Weight Conv2: " << endl;
weightsConv2->value().print();
weightsConv2->value().eval().print();
NDArrayFloatCPU<float> _weightsConv2 = weightsConv2->value().eval();
cout << "Bias Conv2: " << endl;
print(*biasesConv2);
NDArrayFloatCPU<float> _biasesConv2(sizesBiases[1]);
for(size_t idx = 0; idx < _biasesConv2.size(); idx++)
{
_biasesConv2[idx] = map_S_biases["Conv2"] * (*biasesConv2)[idx];
}
Ref<Var> weightsConv3 = new Var(map_weights["Conv3"]);
Ref<vector<int>> biasesConv3 = new vector<int>(map_Q_biases["Conv3"]);
cout << "Weight Conv3: " << endl;
weightsConv3->value().print();
weightsConv3->value().eval().print();
NDArrayFloatCPU<float> _weightsConv3 = weightsConv3->value().eval();
cout << "Bias Conv3: " << endl;
print(*biasesConv3);
NDArrayFloatCPU<float> _biasesConv3(sizesBiases[2]);
for(size_t idx = 0; idx < _biasesConv3.size(); idx++)
{
_biasesConv3[idx] = map_S_biases["Conv3"] * (*biasesConv3)[idx];
}
Ref<Var> weightsFC1 = new Var(map_weights["FC1"]);
Ref<vector<int>> biasesFC1 = new vector<int>(map_Q_biases["FC1"]);
cout << "Weight FC1: " << endl;
weightsFC1->value().print();
weightsFC1->value().eval().print();
NDArrayFloatCPU<float> _weightsFC1 = weightsFC1->value().eval();
cout << "Bias FC1: " << endl;
print(*biasesFC1);
NDArrayFloatCPU<float> _biasesFC1(sizesBiases[3]);
for(size_t idx = 0; idx < _biasesFC1.size(); idx++)
{
_biasesFC1[idx] = map_S_biases["FC1"] * (*biasesFC1)[idx];
}
Ref<Var> weightsFC2 = new Var(map_weights["FC_Logits"]);
Ref<vector<int>> biasesFC2 = new vector<int>(map_Q_biases["FC_Logits"]);
cout << "Weight FC2: " << endl;
weightsFC2->value().print();
weightsFC2->value().eval().print();
NDArrayFloatCPU<float> _weightsFC2 = weightsFC2->value().eval();
cout << "Bias FC2: " << endl;
print(*biasesFC2);
NDArrayFloatCPU<float> _biasesFC2(sizesBiases[4]);
for(size_t idx = 0; idx < _biasesFC2.size(); idx++)
{
_biasesFC2[idx] = map_S_biases["FC_Logits"] * (*biasesFC2)[idx];
}
// FC2 (logits): 512 -> 10, no activation (plain MAD).
cout << "Building FC2: S = " << map_S_weights["FC_Logits"] << "; Z = " << map_Z_weights["FC_Logits"] << "; S_act = " << map_S_activations["FC_Logits"] << "; Z_act = " << map_Z_activations["FC_Logits"] << endl;
vector<Ref<Var>> weightsFC2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsFC2_pool.push_back(new Var(map_weights["FC_Logits"]));
}
vector<Ref<vector<int>>> biasesFC2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesFC2_pool.push_back(new vector<int>(map_Q_biases["FC_Logits"]));
}
vector<Ref<Var>> fc2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
fc2_pool.push_back(MAD<NDArray>(map_S_activations["FC_Logits"], map_Z_activations["FC_Logits"], fc1_pool[idx], weightsFC2_pool[idx], biasesFC2_pool[idx]));
}
vector<vector<float>> images = getImagesCIFAR10();
vector<unsigned> labels = getLabelsCIFAR10();
// count = correct predictions of the quantized model, _count = of the
// float reference model.
float count = 0, _count = 0;
size_t TestSize = 10000;
// NOTE(review): assumes NUMTHREADS evenly divides TestSize (20 | 10000 here).
for(size_t idx = 0; idx < TestSize; idx+=NUMTHREADS)
{
// Load the batch into the per-slot input buffers.
// NOTE(review): unlike the MNIST variants, pixel values are stored as-is
// (no round(Z_input + p / S_input)) — presumably the CIFAR10 data files
// already contain quantized 0-255 values; confirm against getImagesCIFAR10().
for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
{
vector<Scalar> Q_image(784*3);
for(size_t kdx = 0; kdx < 784*3; kdx++)
{
Q_image[kdx] = images[idx+jdx][kdx];
}
image_pool[jdx]->set(Q_image);
}
int _labels[NUMTHREADS] = {0, };
// Each thread evaluates its own quantized graph copy AND the float
// reference forward pass for the same sample; the reference prediction is
// stored in _labels for the serial scoring loop below.
#pragma omp parallel for num_threads(NUMTHREADS)
for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
{
fc2_pool[jdx]->evaluate(idx+1);
// Float reference: scale raw pixels to [0, 1], then conv/pool/FC chain
// mirroring the quantized graph (28 -> 14 -> 7 -> 4 spatial).
NDArrayFloatCPU<float> _image({28, 28, 3}, images[idx+jdx]);
_image = _image * (1.0 / 255.0);
NDArrayFloatCPU<float> _conv1 = (_image.im2col({5, 5}, {1, 1}, true) * _weightsConv1).reshape({28, 28, 32}).addChWise(_biasesConv1).ReLU().maxPool({2, 2}, {2, 2}, true);
// std::cout << _conv1.shape()[0] << ", " << _conv1.shape()[1] << ", " << _conv1.shape()[2] << std::endl;
NDArrayFloatCPU<float> _conv2 = (_conv1.im2col({5, 5}, {1, 1}, true) * _weightsConv2).reshape({14, 14, 64}).addChWise(_biasesConv2).ReLU().maxPool({2, 2}, {2, 2}, true);
// std::cout << _conv2.shape()[0] << ", " << _conv2.shape()[1] << ", " << _conv2.shape()[2] << std::endl;
NDArrayFloatCPU<float> _conv3 = (_conv2.im2col({5, 5}, {1, 1}, true) * _weightsConv3).reshape({7, 7, 128}).addChWise(_biasesConv3).ReLU().maxPool({2, 2}, {2, 2}, true);
// std::cout << _conv3.shape()[0] << ", " << _conv3.shape()[1] << ", " << _conv3.shape()[2] << std::endl;
NDArrayFloatCPU<float> _fc1 = (_conv3.reshape({1, 2048}) * _weightsFC1 +_biasesFC1.reshape({1, 512})).ReLU();
NDArrayFloatCPU<float> _fc2 = _fc1 * _weightsFC2 +_biasesFC2.reshape({1, 10});
_labels[jdx] = _fc2.posmax();
}
// Score both models; log line shows "quantized / float" for prediction,
// verdict, and running accuracy.
for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
{
size_t label = fc2_pool[jdx]->value().posmax();
size_t _label = _labels[jdx];
if(label == labels[idx+jdx])
{
count++;
}
if(_label == labels[idx+jdx])
{
_count++;
}
//fc2_pool[jdx]->value().eval().print();
cout << "Sample No." << (idx+jdx+1) << " " << "; Label: " << labels[idx+jdx] << ", Predicted: " << label << " / " << _label << " -> " << ((label == labels[idx+jdx]) ? "Right" : "Wrong") << " / " << ((_label == labels[idx+jdx]) ? "Right" : "Wrong") << "; Accuracy: " << (count / (idx+jdx+1)) << " / " << (_count / (idx+jdx+1)) << endl;
}
}
// NOTE(review): "Accuray" is a typo in the output labels.
cout << "Accuray: " << (count / TestSize) << endl;
cout << "_Accuray: " << (_count / TestSize) << endl;
return 0;
}
// Runs the quantized AlexNet-style network (8 conv + 3 FC layers) on the
// CIFAR10 test set and prints one line per sample plus the final accuracy.
// lutfile: path to the approximate-multiplier LUT loaded into NDArray.
// Returns 0 on completion; calls exit(1) if any parameter file is missing.
// NUMTHREADS full copies of the compute graph are built so that NUMTHREADS
// samples can be evaluated independently per OpenMP batch.
//
// Review fixes applied:
// - The per-sample log used to print the ground-truth label a second time in
//   the "Predicted:" field ("Predicted: x / <truth>"), a leftover from the
//   dual-model log in testCIFAR10; it now prints only the prediction.
// - The table-consistency assert now runs BEFORE the tables are indexed by
//   the loading loops, instead of after all pools were already built.
// - "Accuray" typo fixed in the final output line.
int testCIFAR10AlexNet(const string &lutfile = "../Utils/LUT_HEAM.txt")
{
// Input quantization parameters: real = S_input * (q - Z_input).
float S_input = 1.0 / 255.0;
int Z_input = 0;
const size_t NUMTHREADS = 20;
const string WEIGHTFOLDER = "../Weights/CIFAR10/";
NDArray::_loadLUT(lutfile);
// Per-layer quantization metadata keyed by layer name:
// S_* = scale, Z_* = zero point, Q_* = raw quantized integer values.
unordered_map<string, float> map_S_weights;
unordered_map<string, int> map_Z_weights;
unordered_map<string, vector<Scalar>> map_Q_weights;
unordered_map<string, NDArray> map_weights;
unordered_map<string, float> map_S_biases;
unordered_map<string, int> map_Z_biases;
unordered_map<string, vector<int>> map_Q_biases;
unordered_map<string, float> map_S_activations;
unordered_map<string, int> map_Z_activations;
// Static network description; the four tables below are index-aligned.
// Weight shapes are (kernel_h * kernel_w * in_ch, out_ch) for convs and
// (in_features, out_features) for FC layers.
vector<string> namesLayers = {"Conv1a", "Conv1b", "Conv2a", "Conv2b", "Conv3a", "Conv3b", "Conv4a", "Conv4b", "FC1", "FC2", "FC_Logits"};
vector<size_t> sizesWeights = {3*3*3*96, 3*3*96*96, 3*3*96*256, 3*3*256*256, 3*3*256*384, 3*3*384*384, 3*3*384*256, 3*3*256*256, 4096*4096, 4096*4096, 4096*10};
vector<size_t> sizesBiases = {96, 96, 256, 256, 384, 384, 256, 256, 4096, 4096, 10};
vector<vector<size_t>> shapesWeights = {{3*3*3, 96}, {3*3*96, 96}, {3*3*96, 256}, {3*3*256, 256}, {3*3*256, 384}, {3*3*384, 384}, {3*3*384, 256}, {3*3*256, 256}, {4096, 4096}, {4096, 4096}, {4096, 10}};
// Validate the per-layer tables up front, before any loop indexes them.
assert((namesLayers.size() == sizesWeights.size()) && (namesLayers.size() == sizesBiases.size()) && (namesLayers.size() == shapesWeights.size()));
// Load quantized weights. File layout: scale, zero point, then the
// flattened integer weight values.
for(size_t idx = 0; idx < namesLayers.size(); idx++)
{
float S_weights;
int Z_weights;
vector<Scalar> Q_weights(sizesWeights[idx]);
ifstream fin_weights(WEIGHTFOLDER + namesLayers[idx] + "_weights.txt");
if(!fin_weights)
{
cerr << "ERROR: failed to open the file. " << endl;
exit(1);
}
fin_weights >> S_weights;
fin_weights >> Z_weights;
for(size_t jdx = 0; jdx < sizesWeights[idx]; jdx++)
{
int tmp;
fin_weights >> tmp;
Q_weights[jdx] = tmp;
}
fin_weights.close();
NDArray weights(S_weights, Z_weights, shapesWeights[idx], Q_weights);
map_S_weights[namesLayers[idx]] = S_weights;
map_Z_weights[namesLayers[idx]] = Z_weights;
map_Q_weights[namesLayers[idx]] = Q_weights;
map_weights[namesLayers[idx]] = weights;
}
// Load quantized biases (same file layout: scale, zero point, values).
for(size_t idx = 0; idx < namesLayers.size(); idx++)
{
float S_biases;
int Z_biases;
vector<int> Q_biases(sizesBiases[idx]);
ifstream fin_biases(WEIGHTFOLDER + namesLayers[idx] + "_biases.txt");
if(!fin_biases)
{
cerr << "ERROR: failed to open the file. " << endl;
exit(1);
}
fin_biases >> S_biases;
fin_biases >> Z_biases;
for(size_t jdx = 0; jdx < sizesBiases[idx]; jdx++)
{
int tmp;
fin_biases >> tmp;
Q_biases[jdx] = tmp;
}
fin_biases.close();
map_S_biases[namesLayers[idx]] = S_biases;
map_Z_biases[namesLayers[idx]] = Z_biases;
map_Q_biases[namesLayers[idx]] = Q_biases;
}
// Load per-layer activation quantization parameters (scale and zero point only).
for(size_t idx = 0; idx < namesLayers.size(); idx++)
{
float S_activations;
int Z_activations;
ifstream fin_activations(WEIGHTFOLDER + namesLayers[idx] + "_activations.txt");
if(!fin_activations)
{
cerr << "ERROR: failed to open the file. " << endl;
exit(1);
}
fin_activations >> S_activations;
fin_activations >> Z_activations;
fin_activations.close();
map_S_activations[namesLayers[idx]] = S_activations;
map_Z_activations[namesLayers[idx]] = Z_activations;
}
// One input buffer and one complete copy of the graph per worker slot.
// NOTE(review): input is {28, 28, 3}, not CIFAR10's native 32x32x3 — the
// data files presumably hold pre-resized images; confirm getImagesCIFAR10().
vector<Ref<NDArray>> image_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
image_pool.push_back(new NDArray(S_input, Z_input, {28, 28, 3}));
}
vector<Ref<Var>> input_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
input_pool.push_back(new Var(image_pool[idx]));
}
// Conv1a: 3x3 conv + ReLU (no pooling).
cout << "Building Conv1a: S = " << map_S_weights["Conv1a"] << "; Z = " << map_Z_weights["Conv1a"] << "; S_act = " << map_S_activations["Conv1a"] << "; Z_act = " << map_Z_activations["Conv1a"] << endl;
vector<Ref<Var>> weightsConv1a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv1a_pool.push_back(new Var(map_weights["Conv1a"]));
}
vector<Ref<vector<int>>> biasesConv1a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv1a_pool.push_back(new vector<int>(map_Q_biases["Conv1a"]));
}
vector<Ref<Var>> conv1a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv1a_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv1a"], map_Z_activations["Conv1a"], input_pool[idx], weightsConv1a_pool[idx], biasesConv1a_pool[idx], {3, 3}, {1, 1}, true));
}
// Conv1b: 3x3 conv + ReLU (no pooling).
cout << "Building Conv1b: S = " << map_S_weights["Conv1b"] << "; Z = " << map_Z_weights["Conv1b"] << "; S_act = " << map_S_activations["Conv1b"] << "; Z_act = " << map_Z_activations["Conv1b"] << endl;
vector<Ref<Var>> weightsConv1b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv1b_pool.push_back(new Var(map_weights["Conv1b"]));
}
vector<Ref<vector<int>>> biasesConv1b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv1b_pool.push_back(new vector<int>(map_Q_biases["Conv1b"]));
}
vector<Ref<Var>> conv1b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv1b_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv1b"], map_Z_activations["Conv1b"], conv1a_pool[idx], weightsConv1b_pool[idx], biasesConv1b_pool[idx], {3, 3}, {1, 1}, true));
}
// Conv2a: 3x3 conv + ReLU.
cout << "Building Conv2a: S = " << map_S_weights["Conv2a"] << "; Z = " << map_Z_weights["Conv2a"] << "; S_act = " << map_S_activations["Conv2a"] << "; Z_act = " << map_Z_activations["Conv2a"] << endl;
vector<Ref<Var>> weightsConv2a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv2a_pool.push_back(new Var(map_weights["Conv2a"]));
}
vector<Ref<vector<int>>> biasesConv2a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv2a_pool.push_back(new vector<int>(map_Q_biases["Conv2a"]));
}
vector<Ref<Var>> conv2a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv2a_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv2a"], map_Z_activations["Conv2a"], conv1b_pool[idx], weightsConv2a_pool[idx], biasesConv2a_pool[idx], {3, 3}, {1, 1}, true));
}
// Conv2b: 3x3 conv + ReLU, then 2x2/2 max pool (28x28 -> 14x14).
cout << "Building Conv2b: S = " << map_S_weights["Conv2b"] << "; Z = " << map_Z_weights["Conv2b"] << "; S_act = " << map_S_activations["Conv2b"] << "; Z_act = " << map_Z_activations["Conv2b"] << endl;
vector<Ref<Var>> weightsConv2b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv2b_pool.push_back(new Var(map_weights["Conv2b"]));
}
vector<Ref<vector<int>>> biasesConv2b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv2b_pool.push_back(new vector<int>(map_Q_biases["Conv2b"]));
}
vector<Ref<Var>> preconv2b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
preconv2b_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv2b"], map_Z_activations["Conv2b"], conv2a_pool[idx], weightsConv2b_pool[idx], biasesConv2b_pool[idx], {3, 3}, {1, 1}, true));
}
vector<Ref<Var>> conv2b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv2b_pool.push_back(MaxPool<NDArray>(preconv2b_pool[idx], {2, 2}, {2, 2}, true));
}
// Conv3a: 3x3 conv + ReLU.
cout << "Building Conv3a: S = " << map_S_weights["Conv3a"] << "; Z = " << map_Z_weights["Conv3a"] << "; S_act = " << map_S_activations["Conv3a"] << "; Z_act = " << map_Z_activations["Conv3a"] << endl;
vector<Ref<Var>> weightsConv3a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv3a_pool.push_back(new Var(map_weights["Conv3a"]));
}
vector<Ref<vector<int>>> biasesConv3a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv3a_pool.push_back(new vector<int>(map_Q_biases["Conv3a"]));
}
vector<Ref<Var>> conv3a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv3a_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv3a"], map_Z_activations["Conv3a"], conv2b_pool[idx], weightsConv3a_pool[idx], biasesConv3a_pool[idx], {3, 3}, {1, 1}, true));
}
// Conv3b: 3x3 conv + ReLU, then 2x2/2 max pool (14x14 -> 7x7).
cout << "Building Conv3b: S = " << map_S_weights["Conv3b"] << "; Z = " << map_Z_weights["Conv3b"] << "; S_act = " << map_S_activations["Conv3b"] << "; Z_act = " << map_Z_activations["Conv3b"] << endl;
vector<Ref<Var>> weightsConv3b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv3b_pool.push_back(new Var(map_weights["Conv3b"]));
}
vector<Ref<vector<int>>> biasesConv3b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv3b_pool.push_back(new vector<int>(map_Q_biases["Conv3b"]));
}
vector<Ref<Var>> preconv3b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
preconv3b_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv3b"], map_Z_activations["Conv3b"], conv3a_pool[idx], weightsConv3b_pool[idx], biasesConv3b_pool[idx], {3, 3}, {1, 1}, true));
}
vector<Ref<Var>> conv3b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv3b_pool.push_back(MaxPool<NDArray>(preconv3b_pool[idx], {2, 2}, {2, 2}, true));
}
// Conv4a: 3x3 conv + ReLU.
cout << "Building Conv4a: S = " << map_S_weights["Conv4a"] << "; Z = " << map_Z_weights["Conv4a"] << "; S_act = " << map_S_activations["Conv4a"] << "; Z_act = " << map_Z_activations["Conv4a"] << endl;
vector<Ref<Var>> weightsConv4a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv4a_pool.push_back(new Var(map_weights["Conv4a"]));
}
vector<Ref<vector<int>>> biasesConv4a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv4a_pool.push_back(new vector<int>(map_Q_biases["Conv4a"]));
}
vector<Ref<Var>> conv4a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv4a_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv4a"], map_Z_activations["Conv4a"], conv3b_pool[idx], weightsConv4a_pool[idx], biasesConv4a_pool[idx], {3, 3}, {1, 1}, true));
}
// Conv4b: 3x3 conv + ReLU, then 2x2/2 max pool (7x7 -> 4x4; 4*4*256 = 4096,
// matching FC1's input width above).
cout << "Building Conv4b: S = " << map_S_weights["Conv4b"] << "; Z = " << map_Z_weights["Conv4b"] << "; S_act = " << map_S_activations["Conv4b"] << "; Z_act = " << map_Z_activations["Conv4b"] << endl;
vector<Ref<Var>> weightsConv4b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv4b_pool.push_back(new Var(map_weights["Conv4b"]));
}
vector<Ref<vector<int>>> biasesConv4b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv4b_pool.push_back(new vector<int>(map_Q_biases["Conv4b"]));
}
vector<Ref<Var>> preconv4b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
preconv4b_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv4b"], map_Z_activations["Conv4b"], conv4a_pool[idx], weightsConv4b_pool[idx], biasesConv4b_pool[idx], {3, 3}, {1, 1}, true));
}
vector<Ref<Var>> conv4b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv4b_pool.push_back(MaxPool<NDArray>(preconv4b_pool[idx], {2, 2}, {2, 2}, true));
}
vector<Ref<Var>> flatten_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
flatten_pool.push_back(Flatten<NDArray>(conv4b_pool[idx]));
}
// FC1: 4096 -> 4096 with fused ReLU.
cout << "Building FC1: S = " << map_S_weights["FC1"] << "; Z = " << map_Z_weights["FC1"] << "; S_act = " << map_S_activations["FC1"] << "; Z_act = " << map_Z_activations["FC1"] << endl;
vector<Ref<Var>> weightsFC1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsFC1_pool.push_back(new Var(map_weights["FC1"]));
}
vector<Ref<vector<int>>> biasesFC1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesFC1_pool.push_back(new vector<int>(map_Q_biases["FC1"]));
}
vector<Ref<Var>> fc1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
fc1_pool.push_back(MADReLU<NDArray>(map_S_activations["FC1"], map_Z_activations["FC1"], flatten_pool[idx], weightsFC1_pool[idx], biasesFC1_pool[idx]));
}
// FC2: 4096 -> 4096 with fused ReLU.
cout << "Building FC2: S = " << map_S_weights["FC2"] << "; Z = " << map_Z_weights["FC2"] << "; S_act = " << map_S_activations["FC2"] << "; Z_act = " << map_Z_activations["FC2"] << endl;
vector<Ref<Var>> weightsFC2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsFC2_pool.push_back(new Var(map_weights["FC2"]));
}
vector<Ref<vector<int>>> biasesFC2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesFC2_pool.push_back(new vector<int>(map_Q_biases["FC2"]));
}
vector<Ref<Var>> fc2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
fc2_pool.push_back(MADReLU<NDArray>(map_S_activations["FC2"], map_Z_activations["FC2"], fc1_pool[idx], weightsFC2_pool[idx], biasesFC2_pool[idx]));
}
// FC_Logits: 4096 -> 10, no activation (plain MAD).
cout << "Building FC_Logits: S = " << map_S_weights["FC_Logits"] << "; Z = " << map_Z_weights["FC_Logits"] << "; S_act = " << map_S_activations["FC_Logits"] << "; Z_act = " << map_Z_activations["FC_Logits"] << endl;
vector<Ref<Var>> weightsFC3_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsFC3_pool.push_back(new Var(map_weights["FC_Logits"]));
}
vector<Ref<vector<int>>> biasesFC3_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesFC3_pool.push_back(new vector<int>(map_Q_biases["FC_Logits"]));
}
vector<Ref<Var>> fc3_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
fc3_pool.push_back(MAD<NDArray>(map_S_activations["FC_Logits"], map_Z_activations["FC_Logits"], fc2_pool[idx], weightsFC3_pool[idx], biasesFC3_pool[idx]));
}
vector<vector<float>> images = getImagesCIFAR10();
vector<unsigned> labels = getLabelsCIFAR10();
float count = 0;
size_t TestSize = 10000;
// Evaluate the test set in batches of NUMTHREADS samples.
// NOTE(review): assumes NUMTHREADS evenly divides TestSize (20 | 10000 here).
for(size_t idx = 0; idx < TestSize; idx+=NUMTHREADS)
{
// Load the batch into the per-slot input buffers.
// NOTE(review): pixel values are stored as-is (no quantization formula),
// matching testCIFAR10 — presumably the data files already hold 0-255
// quantized values; confirm against getImagesCIFAR10().
for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
{
vector<Scalar> Q_image(784*3);
for(size_t kdx = 0; kdx < 784*3; kdx++)
{
Q_image[kdx] = images[idx+jdx][kdx];
}
image_pool[jdx]->set(Q_image);
}
// Run all NUMTHREADS graph copies concurrently; each thread touches only
// its own pool slot.
#pragma omp parallel for num_threads(NUMTHREADS)
for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
{
fc3_pool[jdx]->evaluate(idx+1);
}
// Score the batch serially: predicted class = argmax of the logits.
for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
{
size_t label = fc3_pool[jdx]->value().posmax();
if(label == labels[idx+jdx])
{
count++;
}
// fc3_pool[jdx]->value().eval().print();
cout << "Sample No." << (idx+jdx+1) << " " << "; Label: " << labels[idx+jdx] << ", Predicted: " << label << " -> " << ((label == labels[idx+jdx]) ? "Right" : "Wrong") << "; Accuracy: " << (count / (idx+jdx+1)) << endl;
}
}
cout << "Accuracy: " << (count / TestSize) << endl;
return 0;
}
int testCIFAR10AlexNetExport(const string &lutfile = "../Utils/LUT_HEAM.txt")
{
float S_input = 1.0 / 255.0;
int Z_input = 0;
const size_t NUMTHREADS = 20;
const string WEIGHTFOLDER = "../Weights/CIFAR10/";
NDArray::_loadLUT(lutfile);
unordered_map<string, float> map_S_weights;
unordered_map<string, int> map_Z_weights;
unordered_map<string, vector<Scalar>> map_Q_weights;
unordered_map<string, NDArray> map_weights;
unordered_map<string, float> map_S_biases;
unordered_map<string, int> map_Z_biases;
unordered_map<string, vector<int>> map_Q_biases;
unordered_map<string, float> map_S_activations;
unordered_map<string, int> map_Z_activations;
vector<string> namesLayers = {"Conv1a", "Conv1b", "Conv2a", "Conv2b", "Conv3a", "Conv3b", "Conv4a", "Conv4b", "FC1", "FC2", "FC_Logits"};
vector<size_t> sizesWeights = {3*3*3*96, 3*3*96*96, 3*3*96*256, 3*3*256*256, 3*3*256*384, 3*3*384*384, 3*3*384*256, 3*3*256*256, 4096*4096, 4096*4096, 4096*10};
vector<size_t> sizesBiases = {96, 96, 256, 256, 384, 384, 256, 256, 4096, 4096, 10};
vector<vector<size_t>> shapesWeights = {{3*3*3, 96}, {3*3*96, 96}, {3*3*96, 256}, {3*3*256, 256}, {3*3*256, 384}, {3*3*384, 384}, {3*3*384, 256}, {3*3*256, 256}, {4096, 4096}, {4096, 4096}, {4096, 10}};
for(size_t idx = 0; idx < namesLayers.size(); idx++)
{
float S_weights;
int Z_weights;
vector<Scalar> Q_weights(sizesWeights[idx]);
ifstream fin_weights(WEIGHTFOLDER + namesLayers[idx] + "_weights.txt");
if(!fin_weights)
{
cerr << "ERROR: failed to open the file. " << endl;
exit(1);
}
fin_weights >> S_weights;
fin_weights >> Z_weights;
for(size_t jdx = 0; jdx < sizesWeights[idx]; jdx++)
{
int tmp;
fin_weights >> tmp;
Q_weights[jdx] = tmp;
}
fin_weights.close();
NDArray weights(S_weights, Z_weights, shapesWeights[idx], Q_weights);
map_S_weights[namesLayers[idx]] = S_weights;
map_Z_weights[namesLayers[idx]] = Z_weights;
map_Q_weights[namesLayers[idx]] = Q_weights;
map_weights[namesLayers[idx]] = weights;
}
for(size_t idx = 0; idx < namesLayers.size(); idx++)
{
float S_biases;
int Z_biases;
vector<int> Q_biases(sizesBiases[idx]);
ifstream fin_biases(WEIGHTFOLDER + namesLayers[idx] + "_biases.txt");
if(!fin_biases)
{
cerr << "ERROR: failed to open the file. " << endl;
exit(1);
}
fin_biases >> S_biases;
fin_biases >> Z_biases;
for(size_t jdx = 0; jdx < sizesBiases[idx]; jdx++)
{
int tmp;
fin_biases >> tmp;
Q_biases[jdx] = tmp;
}
fin_biases.close();
map_S_biases[namesLayers[idx]] = S_biases;
map_Z_biases[namesLayers[idx]] = Z_biases;
map_Q_biases[namesLayers[idx]] = Q_biases;
}
for(size_t idx = 0; idx < namesLayers.size(); idx++)
{
float S_activations;
int Z_activations;
ifstream fin_activations(WEIGHTFOLDER + namesLayers[idx] + "_activations.txt");
if(!fin_activations)
{
cerr << "ERROR: failed to open the file. " << endl;
exit(1);
}
fin_activations >> S_activations;
fin_activations >> Z_activations;
fin_activations.close();
map_S_activations[namesLayers[idx]] = S_activations;
map_Z_activations[namesLayers[idx]] = Z_activations;
}
vector<Ref<NDArray>> image_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
image_pool.push_back(new NDArray(S_input, Z_input, {28, 28, 3}));
}
vector<Ref<Var>> input_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
input_pool.push_back(new Var(image_pool[idx]));
}
assert((namesLayers.size() == sizesWeights.size()) && (namesLayers.size() == sizesBiases.size()) && (namesLayers.size() == shapesWeights.size()));
cout << "Building Conv1a: S = " << map_S_weights["Conv1a"] << "; Z = " << map_Z_weights["Conv1a"] << "; S_act = " << map_S_activations["Conv1a"] << "; Z_act = " << map_Z_activations["Conv1a"] << endl;
vector<Ref<Var>> weightsConv1a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv1a_pool.push_back(new Var(map_weights["Conv1a"]));
}
vector<Ref<vector<int>>> biasesConv1a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv1a_pool.push_back(new vector<int>(map_Q_biases["Conv1a"]));
}
vector<Ref<Var>> conv1a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv1a_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv1a"], map_Z_activations["Conv1a"], input_pool[idx], weightsConv1a_pool[idx], biasesConv1a_pool[idx], {3, 3}, {1, 1}, true));
}
cout << "Building Conv1b: S = " << map_S_weights["Conv1b"] << "; Z = " << map_Z_weights["Conv1b"] << "; S_act = " << map_S_activations["Conv1b"] << "; Z_act = " << map_Z_activations["Conv1b"] << endl;
vector<Ref<Var>> weightsConv1b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv1b_pool.push_back(new Var(map_weights["Conv1b"]));
}
vector<Ref<vector<int>>> biasesConv1b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv1b_pool.push_back(new vector<int>(map_Q_biases["Conv1b"]));
}
vector<Ref<Var>> conv1b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv1b_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv1b"], map_Z_activations["Conv1b"], conv1a_pool[idx], weightsConv1b_pool[idx], biasesConv1b_pool[idx], {3, 3}, {1, 1}, true));
}
cout << "Building Conv2a: S = " << map_S_weights["Conv2a"] << "; Z = " << map_Z_weights["Conv2a"] << "; S_act = " << map_S_activations["Conv2a"] << "; Z_act = " << map_Z_activations["Conv2a"] << endl;
vector<Ref<Var>> weightsConv2a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv2a_pool.push_back(new Var(map_weights["Conv2a"]));
}
vector<Ref<vector<int>>> biasesConv2a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv2a_pool.push_back(new vector<int>(map_Q_biases["Conv2a"]));
}
vector<Ref<Var>> conv2a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv2a_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv2a"], map_Z_activations["Conv2a"], conv1b_pool[idx], weightsConv2a_pool[idx], biasesConv2a_pool[idx], {3, 3}, {1, 1}, true));
}
cout << "Building Conv2b: S = " << map_S_weights["Conv2b"] << "; Z = " << map_Z_weights["Conv2b"] << "; S_act = " << map_S_activations["Conv2b"] << "; Z_act = " << map_Z_activations["Conv2b"] << endl;
vector<Ref<Var>> weightsConv2b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv2b_pool.push_back(new Var(map_weights["Conv2b"]));
}
vector<Ref<vector<int>>> biasesConv2b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv2b_pool.push_back(new vector<int>(map_Q_biases["Conv2b"]));
}
vector<Ref<Var>> preconv2b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
preconv2b_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv2b"], map_Z_activations["Conv2b"], conv2a_pool[idx], weightsConv2b_pool[idx], biasesConv2b_pool[idx], {3, 3}, {1, 1}, true));
}
vector<Ref<Var>> conv2b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv2b_pool.push_back(MaxPool<NDArray>(preconv2b_pool[idx], {2, 2}, {2, 2}, true));
}
cout << "Building Conv3a: S = " << map_S_weights["Conv3a"] << "; Z = " << map_Z_weights["Conv3a"] << "; S_act = " << map_S_activations["Conv3a"] << "; Z_act = " << map_Z_activations["Conv3a"] << endl;
vector<Ref<Var>> weightsConv3a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv3a_pool.push_back(new Var(map_weights["Conv3a"]));
}
vector<Ref<vector<int>>> biasesConv3a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv3a_pool.push_back(new vector<int>(map_Q_biases["Conv3a"]));
}
vector<Ref<Var>> conv3a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv3a_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv3a"], map_Z_activations["Conv3a"], conv2b_pool[idx], weightsConv3a_pool[idx], biasesConv3a_pool[idx], {3, 3}, {1, 1}, true));
}
cout << "Building Conv3b: S = " << map_S_weights["Conv3b"] << "; Z = " << map_Z_weights["Conv3b"] << "; S_act = " << map_S_activations["Conv3b"] << "; Z_act = " << map_Z_activations["Conv3b"] << endl;
vector<Ref<Var>> weightsConv3b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv3b_pool.push_back(new Var(map_weights["Conv3b"]));
}
vector<Ref<vector<int>>> biasesConv3b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv3b_pool.push_back(new vector<int>(map_Q_biases["Conv3b"]));
}
vector<Ref<Var>> preconv3b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
preconv3b_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv3b"], map_Z_activations["Conv3b"], conv3a_pool[idx], weightsConv3b_pool[idx], biasesConv3b_pool[idx], {3, 3}, {1, 1}, true));
}
vector<Ref<Var>> conv3b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv3b_pool.push_back(MaxPool<NDArray>(preconv3b_pool[idx], {2, 2}, {2, 2}, true));
}
cout << "Building Conv4a: S = " << map_S_weights["Conv4a"] << "; Z = " << map_Z_weights["Conv4a"] << "; S_act = " << map_S_activations["Conv4a"] << "; Z_act = " << map_Z_activations["Conv4a"] << endl;
vector<Ref<Var>> weightsConv4a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv4a_pool.push_back(new Var(map_weights["Conv4a"]));
}
vector<Ref<vector<int>>> biasesConv4a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv4a_pool.push_back(new vector<int>(map_Q_biases["Conv4a"]));
}
vector<Ref<Var>> conv4a_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv4a_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv4a"], map_Z_activations["Conv4a"], conv3b_pool[idx], weightsConv4a_pool[idx], biasesConv4a_pool[idx], {3, 3}, {1, 1}, true));
}
cout << "Building Conv4b: S = " << map_S_weights["Conv4b"] << "; Z = " << map_Z_weights["Conv4b"] << "; S_act = " << map_S_activations["Conv4b"] << "; Z_act = " << map_Z_activations["Conv4b"] << endl;
vector<Ref<Var>> weightsConv4b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsConv4b_pool.push_back(new Var(map_weights["Conv4b"]));
}
vector<Ref<vector<int>>> biasesConv4b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesConv4b_pool.push_back(new vector<int>(map_Q_biases["Conv4b"]));
}
vector<Ref<Var>> preconv4b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
preconv4b_pool.push_back(Conv2DReLU<NDArray>(map_S_activations["Conv4b"], map_Z_activations["Conv4b"], conv4a_pool[idx], weightsConv4b_pool[idx], biasesConv4b_pool[idx], {3, 3}, {1, 1}, true));
}
vector<Ref<Var>> conv4b_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
conv4b_pool.push_back(MaxPool<NDArray>(preconv4b_pool[idx], {2, 2}, {2, 2}, true));
}
vector<Ref<Var>> flatten_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
flatten_pool.push_back(Flatten<NDArray>(conv4b_pool[idx]));
}
cout << "Building FC1: S = " << map_S_weights["FC1"] << "; Z = " << map_Z_weights["FC1"] << "; S_act = " << map_S_activations["FC1"] << "; Z_act = " << map_Z_activations["FC1"] << endl;
vector<Ref<Var>> weightsFC1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsFC1_pool.push_back(new Var(map_weights["FC1"]));
}
vector<Ref<vector<int>>> biasesFC1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesFC1_pool.push_back(new vector<int>(map_Q_biases["FC1"]));
}
vector<Ref<Var>> fc1_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
fc1_pool.push_back(MADReLU<NDArray>(map_S_activations["FC1"], map_Z_activations["FC1"], flatten_pool[idx], weightsFC1_pool[idx], biasesFC1_pool[idx]));
}
cout << "Building FC2: S = " << map_S_weights["FC2"] << "; Z = " << map_Z_weights["FC2"] << "; S_act = " << map_S_activations["FC2"] << "; Z_act = " << map_Z_activations["FC2"] << endl;
vector<Ref<Var>> weightsFC2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsFC2_pool.push_back(new Var(map_weights["FC2"]));
}
vector<Ref<vector<int>>> biasesFC2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesFC2_pool.push_back(new vector<int>(map_Q_biases["FC2"]));
}
vector<Ref<Var>> fc2_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
fc2_pool.push_back(MADReLU<NDArray>(map_S_activations["FC2"], map_Z_activations["FC2"], fc1_pool[idx], weightsFC2_pool[idx], biasesFC2_pool[idx]));
}
cout << "Building FC_Logits: S = " << map_S_weights["FC_Logits"] << "; Z = " << map_Z_weights["FC_Logits"] << "; S_act = " << map_S_activations["FC_Logits"] << "; Z_act = " << map_Z_activations["FC_Logits"] << endl;
vector<Ref<Var>> weightsFC3_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
weightsFC3_pool.push_back(new Var(map_weights["FC_Logits"]));
}
vector<Ref<vector<int>>> biasesFC3_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
biasesFC3_pool.push_back(new vector<int>(map_Q_biases["FC_Logits"]));
}
vector<Ref<Var>> fc3_pool;
for(size_t idx = 0; idx < NUMTHREADS; idx++)
{
fc3_pool.push_back(MAD<NDArray>(map_S_activations["FC_Logits"], map_Z_activations["FC_Logits"], fc2_pool[idx], weightsFC3_pool[idx], biasesFC3_pool[idx]));
}
vector<vector<float>> images = getImagesCIFAR10();
vector<unsigned> labels = getLabelsCIFAR10();
float count = 0;
size_t TestSize = 1000;
vector<vector<unsigned>> layer0(TestSize, vector<unsigned>());
vector<vector<unsigned>> layer1(TestSize, vector<unsigned>());
vector<vector<unsigned>> layer2(TestSize, vector<unsigned>());
vector<vector<unsigned>> layer3(TestSize, vector<unsigned>());
vector<vector<unsigned>> layer4(TestSize, vector<unsigned>());
vector<vector<unsigned>> layer5(TestSize, vector<unsigned>());
vector<vector<unsigned>> layer6(TestSize, vector<unsigned>());
vector<vector<unsigned>> layer7(TestSize, vector<unsigned>());
vector<vector<unsigned>> layer8(TestSize, vector<unsigned>());
vector<vector<unsigned>> layer9(TestSize, vector<unsigned>());
vector<vector<unsigned>> layer10(TestSize, vector<unsigned>());
vector<vector<unsigned>> layer11(TestSize, vector<unsigned>());
for(size_t idx = 0; idx < TestSize; idx+=NUMTHREADS)
{
for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
{
vector<Scalar> Q_image(784*3);
for(size_t kdx = 0; kdx < 784*3; kdx++)
{
Q_image[kdx] = images[idx+jdx][kdx];
}
image_pool[jdx]->set(Q_image);
}
#pragma omp parallel for num_threads(NUMTHREADS)
for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
{
fc3_pool[jdx]->evaluate(idx+1);
}
for(size_t jdx = 0; jdx < NUMTHREADS; jdx++)
{
size_t index = idx + jdx;
auto temp0 = image_pool[jdx].value().im2col({3, 3}, {1, 1}, true);
for(size_t kdx = 0; kdx < temp0.size(); kdx++)
{
layer0[index].push_back(temp0[kdx]);
}
auto temp1 = conv1a_pool[jdx].value().value().im2col({3, 3}, {1, 1}, true);
for(size_t kdx = 0; kdx < temp1.size(); kdx++)
{
layer1[index].push_back(temp1[kdx]);
}
auto temp2 = conv1b_pool[jdx].value().value().im2col({3, 3}, {1, 1}, true);
for(size_t kdx = 0; kdx < temp2.size(); kdx++)
{
layer2[index].push_back(temp2[kdx]);
}
auto temp3 = conv2a_pool[jdx].value().value().im2col({3, 3}, {1, 1}, true);
for(size_t kdx = 0; kdx < temp3.size(); kdx++)
{
layer3[index].push_back(temp3[kdx]);
}
auto temp4 = conv2b_pool[jdx].value().value().im2col({3, 3}, {1, 1}, true);
for(size_t kdx = 0; kdx < temp4.size(); kdx++)
{
layer4[index].push_back(temp4[kdx]);
}
auto temp5 = conv3a_pool[jdx].value().value().im2col({3, 3}, {1, 1}, true);
for(size_t kdx = 0; kdx < temp5.size(); kdx++)
{
layer5[index].push_back(temp5[kdx]);
}
auto temp6 = conv3b_pool[jdx].value().value().im2col({3, 3}, {1, 1}, true);
for(size_t kdx = 0; kdx < temp6.size(); kdx++)
{
layer6[index].push_back(temp6[kdx]);
}
auto temp7 = conv4a_pool[jdx].value().value().im2col({3, 3}, {1, 1}, true);
for(size_t kdx = 0; kdx < temp7.size(); kdx++)
{
layer7[index].push_back(temp7[kdx]);
}
auto temp8 = conv4b_pool[jdx].value().value().im2col({3, 3}, {1, 1}, true);
for(size_t kdx = 0; kdx < temp8.size(); kdx++)
{
layer8[index].push_back(temp8[kdx]);
}
auto temp9 = fc1_pool[jdx].value().value();
for(size_t kdx = 0; kdx < temp9.size(); kdx++)
{
layer9[index].push_back(temp9[kdx]);
}
auto temp10 = fc2_pool[jdx].value().value();
for(size_t kdx = 0; kdx < temp10.size(); kdx++)
{
layer10[index].push_back(temp10[kdx]);
}
auto temp11 = fc3_pool[jdx].value().value();
for(size_t kdx = 0; kdx < temp11.size(); kdx++)
{
layer11[index].push_back(temp11[kdx]);
}
size_t label = fc3_pool[jdx]->value().posmax();
if(label == labels[idx+jdx])
{
count++;
}
// fc2_pool[jdx]->value().eval().print();
cout << "Sample No." << (idx+jdx+1) << " " << "; Label: " << labels[idx+jdx] << ", Predicted: " << label << " / " << labels[idx+jdx] << " -> " << ((label == labels[idx+jdx]) ? "Right" : "Wrong") << "; Accuracy: " << (count / (idx+jdx+1)) << endl;
}
}
cout << "Accuray: " << (count / TestSize) << endl;
ofstream fout0("images_AlexNet.txt");
ofstream fout1("layer1_AlexNet.txt");
ofstream fout2("layer2_AlexNet.txt");
ofstream fout3("layer3_AlexNet.txt");
ofstream fout4("layer4_AlexNet.txt");
ofstream fout5("layer5_AlexNet.txt");
ofstream fout6("layer6_AlexNet.txt");
ofstream fout7("layer7_AlexNet.txt");
ofstream fout8("layer8_AlexNet.txt");
ofstream fout9("layer9_AlexNet.txt");
ofstream fout10("layer10_AlexNet.txt");
ofstream fout11("layer11_AlexNet.txt");
for(const auto &line: layer0)
{
for(const auto &elem: line)
{
fout0 << elem << " ";
}
fout0 << endl;
}
for(const auto &line: layer1)
{
for(const auto &elem: line)
{
fout1 << elem << " ";
}
fout1 << endl;
}
for(const auto &line: layer2)
{
for(const auto &elem: line)
{
fout2 << elem << " ";
}
fout2 << endl;
}
for(const auto &line: layer3)
{
for(const auto &elem: line)
{
fout3 << elem << " ";
}
fout3 << endl;
}
for(const auto &line: layer4)
{
for(const auto &elem: line)
{
fout4 << elem << " ";
}
fout4 << endl;
}
for(const auto &line: layer5)
{
for(const auto &elem: line)
{
fout5 << elem << " ";
}
fout5 << endl;
}
for(const auto &line: layer5)
{
for(const auto &elem: line)
{
fout5 << elem << " ";
}
fout5 << endl;
}
for(const auto &line: layer6)
{
for(const auto &elem: line)
{
fout6 << elem << " ";
}
fout6 << endl;
}
for(const auto &line: layer7)
{
for(const auto &elem: line)
{
fout7 << elem << " ";
}
fout7 << endl;
}
for(const auto &line: layer8)
{
for(const auto &elem: line)
{
fout8 << elem << " ";
}
fout8 << endl;
}
for(const auto &line: layer9)
{
for(const auto &elem: line)
{
fout9 << elem << " ";
}
fout9 << endl;
}
for(const auto &line: layer10)
{
for(const auto &elem: line)
{
fout10 << elem << " ";
}
fout10 << endl;
}
for(const auto &line: layer11)
{
for(const auto &elem: line)
{
fout11 << elem << " ";
}
fout11 << endl;
}
fout0.close();
fout1.close();
fout2.close();
fout3.close();
fout4.close();
fout5.close();
fout6.close();
fout7.close();
fout8.close();
fout9.close();
fout10.close();
fout11.close();
return 0;
}
#endif
|
pi-v17.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
/* Build-mode switch: when _DEBUG_ is defined non-zero on the command line,
 * the program prints thread/timing diagnostics via printf/omp_get_wtime();
 * otherwise it is instrumented with Extrae events for offline tracing.
 * NOTE(review): `#if _DEBUG_` immediately followed by `#define _DEBUG_ 1`
 * re-defines an already-defined macro — confirm this is intentional. */
#if _DEBUG_
#define _DEBUG_ 1
#else
#define _DEBUG_ 0
#include "extrae_user_events.h"
/* Extrae event type and the values used to bracket the computation phase. */
#define PROGRAM 1000
#define PI_COMPUTATION 1
#define END 0
#endif
/*
 * Approximate pi = integral of 4/(1+x^2) over [0,1] with the midpoint rule,
 * parallelized with OpenMP.  The interval is split into two explicit
 * half-range loops whose iterations are handed out dynamically; `nowait`
 * lets threads flow from the first loop into the second.
 *
 * Fixes vs. the previous revision (which was marked "incorrect code"):
 *  - `pi = step * sum` was computed inside the parallel region in a
 *    `single` construct.  There each thread still holds only its PRIVATE
 *    reduction copy of `sum`; the combined value exists only after the
 *    region ends.  The final scaling now happens outside the region.
 *  - the dynamic chunk size `num_steps/4` is clamped to at least 1 so
 *    small step counts remain a valid schedule argument.
 */
int main(int argc, char *argv[]) {
    double x, sum=0.0, pi=0.0;
#if _DEBUG_
    double start,end;
#endif
    int i;
    const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";
    if (argc < 2) {
        fprintf(stderr, "%s", Usage);   /* explicit format string */
        exit(1);
    }
    int num_steps = atoi(argv[1]);
    double step = 1.0/(double) num_steps;
    /* chunk must be strictly positive for schedule(dynamic, chunk) */
    int chunk = num_steps/4 > 0 ? num_steps/4 : 1;
#if _DEBUG_
    start= omp_get_wtime();
#else
    Extrae_event (PROGRAM, PI_COMPUTATION);
#endif
    #pragma omp parallel private(i,x) reduction(+:sum)
    {
#if _DEBUG_
        int id = omp_get_thread_num();
#endif
        /* first half of the interval */
        #pragma omp for schedule(dynamic, chunk) nowait
        for (i=0; i < num_steps/2; i++) {
            x = (i+0.5)*step;
            sum += 4.0/(1.0+x*x);
#if _DEBUG_
            printf("thread id:%d it:%d\n",id,i);
#endif
        }
        /* second half of the interval */
        #pragma omp for schedule(dynamic, chunk) nowait
        for (i=num_steps/2; i < num_steps; i++) {
            x = (i+0.5)*step;
            sum += 4.0/(1.0+x*x);
#if _DEBUG_
            printf("thread id:%d it:%d\n",id,i);
#endif
        }
    }
    /* the reduced value of `sum` is only defined here, after the region */
    pi = step * sum;
#if _DEBUG_
    end = omp_get_wtime();
    printf("Wall clock execution time = %.9f seconds\n", end-start);
#else
    Extrae_event (PROGRAM, END);
#endif
    /* print results */
    printf("Value of pi = %12.10f\n", pi);
    return EXIT_SUCCESS;
}
|
GnatNearestNeighbors.h | //
// Copyright (c) 2009, Markus Rickert
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef RL_MATH_GNATNEARESTNEIGHBORS_H
#define RL_MATH_GNATNEARESTNEIGHBORS_H
#include <algorithm>
#include <iterator>
#include <limits>
#include <random>
#include <type_traits>
#include <utility>
#include <vector>
#include <boost/optional.hpp>
namespace rl
{
namespace math
{
/**
* Geometric Near-Neighbor Access Tree (GNAT).
*
* Sergey Brin. Near neighbor search in large metric spaces. In Proceedings of
* the International Conference on Very Large Data Bases, pages 574-584,
 * Zurich, Switzerland, September, 1995.
*
* http://www.vldb.org/conf/1995/P574.PDF
*/
template<typename MetricT>
class GnatNearestNeighbors
{
private:
struct Node;
public:
typedef const typename MetricT::Value& const_reference;
typedef ::std::ptrdiff_t difference_type;
typedef typename MetricT::Value& reference;
typedef ::std::size_t size_type;
typedef typename MetricT::Value value_type;
typedef typename MetricT::Distance Distance;
typedef MetricT Metric;
typedef typename MetricT::Value Value;
typedef ::std::pair<Distance, Value> Neighbor;
explicit GnatNearestNeighbors(const Metric& metric) :
checks(),
generator(::std::random_device()()),
metric(metric),
nodeDataMax(50),
nodeDegree(8),
nodeDegreeMax(12),
nodeDegreeMin(4),
root(0, 0, nodeDegree, nodeDataMax, true),
values(0)
{
}
explicit GnatNearestNeighbors(Metric&& metric = Metric()) :
checks(),
generator(::std::random_device()()),
metric(::std::move(metric)),
nodeDataMax(50),
nodeDegree(8),
nodeDegreeMax(12),
nodeDegreeMin(4),
root(0, 0, nodeDegree, nodeDataMax, true),
values(0)
{
}
template<typename InputIterator>
GnatNearestNeighbors(InputIterator first, InputIterator last, const Metric& metric) :
checks(),
generator(::std::random_device()()),
metric(metric),
nodeDataMax(50),
nodeDegree(8),
nodeDegreeMax(12),
nodeDegreeMin(4),
root(first, last, 0, 0, nodeDegree, nodeDataMax, true),
values(::std::distance(first, last))
{
if (this->root.data.size() > this->nodeDataMax && this->root.data.size() > this->root.degree)
{
this->split(this->root);
}
}
/**
 * Constructs a GNAT from the values in [first, last), taking ownership of
 * the metric by move.  The root starts as a single bucket and is split
 * once it exceeds both the bucket capacity and the branching degree.
 *
 * Fix: the root initializer previously passed a stray `nullptr` before
 * the index argument; Node has no constructor with that arity (compare
 * the copy-metric overload above, which passes (first, last, 0, 0, ...)).
 */
template<typename InputIterator>
GnatNearestNeighbors(InputIterator first, InputIterator last, Metric&& metric = Metric()) :
    checks(),
    generator(::std::random_device()()),
    metric(::std::move(metric)),
    nodeDataMax(50),
    nodeDegree(8),
    nodeDegreeMax(12),
    nodeDegreeMin(4),
    root(first, last, 0, 0, nodeDegree, nodeDataMax, true),
    values(::std::distance(first, last))
{
    if (this->root.data.size() > this->nodeDataMax && this->root.data.size() > this->root.degree)
    {
        this->split(this->root);
    }
}
~GnatNearestNeighbors()
{
}
void clear()
{
this->root.children.clear();
this->root.children.reserve(this->nodeDegree);
this->root.data.clear();
this->root.data.reserve(this->nodeDataMax + 1);
this->values = 0;
}
::std::vector<Value> data() const
{
::std::vector<Value> data;
data.reserve(this->values);
this->data(this->root, data);
return data;
}
bool empty() const
{
return this->root.removed && this->root.data.empty() && this->root.children.empty();
}
::boost::optional<::std::size_t> getChecks() const
{
return this->checks;
}
::std::size_t getNodeDataMax() const
{
return this->nodeDataMax;
}
::std::size_t getNodeDegree() const
{
return this->nodeDegree;
}
::std::size_t getNodeDegreeMax() const
{
return this->nodeDegreeMax;
}
::std::size_t getNodeDegreeMin() const
{
return this->nodeDegreeMin;
}
template<typename InputIterator>
void insert(InputIterator first, InputIterator last)
{
if (this->empty())
{
this->root.data.insert(this->root.data.end(), first, last);
if (this->root.data.size() > this->nodeDataMax && this->root.data.size() > this->root.degree)
{
this->split(this->root);
}
this->values += ::std::distance(first, last);
}
else
{
for (InputIterator i = first; i != last; ++i)
{
this->push(*i);
}
}
}
::std::vector<Neighbor> nearest(const Value& query, const ::std::size_t& k, const bool& sorted = true) const
{
return this->search(query, &k, nullptr, sorted);
}
void push(const Value& value)
{
this->push(this->root, value);
++this->values;
}
::std::vector<Neighbor> radius(const Value& query, const Distance& radius, const bool& sorted = true) const
{
return this->search(query, nullptr, &radius, sorted);
}
void seed(const ::std::mt19937::result_type& value)
{
this->generator.seed(value);
}
void setChecks(const ::boost::optional<::std::size_t>& checks)
{
this->checks = checks;
}
void setNodeDataMax(const ::std::size_t& nodeDataMax)
{
this->nodeDataMax = nodeDataMax;
}
void setNodeDegree(const ::std::size_t& nodeDegree)
{
this->nodeDegree = nodeDegree;
}
void setNodeDegreeMax(const ::std::size_t& nodeDegreeMax)
{
this->nodeDegreeMax = nodeDegreeMax;
}
void setNodeDegreeMin(const ::std::size_t& nodeDegreeMin)
{
this->nodeDegreeMin = nodeDegreeMin;
}
::std::size_t size() const
{
return this->values;
}
void swap(GnatNearestNeighbors& other)
{
using ::std::swap;
swap(this->generator, other.generator);
swap(this->metric, other.metric);
swap(this->nodeDegree, other.nodeDegree);
swap(this->nodeDegreeMax, other.nodeDegreeMax);
swap(this->nodeDegreeMin, other.nodeDegreeMin);
swap(this->nodeDataMax, other.nodeDataMax);
swap(this->root, other.root);
swap(this->values, other.values);
}
friend void swap(GnatNearestNeighbors& lhs, GnatNearestNeighbors& rhs)
{
lhs.swap(rhs);
}
protected:
private:
typedef ::std::pair<Distance, const Node*> Branch;
struct BranchCompare
{
bool operator()(const Branch& lhs, const Branch& rhs) const
{
return lhs.first - lhs.second->max[lhs.second->index] > rhs.first - rhs.second->max[rhs.second->index];
}
};
struct NeighborCompare
{
bool operator()(const Neighbor& lhs, const Neighbor& rhs) const
{
return lhs.first < rhs.first;
}
};
struct Node
{
Node(const ::std::size_t& index, const ::std::size_t& siblings, const ::std::size_t& degree, const ::std::size_t& capacity, const bool& removed = false) :
children(),
data(),
degree(degree),
index(index),
max(siblings + 1, -::std::numeric_limits<Distance>::infinity()),
min(siblings + 1, ::std::numeric_limits<Distance>::infinity()),
pivot(),
removed(removed)
{
this->children.reserve(degree);
this->data.reserve(capacity + 1);
}
template<typename InputIterator>
Node(InputIterator first, InputIterator last, const ::std::size_t& index, const ::std::size_t& siblings, const ::std::size_t& degree, const ::std::size_t& capacity, const bool& removed = false) :
children(),
data(first, last),
degree(degree),
index(index),
max(siblings + 1, -::std::numeric_limits<Distance>::infinity()),
min(siblings + 1, ::std::numeric_limits<Distance>::infinity()),
pivot(),
removed(removed)
{
this->children.reserve(degree);
this->data.reserve(capacity + 1);
}
~Node()
{
}
void swap(Node& other)
{
using ::std::swap;
swap(this->children, other.children);
swap(this->data, other.data);
swap(this->degree, other.degree);
swap(this->index, other.index);
swap(this->max, other.max);
swap(this->min, other.min);
swap(this->pivot, other.pivot);
swap(this->removed, other.removed);
}
friend void swap(Node& lhs, Node& rhs)
{
lhs.swap(rhs);
}
::std::vector<Node> children;
::std::vector<Value> data;
::std::size_t degree;
::std::size_t index;
::std::vector<Distance> max;
::std::vector<Distance> min;
Value pivot;
bool removed;
};
// Selects node.degree pivot candidates from node.data with a greedy
// farthest-point heuristic and fills `distances` so that distances[i][j]
// is the metric distance from data[j] to the i-th chosen center.
// The first center is drawn uniformly at random; each subsequent center
// is the point farthest from all centers chosen so far.
// Preconditions (assumed, per the callers' contract — TODO confirm):
// centers has k entries, distances is k x node.data.size(), and
// node.data is non-empty.
void choose(const Node& node, ::std::vector<::std::size_t>& centers, ::std::vector<::std::vector<Distance>>& distances)
{
    ::std::size_t k = node.degree;
    // min[j] = distance from data[j] to its nearest chosen center so far.
    ::std::vector<Distance> min(node.data.size(), ::std::numeric_limits<Distance>::infinity());
    ::std::uniform_int_distribution<::std::size_t> distribution(0, node.data.size() - 1);
    centers[0] = distribution(this->generator);
    for (::std::size_t i = 0; i < k - 1; ++i)
    {
        Distance max = Distance();
        for (::std::size_t j = 0; j < node.data.size(); ++j)
        {
            // Self-distance is 0 by definition; skip the metric call.
            distances[i][j] = j != centers[i] ? this->metric(node.data[j], node.data[centers[i]]) : 0;
            min[j] = ::std::min(min[j], distances[i][j]);
            if (min[j] > max)
            {
                // data[j] is currently the farthest point from every chosen
                // center — make it the candidate for the next center.
                max = min[j];
                centers[i + 1] = j;
            }
        }
    }
    // The loop above never computed distances to the final center; fill them in.
    for (::std::size_t j = 0; j < node.data.size(); ++j)
    {
        distances[k - 1][j] = this->metric(node.data[j], node.data[centers[k - 1]]);
    }
}
void data(const Node& node, ::std::vector<Value>& data) const
{
data.insert(data.end(), node.data.begin(), node.data.end());
for (::std::size_t i = 0; i < node.children.size(); ++i)
{
data.push_back(node.children[i].pivot);
this->data(node.children[i], data);
}
}
// Inserts `value` into the subtree rooted at `node`.
// Leaf case: append to the bucket and split once it outgrows both the
// configured bucket capacity and the node's branching degree.
// Internal case: route the value to the child with the nearest pivot,
// updating every sibling's distance range toward that child on the way.
void push(Node& node, const Value& value)
{
    if (node.children.empty())
    {
        node.data.push_back(value);
        if (node.data.size() > this->nodeDataMax && node.data.size() > node.degree)
        {
            this->split(node);
        }
    }
    else
    {
        // Distance from the new value to every child pivot; track the closest.
        ::std::vector<Distance> distances(node.children.size());
        ::std::size_t index = 0;
        Distance min = ::std::numeric_limits<Distance>::infinity();
        for (::std::size_t i = 0; i < node.children.size(); ++i)
        {
            distances[i] = this->metric(value, node.children[i].pivot);
            if (distances[i] < min)
            {
                index = i;
                min = distances[i];
            }
        }
        // Maintain the GNAT range tables: children[i].max/min[index] bound the
        // distances from pivot i to everything stored under child `index`,
        // enabling triangle-inequality pruning during search.
        for (::std::size_t i = 0; i < node.children.size(); ++i)
        {
            node.children[i].max[index] = ::std::max(node.children[i].max[index], distances[i]);
            node.children[i].min[index] = ::std::min(node.children[i].min[index], distances[i]);
        }
        this->push(node.children[index], value);
    }
}
// Shared implementation behind nearest() and radius(): best-first traversal
// of the tree using a max-heap of candidate branches.  Exactly one of `k`
// (k-nearest query) and `radius` (range query) is expected to be non-null.
// `neighbors` is maintained as a max-heap ordered by distance, so its front
// is the current worst kept neighbor; `sorted` converts the heap to
// ascending order before returning.  When `this->checks` is set, the search
// becomes approximate: it stops after that many candidate evaluations.
::std::vector<Neighbor> search(const Value& query, const ::std::size_t* k, const Distance* radius, const bool& sorted) const
{
    ::std::vector<Neighbor> neighbors;
    if (this->empty())
    {
        return neighbors;
    }
    if (nullptr != k)
    {
        neighbors.reserve(::std::min(*k, this->size()));
    }
    ::std::size_t checks = 0;
    ::std::vector<Branch> branches;
    // Seed with the root; the recursive overload pushes promising children
    // onto `branches` and candidate values onto `neighbors`.
    this->search(this->root, query, k, radius, branches, neighbors, checks);
    while (!branches.empty() && (!this->checks || checks < this->checks))
    {
        // Take the most promising branch (heap front), then restore the heap.
        Branch branch = ::std::move(branches.front());
        ::std::pop_heap(branches.begin(), branches.end(), BranchCompare());
        branches.pop_back();
        if (nullptr == k || *k == neighbors.size())
        {
            // Once the result set is "full", prune branches whose distance
            // interval [min, max] cannot intersect the current search ball.
            Distance distance = nullptr != radius ? *radius : neighbors.front().first;
            if (branch.first - distance > branch.second->max[branch.second->index] ||
                branch.first + distance < branch.second->min[branch.second->index])
            {
                continue;
            }
        }
        this->search(*branch.second, query, k, radius, branches, neighbors, checks);
    }
    if (sorted)
    {
        // Heap -> ascending order by distance.
        ::std::sort_heap(neighbors.begin(), neighbors.end(), NeighborCompare());
    }
    return neighbors;
}
/// Examine one node: report qualifying values/pivots into the `neighbors`
/// max-heap and enqueue promising children onto the `branches` min-heap.
/// `checks` is incremented per distance evaluation and enforces the optional
/// approximate-search budget (`this->checks`).
void search(const Node& node, const Value& query, const ::std::size_t* k, const Distance* radius, ::std::vector<Branch>& branches, ::std::vector<Neighbor>& neighbors, ::std::size_t& checks) const
{
    if (node.children.empty())
    {
        // Leaf: linear scan over the stored values.
        for (::std::size_t i = 0; i < node.data.size(); ++i)
        {
            Distance distance = this->metric(query, node.data[i]);

            // Accept if the heap is not yet full, or this value beats the
            // current worst neighbor (heap front).
            if (nullptr == k || neighbors.size() < *k || distance < neighbors.front().first)
            {
                if (nullptr == radius || distance < *radius)
                {
                    // Full k-heap: evict the worst before inserting.
                    if (nullptr != k && *k == neighbors.size())
                    {
                        ::std::pop_heap(neighbors.begin(), neighbors.end(), NeighborCompare());
                        neighbors.pop_back();
                    }

                    neighbors.emplace_back(::std::piecewise_construct, ::std::forward_as_tuple(distance), ::std::forward_as_tuple(node.data[i]));
                    ::std::push_heap(neighbors.begin(), neighbors.end(), NeighborCompare());
                }
            }

            // Stop once the distance-evaluation budget is exhausted.
            if (this->checks && ++checks > this->checks)
            {
                return;
            }
        }
    }
    else
    {
        // Internal node: compute query-to-pivot distances, pruning siblings
        // with the triangle inequality as we go (GNAT search).
        ::std::vector<Distance> distances(node.children.size());
        // `removed[j]` marks children pruned for THIS query only; it is
        // distinct from the persistent Node::removed deletion flag below.
        ::std::vector<bool> removed(node.children.size(), false);

        for (::std::size_t i = 0; i < node.children.size(); ++i)
        {
            if (!removed[i])
            {
                distances[i] = this->metric(query, node.children[i].pivot);

                // A pivot is itself a stored value unless it was deleted.
                if (!node.children[i].removed)
                {
                    if (nullptr == k || neighbors.size() < *k || distances[i] < neighbors.front().first)
                    {
                        if (nullptr == radius || distances[i] < *radius)
                        {
                            if (nullptr != k && *k == neighbors.size())
                            {
                                ::std::pop_heap(neighbors.begin(), neighbors.end(), NeighborCompare());
                                neighbors.pop_back();
                            }

                            neighbors.emplace_back(::std::piecewise_construct, ::std::forward_as_tuple(distances[i]), ::std::forward_as_tuple(node.children[i].pivot));
                            ::std::push_heap(neighbors.begin(), neighbors.end(), NeighborCompare());
                        }
                    }
                }

                // Once the bound is tight (radius search or full heap), prune
                // any sibling j whose distance range toward partition i
                // cannot intersect the query ball.
                if (nullptr == k || *k == neighbors.size())
                {
                    Distance distance = nullptr != radius ? *radius : neighbors.front().first;

                    for (::std::size_t j = 0; j < node.children.size(); ++j)
                    {
                        if (i != j && !removed[j])
                        {
                            if (distances[i] - distance > node.children[i].max[j] ||
                                distances[i] + distance < node.children[i].min[j])
                            {
                                removed[j] = true;
                            }
                        }
                    }
                }

                if (this->checks && ++checks > this->checks)
                {
                    return;
                }
            }
        }

        // Surviving children whose own partition range can still intersect
        // the query ball become branches for the best-first loop.
        for (::std::size_t i = 0; i < node.children.size(); ++i)
        {
            if (!removed[i])
            {
                Distance distance = nullptr != radius ? *radius : neighbors.front().first;

                if (distances[i] - distance <= node.children[i].max[i] &&
                    distances[i] + distance >= node.children[i].min[i])
                {
                    branches.emplace_back(distances[i], &node.children[i]);
                    ::std::push_heap(branches.begin(), branches.end(), BranchCompare());
                }
            }
        }
    }
}
/// Split an over-full leaf: choose pivots among its data, create one child
/// per pivot, assign every remaining value to its closest pivot, and fill in
/// the pairwise min/max distance-range tables used for pruning. Children that
/// end up over-full are split recursively (in parallel when OpenMP is on).
void split(Node& node)
{
    // distances[j][i] = metric(data[i], pivot of center j), filled by choose().
    ::std::vector<::std::vector<Distance>> distances(node.degree, ::std::vector<Distance>(node.data.size()));
    ::std::vector<::std::size_t> centers(node.degree);
    this->choose(node, centers, distances);

    // Create one child per chosen center; the center value becomes the pivot.
    for (::std::size_t i = 0; i < centers.size(); ++i)
    {
        node.children.emplace_back(i, node.degree - 1, this->nodeDegree, this->nodeDataMax);
        node.children[i].pivot = ::std::move(node.data[centers[i]]);
    }

    for (::std::size_t i = 0; i < node.data.size(); ++i)
    {
        // Find the closest center for data[i].
        ::std::size_t index = 0;
        Distance min = ::std::numeric_limits<Distance>::infinity();

        for (::std::size_t j = 0; j < centers.size(); ++j)
        {
            Distance distance = distances[j][i];

            if (distance < min)
            {
                index = j;
                min = distance;
            }
        }

        // Update each child's distance range toward the winning partition.
        // Center values themselves are excluded (they were moved out above).
        for (::std::size_t j = 0; j < centers.size(); ++j)
        {
            if (i != centers[j])
            {
                node.children[j].max[index] = ::std::max(node.children[j].max[index], distances[j][i]);
                node.children[j].min[index] = ::std::min(node.children[j].min[index], distances[j][i]);
            }
        }

        if (i != centers[index])
        {
            node.children[index].data.push_back(::std::move(node.data[i]));
        }
    }

    for (::std::size_t i = 0; i < node.children.size(); ++i)
    {
        // Scale each child's degree by its share of the data, clamped to the
        // configured [nodeDegreeMin, nodeDegreeMax] range.
        node.children[i].degree = ::std::min(::std::max(this->nodeDegree * node.children[i].data.size() / node.data.size(), this->nodeDegreeMin), this->nodeDegreeMax);

        // An empty child has no distances toward its own partition; reset the
        // range from infinity to the default so pruning tests stay sane.
        if (node.children[i].data.empty())
        {
            node.children[i].max[i] = Distance();
            node.children[i].min[i] = Distance();
        }
    }

#ifdef _OPENMP
    // Remember the pre-clear size to decide whether parallel recursion pays off.
    ::std::size_t size = node.data.size();
#endif
    // The node is now internal; release its data storage.
    node.data.clear();
    node.data.shrink_to_fit();

#ifdef _OPENMP
#pragma omp parallel for if (size > 2 * this->nodeDataMax)
#if _OPENMP < 200805
    // OpenMP before 3.0 requires a signed loop variable.
    for (::std::ptrdiff_t i = 0; i < node.children.size(); ++i)
#else
    for (::std::size_t i = 0; i < node.children.size(); ++i)
#endif
#else
    for (::std::size_t i = 0; i < node.children.size(); ++i)
#endif
    {
        // Same over-full criterion as push().
        if (node.children[i].data.size() > this->nodeDataMax && node.children[i].data.size() > node.children[i].degree)
        {
            this->split(node.children[i]);
        }
    }
}
::boost::optional<::std::size_t> checks;
::std::mt19937 generator;
Metric metric;
::std::size_t nodeDataMax;
::std::size_t nodeDegree;
::std::size_t nodeDegreeMax;
::std::size_t nodeDegreeMin;
Node root;
::std::size_t values;
};
}
}
#endif // RL_MATH_GNATNEARESTNEIGHBORS_H
|
contract_graph.h | /******************************************************************************
* contract_graph.h
*
* Source of VieCut.
*
******************************************************************************
* Copyright (C) 2017-2018 Alexander Noe <alexander.noe@univie.ac.at>
*
* Published under the MIT license in the LICENSE file.
*****************************************************************************/
#pragma once
#include <algorithm>
#include <functional>
#include <memory>
#include <unordered_set>
#include <utility>
#include <vector>
#include "common/configuration.h"
#include "common/definitions.h"
#include "data-structures/definitions.h"
#include "data_structure/graph_access.h"
#include "data_structure/mutable_graph.h"
#include "parallel/data_structure/union_find.h"
#include "tlx/logger.hpp"
#include "tools/hash.h"
#include "tools/timer.h"
// Static routines that contract a graph according to a vertex->block mapping:
// the result has one node per block and aggregates all inter-block edge
// weights. Used by VieCut's global minimum cut algorithms. Several variants
// exist, trading memory for speed depending on graph density.
class contraction {
 public:
    static constexpr bool debug = false;

    // Not implemented: logs and aborts. Kept so callers compile against a
    // common interface.
    static std::shared_ptr<graph_access> deleteEdge(
        std::shared_ptr<graph_access>, EdgeID) {
        LOG1 << "DELETE EDGE NOT IMPLEMENTED YET";
        exit(2);
    }

    // Not implemented: logs and aborts.
    static std::pair<std::shared_ptr<graph_access>, std::vector<NodeID> >
    contractEdge(std::shared_ptr<graph_access>,
                 std::vector<NodeID>,
                 EdgeID) {
        LOG1 << "CONTRACT EDGE NOT IMPLEMENTED YET";
        exit(2);
    }

    // Pack an unordered pair of 32-bit node ids into a single uint64 key so
    // that (a, b) and (b, a) map to the same key (smaller id in the high word).
    static inline uint64_t get_uint64_from_pair(NodeID cluster_a,
                                                NodeID cluster_b) {
        if (cluster_a > cluster_b) {
            std::swap(cluster_a, cluster_b);
        }
        return ((uint64_t)cluster_a << 32) | cluster_b;
    }

    // Inverse of get_uint64_from_pair; always yields (smaller, larger).
    static inline std::pair<NodeID, NodeID> get_pair_from_uint64(
        uint64_t data) {
        NodeID first = data >> 32;
        NodeID second = data;  // implicit truncation keeps the low 32 bits
        return std::make_pair(first, second);
    }

    // Heuristically peel single vertices out of small blocks when doing so
    // lowers the degree bound: if removing a vertex from its block improves
    // block_degree + improve below target_mindeg, the vertex becomes its own
    // block and target_mindeg is tightened.
    // NOTE(review): target_mindeg, mapping and rev_mapping are all written
    // inside the parallel loop without synchronization — presumably tolerated
    // as a best-effort heuristic, but confirm against upstream VieCut.
    static void findTrivialCuts(std::shared_ptr<graph_access> G,
                                std::vector<NodeID>* m,
                                std::vector<std::vector<NodeID> >* rm,
                                NodeWeight target_mindeg) {
        // create non-const references for better syntax
        std::vector<NodeID>& mapping = *m;
        std::vector<std::vector<NodeID> >& rev_mapping = *rm;
        LOG << "target min degree: " << target_mindeg;
#pragma omp parallel for schedule(dynamic, 1024)
        for (NodeID p = 0; p < rev_mapping.size(); ++p) {
            NodeID bestNode;
            // improve = (internal weight) - (external weight) of the best
            // vertex; negative values mean removal helps.
            NodeWeight improve = 0;
            NodeWeight node_degree = 0;
            NodeWeight block_degree = 0;
            // Only bother with small blocks (logarithmic in graph size).
            if (rev_mapping[p].size() < std::log2(G->number_of_nodes())) {
                NodeID improve_idx;
                for (NodeID node = 0; node < rev_mapping[p].size(); ++node) {
                    for (EdgeID e : G->edges_of(rev_mapping[p][node])) {
                        NodeID contracted_target = mapping[G->getEdgeTarget(e)];
                        if (contracted_target == p) {
                            // intra-block edge: counts in favor of keeping
                            node_degree += G->getEdgeWeight(e);
                            continue;
                        }
                        node_degree -= G->getEdgeWeight(e);
                        block_degree += G->getEdgeWeight(e);
                    }
                    if (improve > node_degree) {
                        improve = node_degree;
                        bestNode = rev_mapping[p][node];
                        improve_idx = node;
                    }
                    node_degree = 0;
                }
                // Peel bestNode into its own singleton block when that yields
                // a strictly smaller degree bound.
                if (improve > 0 &&
                    block_degree + improve < target_mindeg &&
                    rev_mapping[p].size() > 1) {
                    target_mindeg = block_degree + improve;
                    rev_mapping[p].erase(rev_mapping[p].begin() +
                                         improve_idx);
                    VIECUT_ASSERT_LT(bestNode, G->number_of_nodes());
                    rev_mapping.push_back({ bestNode });
                    mapping[bestNode] = rev_mapping.size() - 1;
                }
            }
        }
        LOG << "target min degree now: " << target_mindeg;
    }

    // contraction global_mincut for small number of nodes in constructed graph,
    // we assume a full mesh and remove nonexistent edges afterwards.
    // Edge (i, j), i != j, lives at slot i*(num_nodes-1) + j - (j > i) of a
    // dense weight matrix with the diagonal removed.
    static std::shared_ptr<graph_access> contractGraphFullMesh(
        std::shared_ptr<graph_access> G,
        const std::vector<NodeID>& mapping,
        size_t num_nodes) {
        auto contracted = std::make_shared<graph_access>();
        std::vector<EdgeWeight> intermediate(num_nodes * (num_nodes - 1), 0);
#pragma omp parallel
        {
            // Per-thread accumulation avoids contention; merged below.
            std::vector<EdgeWeight> p_intermediate(
                num_nodes * (num_nodes - 1), 0);
#pragma omp for schedule(dynamic, 1024)
            for (NodeID n = 0; n < G->number_of_nodes(); ++n) {
                NodeID src = mapping[n];
                for (EdgeID e : G->edges_of(n)) {
                    NodeID tgt = mapping[G->getEdgeTarget(e)];
                    if (tgt != src) {
                        EdgeID edge_id =
                            src * (num_nodes - 1) + tgt - (tgt > src);
                        p_intermediate[edge_id] += G->getEdgeWeight(e);
                    }
                }
            }
#pragma omp critical
            {
                for (size_t i = 0; i < intermediate.size(); ++i) {
                    intermediate[i] += p_intermediate[i];
                }
            }
        }
        // Count the edges that actually exist (non-zero aggregated weight).
        EdgeID existing_edges = intermediate.size();
        for (auto e : intermediate) {
            if (e == 0)
                --existing_edges;
        }
        contracted->start_construction(num_nodes, existing_edges);
        for (size_t i = 0; i < num_nodes; ++i) {
            contracted->new_node();
            for (size_t j = 0; j < num_nodes; ++j) {
                if (i == j)
                    continue;
                EdgeID edge_id = i * (num_nodes - 1) + j - (j > i);
                if (intermediate[edge_id] > 0) {
                    EdgeID edge = contracted->new_edge(i, j);
                    contracted->setEdgeWeight(edge, intermediate[edge_id]);
                }
            }
        }
        contracted->finish_construction();
        return contracted;
    }

    // Contract a mutable_graph in place according to a union-find structure:
    // every union-find component is collapsed via contractVertexSet.
    static std::shared_ptr<mutable_graph> fromUnionFind(
        std::shared_ptr<mutable_graph> G,
        union_find* uf) {
        std::vector<std::vector<NodeID> > rev_mapping(uf->n());
        // part[root] = dense component id, assigned in discovery order.
        std::vector<NodeID> part(G->number_of_nodes(), UNDEFINED_NODE);
        NodeID current_pid = 0;
        for (NodeID n : G->nodes()) {
            NodeID part_id = uf->Find(n);
            if (part[part_id] == UNDEFINED_NODE) {
                part[part_id] = current_pid++;
            }
            rev_mapping[part[part_id]].push_back(G->containedVertices(n)[0]);
        }
        for (size_t i = 0; i < rev_mapping.size(); ++i) {
            if (rev_mapping[i].size() > 1) {
                // Translate original vertex ids to current positions, since
                // earlier contractions may have moved them.
                std::unordered_set<NodeID> vtx_to_ctr;
                for (auto v : rev_mapping[i]) {
                    vtx_to_ctr.emplace(G->getCurrentPosition(v));
                }
                G->contractVertexSet(vtx_to_ctr);
            }
        }
        return G;
    }

    // graph_access overload: build a dense mapping from the union-find
    // components and delegate to contractGraph.
    static std::shared_ptr<graph_access> fromUnionFind(
        std::shared_ptr<graph_access> G,
        union_find* uf) {
        std::vector<std::vector<NodeID> > rev_mapping;
        std::vector<NodeID> mapping(G->number_of_nodes());
        std::vector<NodeID> part(G->number_of_nodes(), UNDEFINED_NODE);
        NodeID current_pid = 0;
        for (NodeID n : G->nodes()) {
            NodeID part_id = uf->Find(n);
            if (part[part_id] == UNDEFINED_NODE) {
                part[part_id] = current_pid++;
                rev_mapping.emplace_back();
            }
            mapping[n] = part[part_id];
            // Optionally record the partition on the graph for cut output.
            if (configuration::getConfig()->save_cut) {
                G->setPartitionIndex(n, part[part_id]);
            }
            rev_mapping[part[part_id]].push_back(n);
        }
        return contractGraph(G, mapping, rev_mapping.size());
    }

    // Dispatch on expected density: dense results (few blocks) use the full
    // mesh variant, otherwise the sparse hash-based variant.
    static std::shared_ptr<graph_access>
    contractGraph(std::shared_ptr<graph_access> G,
                  const std::vector<NodeID>& mapping,
                  size_t num_nodes,
                  const std::vector<std::vector<NodeID> >& = { }) {
        if (num_nodes > std::sqrt(G->number_of_nodes())) {
            LOG << "SPARSE CONTRACT!";
            return contractGraphSparse(G, mapping, num_nodes);
        } else {
            LOG << "FULL MESH CONTRACT";
            return contractGraphFullMesh(G, mapping, num_nodes);
        }
    }

    // altered version of KaHiPs matching contraction
    // Aggregates inter-block edge weights in a concurrent hash map keyed by
    // get_uint64_from_pair, then builds the contracted graph in parallel.
    static std::shared_ptr<graph_access>
    contractGraphSparse(std::shared_ptr<graph_access> G,
                        const std::vector<NodeID>& mapping,
                        size_t num_nodes) {
        // contested edge (both incident vertices have at least V/5 vertices)
        // compute value for this edge on every processor to allow parallelism
        timer t;
        EdgeID contested_edge = 0;
        NodeID block0 = 0;
        NodeID block1 = 0;
        // NOTE(review): number_of_nodes() squared may overflow the node id
        // type for large graphs — confirm the intended types here.
        if (G->number_of_edges() * 0.02
            < G->number_of_nodes() * G->number_of_nodes() &&
            G->number_of_nodes() > 100) {
            // Find the two largest blocks; if the second is still huge, the
            // edge between them would be a hash-map hotspot, so its weight is
            // accumulated separately per thread and inserted once.
            std::vector<uint32_t> el(num_nodes);
            for (size_t i = 0; i < mapping.size(); ++i) {
                ++el[mapping[i]];
            }
            std::vector<uint32_t> orig_el = el;
            std::nth_element(el.begin(), el.begin() + 1, el.end(),
                             std::greater<uint32_t>());
            if (el[1] > G->number_of_nodes() / 5) {
                block0 = std::distance(orig_el.begin(),
                                       std::find(orig_el.begin(),
                                                 orig_el.end(), el[0]));
                block1 = std::distance(orig_el.begin(),
                                       std::find(orig_el.begin(),
                                                 orig_el.end(), el[1]));
                contested_edge = get_uint64_from_pair(block1, block0);
            }
        }

        EdgeWeight sumweight_contested = 0;
        auto coarser = std::make_shared<graph_access>();
        std::vector<std::vector<std::pair<PartitionID, EdgeWeight> > >
        building_tool(num_nodes);
        std::vector<size_t> degrees(num_nodes);
        // Concurrent map from packed block pair -> aggregated edge weight.
        growt::uaGrow<xxhash<uint64_t> > new_edges(1024 * 1024);
        t.restart();
        std::vector<size_t> cur_degrees(num_nodes);
#pragma omp parallel
        {
            EdgeWeight contested_weight = 0;
            // Keys this thread inserted first; it later writes their edges.
            std::vector<uint64_t> my_keys;
            auto handle = new_edges.get_handle();
#pragma omp for schedule(guided)
            for (NodeID n = 0; n < G->number_of_nodes(); ++n) {
                NodeID p = mapping[n];
                for (EdgeID e : G->edges_of(n)) {
                    NodeID contracted_target = mapping[G->getEdgeTarget(e)];
                    if (contracted_target <= p) {
                        // self-loops are not in graph
                        // smaller do not need to be stored
                        // as their other side will be
                        continue;
                    }
                    EdgeWeight edge_weight = G->getEdgeWeight(e);
                    uint64_t key = get_uint64_from_pair(p, contracted_target);
                    if (key != contested_edge) {
                        // insert_or_update returns .second == true only on a
                        // fresh insert: that thread owns the key and bumps
                        // the endpoint degrees once.
                        if (handle.insert_or_update(key, edge_weight,
                                                    [](size_t& lhs,
                                                       const size_t& rhs) {
                                                        lhs += rhs;
                                                    }, edge_weight).second) {
#pragma omp atomic
                            ++degrees[p];
#pragma omp atomic
                            ++degrees[contracted_target];
                            my_keys.push_back(key);
                        }
                    } else {
                        // Hotspot edge: accumulate privately, merge later.
                        contested_weight += edge_weight;
                    }
                }
            }
            if (contested_edge > 0) {
#pragma omp critical
                {
                    sumweight_contested += contested_weight;
                }
#pragma omp barrier
#pragma omp single
                {
                    // Exactly one thread publishes the contested edge.
                    if (sumweight_contested > 0) {
                        handle.insert_or_update(contested_edge,
                                                sumweight_contested,
                                                [](size_t& lhs,
                                                   const size_t& rhs) {
                                                    lhs += rhs;
                                                }, sumweight_contested);
                        my_keys.push_back(contested_edge);
                        ++degrees[block0];
                        ++degrees[block1];
                    }
                }
            }
#pragma omp single
            {
                // Prefix-sum the degrees into edge-array offsets.
                size_t num_edges = 0;
                coarser->start_construction(num_nodes, 0);
                for (size_t i = 0; i < degrees.size(); ++i) {
                    cur_degrees[i] = num_edges;
                    num_edges += degrees[i];
                    coarser->new_node_hacky(num_edges);
                }
                coarser->resize_m(num_edges);
            }
            // Each thread writes the edges for keys it owns, claiming slots
            // in each endpoint's range with a CAS-based fetch-and-increment.
            for (auto edge_uint : my_keys) {
                auto edge = get_pair_from_uint64(edge_uint);
                auto edge_weight = (*handle.find(edge_uint)).second;
                size_t firstdeg, seconddeg;
                while (true) {
                    firstdeg = cur_degrees[edge.first];
                    size_t plusone = cur_degrees[edge.first] + 1;
                    if (__sync_bool_compare_and_swap(&cur_degrees[edge.first],
                                                     firstdeg, plusone))
                        break;
                }
                while (true) {
                    seconddeg = cur_degrees[edge.second];
                    size_t plusone = cur_degrees[edge.second] + 1;
                    if (__sync_bool_compare_and_swap(&cur_degrees[edge.second],
                                                     seconddeg,
                                                     plusone))
                        break;
                }
                coarser->new_edge_and_reverse(edge.first, edge.second,
                                              firstdeg, seconddeg, edge_weight);
            }
        }
        coarser->finish_construction();
        return coarser;
    }

    // Hash-free sparse contraction: per-block aggregation using a scratch
    // array indexed by target block, stamped with the current block id to
    // avoid clearing it between blocks.
    static std::shared_ptr<graph_access>
    contractGraphSparseNoHash(std::shared_ptr<graph_access> G,
                              const std::vector<NodeID>& mapping,
                              const std::vector<std::vector<NodeID> >&
                              rev_mapping, size_t num_nodes) {
        std::vector<std::vector<NodeID> > rev_map;
        if (rev_mapping.size() == 0) {
            // create reverse mapping if it wasnt before
            rev_map.resize(num_nodes);
            for (size_t i = 0; i < mapping.size(); ++i) {
                rev_map[mapping[i]].push_back(i);
            }
        } else {
            rev_map = rev_mapping;
        }

        auto contracted = std::make_shared<graph_access>();
        std::vector<std::vector<std::pair<NodeID, EdgeWeight> > > edges;
        edges.resize(rev_map.size());
#pragma omp parallel
        {
#pragma omp single nowait
            {
                double average_degree =
                    static_cast<double>(G->number_of_edges()) /
                    static_cast<double>(G->number_of_nodes());
                EdgeID expected_edges = num_nodes * average_degree;
                // one worker can do this vector allocation while the others
                // build the contracted graph
                contracted->start_construction(num_nodes,
                                               std::min(G->number_of_edges(),
                                                        2 * expected_edges));
            }
            // first: coarse vertex which set this (to avoid total invalidation)
            // second: edge id in contracted graph
            std::vector<std::pair<NodeID, EdgeWeight> > edge_positions(
                num_nodes,
                std::make_pair(UNDEFINED_NODE, UNDEFINED_EDGE));
            std::vector<NodeID> non_null;
#pragma omp for schedule(dynamic)
            for (NodeID p = 0; p < num_nodes; ++p) {
                for (NodeID node = 0; node < rev_map[p].size(); ++node) {
                    for (EdgeID e : G->edges_of(rev_map[p][node])) {
                        NodeID contracted_target = mapping[G->getEdgeTarget(e)];
                        if (contracted_target == p)
                            continue;
                        NodeID last_use =
                            edge_positions[contracted_target].first;
                        if (last_use == p) {
                            // slot already stamped for block p: accumulate
                            edge_positions[contracted_target].second +=
                                G->getEdgeWeight(e);
                        } else {
                            // stale slot: stamp it and start a new sum
                            edge_positions[contracted_target].first = p;
                            edge_positions[contracted_target].second =
                                G->getEdgeWeight(e);
                            non_null.push_back(contracted_target);
                        }
                    }
                }
                for (const auto& tgt : non_null) {
                    edges[p].emplace_back(tgt, edge_positions[tgt].second);
                }
                non_null.clear();
            }
        }
        // Sequential emission keeps node/edge ids consistent.
        for (const auto& vec : edges) {
            NodeID n = contracted->new_node();
            for (const auto& e : vec) {
                EdgeID e_new = contracted->new_edge(n, e.first);
                contracted->setEdgeWeight(e_new, e.second);
            }
        }
        contracted->finish_construction();
        return contracted;
    }
};
|
vms_fmt_plug.c | /*
* This file is part of John the Ripper password cracker.
*
* It comes from OpenVMS support 2.4(jtr_vms_2-4.zip) patch
* posted by David Jones.
*
* Converted to OpenVMS format module by David Jones
*
* Copyright (c) 2011 by David L. Jones <jonesd/at/columbus.rr.com>,
* Copyright (c) 2012 by Dhiru Kholia <dhiru/at/openwall.com> and
* is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modifications, are permitted. */
#if !AC_BUILT
#if __GNUC__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define ARCH_LITTLE_ENDIAN 1
#endif
#endif
#if FMT_EXTERNS_H
#if ARCH_LITTLE_ENDIAN
extern struct fmt_main fmt_VMS;
#endif
#elif FMT_REGISTERS_H
#if ARCH_LITTLE_ENDIAN
john_register_one(&fmt_VMS);
#endif
#else
#include <stdio.h>
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "vms_std.h"
#include "common.h"
#include "formats.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1024 // Tuned on K8-Dual HT
#endif
#endif
#include "memdbg.h"
#ifndef UAI$M_PWDMIX
#define UAI$M_PWDMIX 0x2000000
#endif
#define FORMAT_LABEL "OpenVMS"
#define FORMAT_NAME "Purdy"
#define FORMAT_NAME_NOPWDMIX "Purdy (nopwdmix)"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH UAF_ENCODE_SIZE
#define BINARY_SIZE 8
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct uaf_hash_info)
#define SALT_ALIGN 4
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Known hash/plaintext pairs used by the self test. */
static struct fmt_tests tests[] = {
	/*
	 * The following two test vectors: "USER" and "service" are case-insensitive
	 */
	{"$V$9AYXUd5LfDy-aj48Vj54P-----", "USER"},
	{"$V$p1UQjRZKulr-Z25g5lJ-------", "service"},
	/*
	 * The following one test vector: "President#44" is case-sensitive
	 */
	{"$V$S44zI913bBx-UJrcFSC------D", "President#44"},
	{NULL}
};
/* Per-candidate plaintext and hash buffers, allocated in fmt_vms_init(). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uaf_qword (*crypt_out)[BINARY_SIZE / sizeof(uaf_qword)];
/* Guards one-time uaf_init(); set by valid() or fmt_vms_init(). */
static int initialized;
/*
* See if signature of ciphertext (from passwd file) matches the hack
* produced by the uaf_encode routine (starts with $V$)
*/
/*
 * Decide whether a ciphertext is a well-formed OpenVMS Purdy hash: it must
 * carry the "$V$" signature, be long enough, decode via uaf_hash_decode(),
 * and use one of the supported algorithm ids (1..3).
 */
static int valid(char *ciphertext, struct fmt_main *self )
{
	struct uaf_hash_info pwd;

	/* Lazily initialize the UAF support library exactly once. */
	if (!initialized) {
		uaf_init();
		initialized = 1;
	}
	/* Signature must be "$V$". */
	if (strncmp(ciphertext, "$V$", 3) != 0)
		return 0;
	/* Reject strings too short to hold an encoded hash. */
	if (strlen(ciphertext) < (UAF_ENCODE_SIZE - 1))
		return 0;
	/* Must decode into salt/algorithm/username fields. */
	if (!uaf_hash_decode(ciphertext, &pwd))
		return 0;
#ifdef VMS_DEBUG
	fprintf(stderr, "/VMS_STD/ get_salt decoded '%s' to %x/%x-%x-%x-%x-%x"
	    " %ld\n", ciphertext, pwd.salt, pwd.alg, pwd.username.r40[0],
	    pwd.username.r40[1], pwd.username.r40[2], pwd.username.r40[3],
	    pwd.flags);
#endif
	/* Only algorithms 1..3 are supported. */
	return pwd.alg >= 1 && pwd.alg <= 3;
}
/*
 * One-time format setup: scale the keys-per-crypt limits for OpenMP,
 * allocate the per-candidate key and hash buffers, and make sure the
 * UAF support library is initialized.
 */
static void fmt_vms_init ( struct fmt_main *self )
{
#ifdef _OPENMP
	/* Scale batch sizes so every thread gets OMP_SCALE candidates. */
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* Allocate one slot per candidate; freed in done(). */
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
	/* valid() may already have done this when called first. */
	if (!initialized) {
		uaf_init();
		initialized = 1;
	}
}
/* Release the buffers allocated in fmt_vms_init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/*
* Save a password (key) for testing. VMS_std_set_key returns position value
* we can use if needed to recall the key by a fmt->get_key request. On get_key
* return a private copy.
*/
static void set_key(char *key, int index)
{
strcpy(saved_key[index], key);
}
/* Return the stored candidate at `index` (pointer into saved_key storage). */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Quick scan: does any computed hash match the target binary?
 * Compares only ARCH_SIZE bytes (one machine word) as a fast filter;
 * cmp_one() performs the full BINARY_SIZE comparison afterwards.
 * Without OpenMP, MAX_KEYS_PER_CRYPT is 1, so checking index 0 suffices
 * and the loop is compiled out.
 */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full comparison of the target binary against one computed hash. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* The full hash is compared in cmp_one(); nothing further to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/*
 * Save salt for producing ciphertext from it and saved keys at next crypt call.
 * The pointer references memory owned by the cracker core; it stays valid
 * for the duration of the crypt calls that follow.
 */
static struct uaf_hash_info *cur_salt;
void VMS_std_set_salt ( void *salt )
{
	cur_salt = (struct uaf_hash_info*)salt;
}
/* Bucket the low word of each computed hash at increasing mask widths
 * (PH_MASK_0..PH_MASK_6) for the cracker's hash-table lookups. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/*
 * Hash the password and salt saved with VMS_std_set_key and VMS_std_set_salt,
 * saving the result in global storage for retrieval by vms_fmt.c module.
 * With OpenMP each candidate is hashed in parallel; without OpenMP the
 * braces run once with index 0, which is sufficient because
 * MAX_KEYS_PER_CRYPT is 1 in the serial build.
 */
int VMS_std_crypt(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		uaf_test_password (cur_salt, saved_key[index], 0, crypt_out[index]);
	}
	return count;
}
/*
 * Extract salt from ciphertext string to static storage and return
 * pointer to it. Salt is effectively 70-80 bits (username, salt,
 * algorithm, pwdmix flag).
 * NOTE: returns a pointer to function-static storage, so each call
 * overwrites the previous result (standard for JtR get_salt).
 */
char *VMS_std_get_salt(char *ciphertext)
{
	static struct uaf_hash_info pwd;
	/* Zero first so padding bytes compare equal across identical salts. */
	memset(&pwd, 0, sizeof(pwd));
	uaf_hash_decode ( ciphertext, &pwd );
#ifdef VMS_DEBUG
	printf("/VMS_STD/ get_salt decoded '%s' to %x/%x-%x-%x-%x-%x %ld\n",
	    ciphertext, pwd.salt, pwd.alg, pwd.username.r40[0], pwd.username.r40[1],
	    pwd.username.r40[2], pwd.username.r40[3], pwd.flags );
#endif
	return (char *) &pwd;
}
/*
 * Extract binary hash from ciphertext into static storage and return
 * pointer to it.
 * The union gives word-aligned access to the decoded hash; like get_salt,
 * the static buffer is overwritten on every call.
 */
VMS_word *VMS_std_get_binary(char *ciphertext)
{
	static union {
		struct uaf_hash_info pwd;
		VMS_word b[16];
	} out;

	uaf_hash_decode ( ciphertext, &out.pwd );
	return out.b;
}
/*
* Class record.
*/
/* Format descriptor registered with the John the Ripper core: static
 * parameters first, then the method table wired to the functions above. */
struct fmt_main fmt_VMS = {
	{
		FORMAT_LABEL,               /* .label */
		FORMAT_NAME,                /* .format_name */
		VMS_ALGORITHM_NAME,         /* .algorithm_name */
		BENCHMARK_COMMENT,          /* .benchmark_comment */
		BENCHMARK_LENGTH,           /* .benchmark_length (pwd break len) */
		0,                          /* .tunable_cost_names */
		PLAINTEXT_LENGTH,           /* .plaintext_length (max) */
		BINARY_SIZE,                /* .binary_size (quadword) */
		BINARY_ALIGN,
		SALT_SIZE,                  /* .salt_size (word) */
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/*
		 * This format supports both case-sensitive and case-insensitive passwords,
		 * so this format should set FMT_CASE
		 */
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		tests
	}, {
		fmt_vms_init,               /* changed for jumbo */
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		(void *(*)(char *))VMS_std_get_binary,
		(void *(*)(char *))VMS_std_get_salt,
		{ NULL },                   /* no tunable costs */
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,                       /* no salt_compare */
		(void (*)(void *))VMS_std_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		VMS_std_crypt,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
Kernel_3d_DGZ.h | #ifndef KRIPKE_KERNEL_3D_DGZ_H__
#define KRIPKE_KERNEL_3D_DGZ_H__
#include<Kripke/Kernel.h>
#include<Grid.h>
// Kripke sweep kernel with DGZ (Direction, Group, Zone) loop nesting,
// adapted to run as a STAPL work function (hence define_type for
// serialization). operator() performs the diamond-difference sweep over one
// group set / direction set and returns the updated boundary planes.
class Kernel_3d_DGZ : public Kernel {
 public:
  // Result of one sweep: the outgoing i/j/k boundary planes.
  typedef std::vector<std::vector<double>> result_type;

  // Grid is needed to access metadata (e.g. gd_sets) stored on it.
  Grid_Data* grid_data;
  // Index of the energy-group set this kernel instance sweeps.
  int group_set;
  // Index of the direction set this kernel instance sweeps.
  int direction_set;

  Kernel_3d_DGZ(Grid_Data*);
  virtual ~Kernel_3d_DGZ();

  virtual Nesting_Order nestingPsi(void) const;
  virtual Nesting_Order nestingPhi(void) const;

  virtual void LTimes(Grid_Data *grid_data);
  virtual void LPlusTimes(Grid_Data *grid_data);

  // Sweep work function: grid_view gives zone access, the planes carry the
  // incoming fluxes on the three upwind faces.
  template<typename GridView, typename IPlane, typename JPlane, typename KPlane>
  result_type
  operator()(GridView& grid_view, IPlane const& i_plane,
             JPlane const& j_plane, KPlane const& k_plane);

  // STAPL serialization hook: members shipped with the work function.
  void define_type(stapl::typer& t)
  {
    t.member(grid_data);
    t.member(group_set);
    t.member(direction_set);
  }
};
/* Sweep routine for Diamond-Difference */
/* Macros for offsets with fluxes on cell faces */
#define I_PLANE_INDEX(j, k) (k)*(local_jmax) + (j)
#define J_PLANE_INDEX(i, k) (k)*(local_imax) + (i)
#define K_PLANE_INDEX(i, j) (j)*(local_imax) + (i)
#define Zonal_INDEX(i, j, k) (i) + (local_imax)*(j) \
+ (local_imax)*(local_jmax)*(k)
// Diamond-difference sweep over one group set / direction set.
// Incoming boundary fluxes arrive in i/j/k planes; updated planes are
// returned so the downwind neighbor can consume them.
template<typename GridView, typename IPlane, typename JPlane, typename KPlane>
std::vector<std::vector<double>>
Kernel_3d_DGZ::operator()(GridView& grid_view, IPlane const& i_plane_in,
                          JPlane const& j_plane_in, KPlane const& k_plane_in)
{
  typedef std::array<typename GridView::value_type::property_type::
    storage_type::index, 2> index_type;

  result_type result(3);
  // Local, mutable copies of the incoming face fluxes; updated in place and
  // moved into the result at the end.
  std::vector<double> i_plane = i_plane_in[0];
  std::vector<double> j_plane = j_plane_in[0];
  std::vector<double> k_plane = k_plane_in[0];

  // grid_data, group_set, and direction_set are data members of the Kernel
  Group_Dir_Set& gd_set = grid_data->gd_sets()[group_set][direction_set];
  int num_directions = gd_set.num_directions;
  int num_groups = gd_set.num_groups;
  Directions *direction = gd_set.directions;

  //int num_zones = grid_data->num_zones();
  int local_imax = grid_data->nzones()[0];
  int local_jmax = grid_data->nzones()[1];
  int local_kmax = grid_data->nzones()[2];
  // Comment copied blindly
  // TGS : compiler detects unused variable. Are the macros correct?
  // int local_kmax = grid_data->nzones()[2];

  auto dx = grid_data->deltas(0);
  auto dy = grid_data->deltas(1);
  auto dz = grid_data->deltas(2);

  // All directions have same id,jd,kd, since these are all one Direction Set
  // So pull that information out now
  int octant = direction[0].octant;
  Grid_Sweep_Block const &extent = grid_data->octant_extent()[octant];

  // Precomputed per-axis coefficients 2*cos/delta (see NOTE below).
  std::vector<double> xcos_dxi_all(local_imax);
  std::vector<double> ycos_dyj_all(local_jmax);
  std::vector<double> zcos_dzk_all(local_kmax);

  for (int d = 0; d < num_directions; ++d)
  {
    double xcos = direction[d].xcos;
    double ycos = direction[d].ycos;
    double zcos = direction[d].zcos;

    index_type psi_z_idx{{0, d}};
    index_type rhs_z_idx{{0, d}};

    for (int i = 0; i < local_imax; ++i)
      xcos_dxi_all[i] = 2.0 * xcos / dx[i + 1];
    for (int j = 0; j < local_jmax; ++j)
      ycos_dyj_all[j] = 2.0 * ycos / dy[j + 1];
    for (int k = 0; k < local_kmax; ++k)
      zcos_dzk_all[k] = 2.0 * zcos / dz[k + 1];

#ifdef KRIPKE_USE_OPENMP
#pragma omp parallel for
#endif
    for (int group = 0; group < num_groups; ++group)
    {
      index_type sigt_idx{{gd_set.group0 + group, 0}};
      psi_z_idx[0] = group;
      rhs_z_idx[0] = group;
      int plane_idx = num_directions * num_groups + d * num_groups + group;

      // Sweep the extents in the octant's upwind-to-downwind order.
      for (int i = extent.start_i; i != extent.end_i; i += extent.inc_i)
      {
        // NOTE(review): xcos_dxi_all[i] already holds 2.0*xcos/dx[i+1], so
        // 2.0*xcos / xcos_dxi_all[i] evaluates to dx[i+1] — this looks like a
        // double application of the precomputation (same for ycos/zcos
        // below); confirm against upstream Kripke, which uses 2*cos/delta.
        double xcos_dxi = 2.0 * xcos / xcos_dxi_all[i];
        for (int j = extent.start_j; j != extent.end_j; j += extent.inc_j)
        {
          double ycos_dyj = 2.0 * ycos / ycos_dyj_all[j];
          // NOTE(review): multiplying the face index by plane_idx (here and
          // for the i/j planes below) is an unusual layout — an additive
          // offset per (d, group) plane would be expected; verify against
          // the plane buffers' actual layout.
          double & psi_bo_d_g_z = k_plane[K_PLANE_INDEX(i, j) * plane_idx];
          for (int k = extent.start_k; k != extent.end_k; k += extent.inc_k)
          {
            double zcos_dzk = 2.0 * zcos / zcos_dzk_all[k];

            // get a reference to the vertex being processed
            int z = Zonal_INDEX(i, j, k);
            auto v = (*grid_view.find_vertex(z)).property();

            double & psi_lf_d_g_z = i_plane[I_PLANE_INDEX(j, k) * plane_idx];
            double & psi_fr_d_g_z = j_plane[J_PLANE_INDEX(i, k) * plane_idx];

            /* Calculate new zonal flux */
            double psi_d_g_z = (v.rhs()[group_set][direction_set](rhs_z_idx)
                + psi_lf_d_g_z * xcos_dxi
                + psi_fr_d_g_z * ycos_dyj
                + psi_bo_d_g_z * zcos_dzk)
                / (xcos_dxi + ycos_dyj + zcos_dzk + v.sigt()(sigt_idx));

            v.psi()[group_set][direction_set](psi_z_idx) = psi_d_g_z;

            /* Apply diamond-difference relationships */
            psi_d_g_z *= 2.0;
            psi_lf_d_g_z = psi_d_g_z - psi_lf_d_g_z;
            psi_fr_d_g_z = psi_d_g_z - psi_fr_d_g_z;
            psi_bo_d_g_z = psi_d_g_z - psi_bo_d_g_z;
          }
        }
      }
    } // Group
  } // Direction

  // Hand the updated faces to the downwind neighbor.
  result[0] = std::move(i_plane);
  result[1] = std::move(j_plane);
  result[2] = std::move(k_plane);
  return result;
}
#endif
|
DRB084-threadprivatemissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A file-scope variable used within a function called by a parallel region.
No threadprivate is used to avoid data races.
Data race pairs sum0@61:3 vs. sum0@61:8
sum0@61:3 vs. sum0@61:3
*/
#include <stdio.h>
#include <assert.h>
int sum0=0, sum1=0;
//#pragma omp threadprivate(sum0)
/* Adds i into the file-scope accumulator sum0.
 * Called from a parallel loop below: because sum0 is NOT threadprivate and
 * this reference is outside the lexical extent of the parallel construct,
 * this read-modify-write is the intentional data race of this benchmark
 * (see the race pairs listed in the header comment). Do not "fix". */
void foo (int i)
{
  sum0=sum0+i;
}
/* DataRaceBench kernel: sums 1..1000 twice, once through foo() (racy on the
 * global sum0) and once with a correct reduction on sum1 as the reference.
 * The reduction(+:sum0) clause privatizes sum0 only within the construct's
 * lexical extent; foo() still touches the shared global, so the race the
 * benchmark documents remains — intentionally. */
int main()
{
  int i, sum=0;
#pragma omp parallel for private(i ) reduction(+:sum0)
  for (i=1;i<=1000;i++)
  {
    foo (i);
  }
  /* Fold the (possibly corrupted) global into the reported total. */
  sum=sum+sum0;
  /* reference calculation */
#pragma omp parallel for private(i ) reduction(+:sum1)
  for (i=1;i<=1000;i++)
  {
    sum1=sum1+i;
  }
  printf("sum=%d; sum1=%d\n",sum,sum1);
  // assert(sum==sum1);   /* would fire whenever the race corrupts sum0 */
  return 0;
}
|
displacement_lagrangemultiplier_contact_criteria.h | // KRATOS ______ __ __ _____ __ __ __
// / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ /
// / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ /
// / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / /
// \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS
//
// License: BSD License
// license: ContactStructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
#include "utilities/constraint_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* @details This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
          class TDenseSpace >
class DisplacementLagrangeMultiplierContactCriteria
    : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of DisplacementLagrangeMultiplierContactCriteria
    KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierContactCriteria );

    /// Local Flags
    KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
    KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
    KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
    KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED );

    /// The base class definition
    typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;

    /// The definition of the current class
    typedef DisplacementLagrangeMultiplierContactCriteria< TSparseSpace, TDenseSpace > ClassType;

    /// The dofs array type
    typedef typename BaseType::DofsArrayType DofsArrayType;

    /// The sparse matrix type
    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    /// The dense vector type
    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    /// The sparse space used
    typedef TSparseSpace SparseSpaceType;

    /// The table stream definition TODO: Replace by logger
    typedef TableStreamUtility::Pointer TablePrinterPointerType;

    /// The index type definition
    typedef std::size_t IndexType;

    /// Machine epsilon, used as the "effectively zero" threshold for norms
    static constexpr double Tolerance = std::numeric_limits<double>::epsilon();

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default constructor.
     */
    explicit DisplacementLagrangeMultiplierContactCriteria()
        : BaseType()
    {
    }

    /**
     * @brief Default constructor. (with parameters)
     * @param ThisParameters The configuration parameters
     */
    explicit DisplacementLagrangeMultiplierContactCriteria(Kratos::Parameters ThisParameters)
        : BaseType()
    {
        // Validate and assign defaults
        ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
        this->AssignSettings(ThisParameters);
    }

    /**
     * @brief Constructor with explicit tolerances.
     * @param DispRatioTolerance Relative tolerance for displacement error
     * @param DispAbsTolerance Absolute tolerance for displacement error
     * @param RotRatioTolerance Relative tolerance for rotation error
     * @param RotAbsTolerance Absolute tolerance for rotation error
     * @param LMRatioTolerance Relative tolerance for lagrange multiplier error
     * @param LMAbsTolerance Absolute tolerance for lagrange multiplier error
     * @param EnsureContact To check if the contact is lost
     * @param PrintingOutput If the output is going to be printed in a txt file
     */
    explicit DisplacementLagrangeMultiplierContactCriteria(
        const double DispRatioTolerance,
        const double DispAbsTolerance,
        const double RotRatioTolerance,
        const double RotAbsTolerance,
        const double LMRatioTolerance,
        const double LMAbsTolerance,
        const bool EnsureContact = false,
        const bool PrintingOutput = false
        )
        : BaseType()
    {
        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT, EnsureContact);
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT, PrintingOutput);
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);

        // The displacement solution
        mDispRatioTolerance = DispRatioTolerance;
        mDispAbsTolerance = DispAbsTolerance;

        // The rotation solution
        mRotRatioTolerance = RotRatioTolerance;
        mRotAbsTolerance = RotAbsTolerance;

        // The contact solution
        mLMRatioTolerance = LMRatioTolerance;
        mLMAbsTolerance = LMAbsTolerance;
    }

    // Copy constructor.
    DisplacementLagrangeMultiplierContactCriteria( DisplacementLagrangeMultiplierContactCriteria const& rOther )
        :BaseType(rOther)
        ,mOptions(rOther.mOptions)
        ,mDispRatioTolerance(rOther.mDispRatioTolerance)
        ,mDispAbsTolerance(rOther.mDispAbsTolerance)
        ,mRotRatioTolerance(rOther.mRotRatioTolerance)
        ,mRotAbsTolerance(rOther.mRotAbsTolerance)
        ,mLMRatioTolerance(rOther.mLMRatioTolerance)
        ,mLMAbsTolerance(rOther.mLMAbsTolerance)
        ,mActiveDofs(rOther.mActiveDofs) // FIX: previously omitted, leaving the copy with an empty active-DoF list
    {
    }

    /// Destructor.
    ~DisplacementLagrangeMultiplierContactCriteria() override = default;

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Create method
     * @param ThisParameters The configuration parameters
     */
    typename BaseType::Pointer Create(Parameters ThisParameters) const override
    {
        return Kratos::make_shared<ClassType>(ThisParameters);
    }

    /**
     * @brief Compute relative and absolute error.
     * @details Accumulates, per DoF family (displacement / rotation / Lagrange
     * multiplier), the squared norms of the current solution values and of the
     * increments in rDx, then checks the relative and absolute errors of each
     * family against its tolerances.
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     * @return true if convergence is achieved, false otherwise
     */
    bool PostCriteria(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something
            // Initialize
            double disp_solution_norm = 0.0, rot_solution_norm = 0.0, lm_solution_norm = 0.0, disp_increase_norm = 0.0, rot_increase_norm = 0.0, lm_increase_norm = 0.0;
            IndexType disp_dof_num(0),rot_dof_num(0),lm_dof_num(0);

            // First iterator
            const auto it_dof_begin = rDofSet.begin();

            // The number of active dofs
            const std::size_t number_active_dofs = rb.size();

            // Auxiliar displacement DoF check: when rotation DoFs exist, only the
            // DISPLACEMENT_* components count as displacement; otherwise every
            // non-LM DoF is treated as displacement
            const std::function<bool(const VariableData&)> check_without_rot =
                [](const VariableData& rCurrVar) -> bool {return true;};
            const std::function<bool(const VariableData&)> check_with_rot =
                [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));};
            const auto* p_check_disp = (mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot;

            // Loop over Dofs (loop-local declarations replace the former firstprivate clause)
            #pragma omp parallel for reduction(+:disp_solution_norm, rot_solution_norm, lm_solution_norm, disp_increase_norm, rot_increase_norm, lm_increase_norm, disp_dof_num, rot_dof_num, lm_dof_num)
            for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
                const auto it_dof = it_dof_begin + i;

                const std::size_t dof_id = it_dof->EquationId();

                // Check dof id is solved
                if (dof_id < number_active_dofs) {
                    // NOTE(review): assumes InitializeSolutionStep has filled mActiveDofs for this step
                    if (mActiveDofs[dof_id] == 1) {
                        const double dof_value = it_dof->GetSolutionStepValue(0);
                        const double dof_incr = rDx[dof_id];

                        const auto& r_curr_var = it_dof->GetVariable();
                        if ((r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (r_curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) {
                            lm_solution_norm += std::pow(dof_value, 2);
                            lm_increase_norm += std::pow(dof_incr, 2);
                            ++lm_dof_num;
                        } else if ((*p_check_disp)(r_curr_var)) {
                            disp_solution_norm += std::pow(dof_value, 2);
                            disp_increase_norm += std::pow(dof_incr, 2);
                            ++disp_dof_num;
                        } else { // We will assume is rotation dof
                            KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl;
                            rot_solution_norm += std::pow(dof_value, 2);
                            rot_increase_norm += std::pow(dof_incr, 2);
                            ++rot_dof_num;
                        }
                    }
                }
            }

            // Zero norms are replaced by 1.0 so the ratios below stay finite
            if(disp_increase_norm < Tolerance) disp_increase_norm = 1.0;
            if(rot_increase_norm < Tolerance) rot_increase_norm = 1.0;
            if(lm_increase_norm < Tolerance) lm_increase_norm = 1.0;
            if(disp_solution_norm < Tolerance) disp_solution_norm = 1.0;
            if(rot_solution_norm < Tolerance) rot_solution_norm = 1.0; // FIX: was unguarded -> inf/NaN rot_ratio when no rotation DoFs

            KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) << "WARNING::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;

            const double disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm);
            const double rot_ratio = std::sqrt(rot_increase_norm/rot_solution_norm);
            const double lm_ratio = lm_solution_norm > Tolerance ? std::sqrt(lm_increase_norm/lm_solution_norm) : 0.0;

            // FIX: guard the averages against empty DoF families (division by zero -> inf/NaN)
            const double disp_abs = disp_dof_num > 0 ? std::sqrt(disp_increase_norm)/static_cast<double>(disp_dof_num) : 0.0;
            const double rot_abs = rot_dof_num > 0 ? std::sqrt(rot_increase_norm)/static_cast<double>(rot_dof_num) : 0.0;
            const double lm_abs = lm_dof_num > 0 ? std::sqrt(lm_increase_norm)/static_cast<double>(lm_dof_num) : 0.0;

            // The process info of the model part
            ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

            // We print the results // TODO: Replace for the new log
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    std::cout.precision(4);
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& r_table = p_table->GetTable();
                    if (mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                        r_table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << rot_ratio << mRotRatioTolerance << rot_abs << mRotAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance;
                    } else {
                        r_table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance;
                    }
                } else {
                    std::cout.precision(4);
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) {
                        // FIX: "ONVERGENCE" -> "CONVERGENCE" typo in the log header
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("DoF CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                        if (mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl;
                        }
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT(" LAGRANGE MUL:\tRATIO = ") << lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl;
                    } else {
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "DoF CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                        if (mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tROTATION: RATIO = " << rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl;
                        }
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << " LAGRANGE MUL:\tRATIO = " << lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl;
                    }
                }
            }

            // We check if converged: each family converges if either its relative
            // or its absolute error is within tolerance
            const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance);
            const bool rot_converged = (mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? (rot_ratio <= mRotRatioTolerance || rot_abs <= mRotAbsTolerance) : true;
            const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) ? true : (lm_ratio <= mLMRatioTolerance || lm_abs <= mLMAbsTolerance);

            if (disp_converged && rot_converged && lm_converged) {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& r_table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT))
                            r_table << BOLDFONT(FGRN("       Achieved"));
                        else
                            r_table << "Achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is achieved" << std::endl;
                    }
                }
                return true;
            } else {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& r_table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT))
                            r_table << BOLDFONT(FRED("   Not achieved"));
                        else
                            r_table << "Not achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is not achieved" << std::endl;
                    }
                }
                return false;
            }
        }
        else // In this case all the displacements are imposed!
            return true;
    }

    /**
     * @brief This function initialize the convergence criteria
     * @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
     */
    void Initialize( ModelPart& rModelPart ) override
    {
        // Initialize
        BaseType::mConvergenceCriteriaIsInitialized = true;

        // Check rotation dof
        // NOTE(review): ContactUtilities is used here but only constraint_utilities.h
        // is included above -- confirm contact_utilities.h is pulled in transitively
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart));

        // Initialize header of the output table (once per criteria instance)
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED)) {
            TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
            auto& r_table = p_table->GetTable();
            r_table.AddColumn("DP RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            if (mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                r_table.AddColumn("RT RATIO", 10);
                r_table.AddColumn("EXP. RAT", 10);
                r_table.AddColumn("ABS", 10);
                r_table.AddColumn("EXP. ABS", 10);
            }
            r_table.AddColumn("LM RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            r_table.AddColumn("CONVERGENCE", 15);
            mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, true);
        }
    }

    /**
     * @brief This function initializes the solution step
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        // Filling mActiveDofs when MPC exist (consumed by PostCriteria)
        ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
    }

    /**
     * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
     * @return The default parameters
     */
    Parameters GetDefaultParameters() const override
    {
        Parameters default_parameters = Parameters(R"(
        {
            "name"                                     : "displacement_lagrangemultiplier_contact_criteria",
            "ensure_contact"                           : false,
            "print_convergence_criterion"              : false,
            "displacement_relative_tolerance"          : 1.0e-4,
            "displacement_absolute_tolerance"          : 1.0e-9,
            "rotation_relative_tolerance"              : 1.0e-4,
            "rotation_absolute_tolerance"              : 1.0e-9,
            "contact_displacement_relative_tolerance"  : 1.0e-4,
            "contact_displacement_absolute_tolerance"  : 1.0e-9
        })");

        // Getting base class default parameters
        const Parameters base_default_parameters = BaseType::GetDefaultParameters();
        default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
        return default_parameters;
    }

    /**
     * @brief Returns the name of the class as used in the settings (snake_case format)
     * @return The name of the class
     */
    static std::string Name()
    {
        return "displacement_lagrangemultiplier_contact_criteria";
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "DisplacementLagrangeMultiplierContactCriteria";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief This method assigns settings to member variables
     * @param ThisParameters Parameters that are assigned to the member variables
     */
    void AssignSettings(const Parameters ThisParameters) override
    {
        BaseType::AssignSettings(ThisParameters);

        // The displacement solution
        mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble();
        mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble();

        // The rotation solution
        mRotRatioTolerance = ThisParameters["rotation_relative_tolerance"].GetDouble();
        mRotAbsTolerance = ThisParameters["rotation_absolute_tolerance"].GetDouble();

        // The contact solution
        mLMRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble();
        mLMAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble();

        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
    }

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}
private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    Flags mOptions;               /// Local flags

    double mDispRatioTolerance;   /// The ratio threshold for the norm of the displacement
    double mDispAbsTolerance;     /// The absolute value threshold for the norm of the displacement

    double mRotRatioTolerance;    /// The ratio threshold for the norm of the rotation
    double mRotAbsTolerance;      /// The absolute value threshold for the norm of the rotation

    double mLMRatioTolerance;     /// The ratio threshold for the norm of the LM
    double mLMAbsTolerance;       /// The absolute value threshold for the norm of the LM

    std::vector<int> mActiveDofs; /// This vector contains the dofs that are active

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@}
    ///@name Serialization
    ///@{

    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Unaccessible methods
    ///@{

    ///@}
}; // Kratos DisplacementLagrangeMultiplierContactCriteria
///@name Local flags creation
///@{
/// Local Flags
// Out-of-class definitions of the local flags declared with
// KRATOS_DEFINE_LOCAL_FLAG; each flag occupies its own bit position (0-3)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H */
|
GB_unop__exp_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__exp_fp64_fp64)
// op(A') function: GB (_unop_tran__exp_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = exp (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = exp (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = exp (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXP || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator element-wise: Cx [p] = exp (Ax [p]).
// NOTE: this file is auto-generated from Generator/* -- code is left
// byte-identical; only comments are added here.
GrB_Info GB (_unop_apply__exp_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap size)
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_EXP / GxB_NO_FP64; caller falls
    // back to the generic (non-specialized) kernel
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;        // cast: double -> double (identity)
            Cx [p] = exp (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ; // skip entries absent from the bitmap
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = exp (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = exp (A'): transpose and apply, via the shared transpose template.
// The textual #include below expands GB_unop_transpose.c using the
// GB_CAST_OP / GB_ATYPE / GB_CTYPE macros defined earlier in this file.
// NOTE: auto-generated file -- code left byte-identical, comments only.
GrB_Info GB (_unop_tran__exp_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread workspaces
    const int64_t *restrict A_slice,    // partition of A across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolutiondepthwise_3x3_pack4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 convolution, stride 1, pack4 channel layout, bf16 storage.
// Activations and weights are stored as unsigned short holding the upper 16
// bits of the fp32 bit pattern (bf16): in the asm, "shll #16" widens
// bf16 -> fp32 and "shrn #16" narrows fp32 -> bf16 by truncation.
// Each group g convolves one packed-4 channel slice independently (depthwise).
static void convdw3x3s1_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
#if __aarch64__
const int w = bottom_blob.w; // input row width; used to advance two input rows per iteration below
#endif
const int outw = top_blob.w;
const int outh = top_blob.h;
const int group = bottom_blob.c; // depthwise: one input channel slice per output channel slice
const float* bias = _bias; // fp32 bias, 4 lanes per group; may be empty
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
const unsigned short* k0 = kernel.row<const unsigned short>(g);
unsigned short* outptr0 = out.row<unsigned short>(0);
const Mat img0 = bottom_blob.channel(g);
// three consecutive input rows feed one output row (3x3 window, stride 1)
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
// widen the nine bf16 kernel taps (4 packed channels each) to fp32 once per group
float32x4_t _k00 = vcvt_f32_bf16(vld1_u16(k0));
float32x4_t _k01 = vcvt_f32_bf16(vld1_u16(k0 + 4));
float32x4_t _k02 = vcvt_f32_bf16(vld1_u16(k0 + 8));
float32x4_t _k10 = vcvt_f32_bf16(vld1_u16(k0 + 12));
float32x4_t _k11 = vcvt_f32_bf16(vld1_u16(k0 + 16));
float32x4_t _k12 = vcvt_f32_bf16(vld1_u16(k0 + 20));
float32x4_t _k20 = vcvt_f32_bf16(vld1_u16(k0 + 24));
float32x4_t _k21 = vcvt_f32_bf16(vld1_u16(k0 + 28));
float32x4_t _k22 = vcvt_f32_bf16(vld1_u16(k0 + 32));
int i = 0;
#if __aarch64__
// AArch64 fast path: produce two output rows per iteration using four
// input rows (r0..r3); the middle rows r1/r2 are shared by both outputs.
unsigned short* outptr1 = out.row<unsigned short>(1);
const unsigned short* r3 = img0.row<const unsigned short>(3);
for (; i + 1 < outh; i += 2)
{
int j = 0;
// 2 rows x 4 output columns per asm block
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%3, #256]       \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n" // r10 r11 r12 r13
"mov v16.16b, %21.16b             \n" // sum00
"mov v17.16b, %21.16b             \n" // sum01
"prfm pldl1keep, [%3, #128]       \n"
"ld1 {v28.4h, v29.4h}, [%3]       \n" // r14 r15
"shll v10.4s, v10.4h, #16         \n"
"shll v11.4s, v11.4h, #16         \n"
"mov v18.16b, %21.16b             \n" // sum02
"mov v19.16b, %21.16b             \n" // sum03
"shll v12.4s, v12.4h, #16         \n"
"shll v13.4s, v13.4h, #16         \n"
"mov v20.16b, %21.16b             \n" // sum10
"fmla v16.4s, %15.4s, v10.4s      \n"
"fmla v17.4s, %15.4s, v11.4s      \n"
"mov v21.16b, %21.16b             \n" // sum11
"fmla v18.4s, %15.4s, v12.4s      \n"
"fmla v19.4s, %15.4s, v13.4s      \n"
"mov v22.16b, %21.16b             \n" // sum12
"fmla v20.4s, %12.4s, v10.4s      \n"
"fmla v21.4s, %12.4s, v11.4s      \n"
"mov v23.16b, %21.16b             \n" // sum13
"fmla v22.4s, %12.4s, v12.4s      \n"
"fmla v23.4s, %12.4s, v13.4s      \n"
"shll v28.4s, v28.4h, #16         \n"
"fmla v16.4s, %16.4s, v11.4s      \n"
"fmla v17.4s, %16.4s, v12.4s      \n"
"shll v29.4s, v29.4h, #16         \n"
"fmla v18.4s, %16.4s, v13.4s      \n"
"fmla v19.4s, %16.4s, v28.4s      \n"
"prfm pldl1keep, [%4, #256]       \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%4], #32 \n" // r20 r21 r22 r23
"fmla v20.4s, %13.4s, v11.4s      \n"
"fmla v21.4s, %13.4s, v12.4s      \n"
"fmla v22.4s, %13.4s, v13.4s      \n"
"fmla v23.4s, %13.4s, v28.4s      \n"
"prfm pldl1keep, [%4, #128]       \n"
"ld1 {v14.4h, v15.4h}, [%4]       \n" // r24 r25
"fmla v16.4s, %17.4s, v12.4s      \n"
"fmla v17.4s, %17.4s, v13.4s      \n"
"shll v24.4s, v24.4h, #16         \n"
"fmla v18.4s, %17.4s, v28.4s      \n"
"fmla v19.4s, %17.4s, v29.4s      \n"
"shll v25.4s, v25.4h, #16         \n"
"fmla v20.4s, %14.4s, v12.4s      \n"
"fmla v21.4s, %14.4s, v13.4s      \n"
"prfm pldl1keep, [%2, #256]       \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%2], #32 \n" // r00 r01 r02 r03
"fmla v22.4s, %14.4s, v28.4s      \n"
"fmla v23.4s, %14.4s, v29.4s      \n"
"shll v26.4s, v26.4h, #16         \n"
"fmla v16.4s, %18.4s, v24.4s      \n"
"fmla v17.4s, %18.4s, v25.4s      \n"
"shll v27.4s, v27.4h, #16         \n"
"fmla v18.4s, %18.4s, v26.4s      \n"
"fmla v19.4s, %18.4s, v27.4s      \n"
"prfm pldl1keep, [%5, #256]       \n"
"ld1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%5], #32 \n" // r30 r31 r32 r33
"fmla v20.4s, %15.4s, v24.4s      \n"
"fmla v21.4s, %15.4s, v25.4s      \n"
"shll v14.4s, v14.4h, #16         \n"
"fmla v22.4s, %15.4s, v26.4s      \n"
"fmla v23.4s, %15.4s, v27.4s      \n"
"shll v15.4s, v15.4h, #16         \n"
"fmla v16.4s, %19.4s, v25.4s      \n"
"fmla v17.4s, %19.4s, v26.4s      \n"
"fmla v18.4s, %19.4s, v27.4s      \n"
"fmla v19.4s, %19.4s, v14.4s      \n"
"fmla v20.4s, %16.4s, v25.4s      \n"
"fmla v21.4s, %16.4s, v26.4s      \n"
"prfm pldl1keep, [%2, #128]       \n"
"ld1 {v24.4h, v25.4h}, [%2]       \n" // r04 r05
"fmla v22.4s, %16.4s, v27.4s      \n"
"fmla v23.4s, %16.4s, v14.4s      \n"
"shll v10.4s, v10.4h, #16         \n"
"shll v11.4s, v11.4h, #16         \n"
"fmla v16.4s, %20.4s, v26.4s      \n"
"fmla v17.4s, %20.4s, v27.4s      \n"
"shll v12.4s, v12.4h, #16         \n"
"fmla v18.4s, %20.4s, v14.4s      \n"
"fmla v19.4s, %20.4s, v15.4s      \n"
"shll v13.4s, v13.4h, #16         \n"
"fmla v20.4s, %17.4s, v26.4s      \n"
"fmla v21.4s, %17.4s, v27.4s      \n"
"prfm pldl1keep, [%5, #128]       \n"
"ld1 {v26.4h, v27.4h}, [%5]       \n" // r34 r35
"fmla v22.4s, %17.4s, v14.4s      \n"
"fmla v23.4s, %17.4s, v15.4s      \n"
"shll v28.4s, v28.4h, #16         \n"
"fmla v16.4s, %12.4s, v10.4s      \n"
"fmla v17.4s, %12.4s, v11.4s      \n"
"shll v29.4s, v29.4h, #16         \n"
"fmla v18.4s, %12.4s, v12.4s      \n"
"fmla v19.4s, %12.4s, v13.4s      \n"
"shll v30.4s, v30.4h, #16         \n"
"fmla v20.4s, %18.4s, v28.4s      \n"
"fmla v21.4s, %18.4s, v29.4s      \n"
"shll v31.4s, v31.4h, #16         \n"
"fmla v22.4s, %18.4s, v30.4s      \n"
"fmla v23.4s, %18.4s, v31.4s      \n"
"shll v24.4s, v24.4h, #16         \n"
"fmla v16.4s, %13.4s, v11.4s      \n"
"fmla v17.4s, %13.4s, v12.4s      \n"
"fmla v18.4s, %13.4s, v13.4s      \n"
"fmla v19.4s, %13.4s, v24.4s      \n"
"shll v26.4s, v26.4h, #16         \n"
"fmla v20.4s, %19.4s, v29.4s      \n"
"fmla v21.4s, %19.4s, v30.4s      \n"
"fmla v22.4s, %19.4s, v31.4s      \n"
"fmla v23.4s, %19.4s, v26.4s      \n"
"shll v25.4s, v25.4h, #16         \n"
"fmla v16.4s, %14.4s, v12.4s      \n"
"fmla v17.4s, %14.4s, v13.4s      \n"
"fmla v18.4s, %14.4s, v24.4s      \n"
"fmla v19.4s, %14.4s, v25.4s      \n"
"shll v27.4s, v27.4h, #16         \n"
"fmla v20.4s, %20.4s, v30.4s      \n"
"fmla v21.4s, %20.4s, v31.4s      \n"
"fmla v22.4s, %20.4s, v26.4s      \n"
"fmla v23.4s, %20.4s, v27.4s      \n"
"shrn v16.4h, v16.4s, #16         \n"
"shrn v17.4h, v17.4s, #16         \n"
"shrn v18.4h, v18.4s, #16         \n"
"shrn v19.4h, v19.4s, #16         \n"
"shrn v20.4h, v20.4s, #16         \n"
"shrn v21.4h, v21.4s, #16         \n"
"shrn v22.4h, v22.4s, #16         \n"
"shrn v23.4h, v23.4s, #16         \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%0], #32 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0),      // %2
"=r"(r1),      // %3
"=r"(r2),      // %4
"=r"(r3)       // %5
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k02), // %14
"w"(_k10), // %15
"w"(_k11), // %16
"w"(_k12), // %17
"w"(_k20), // %18
"w"(_k21), // %19
"w"(_k22), // %20
"w"(_bias0) // %21
: "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
// 2 rows x 2 output columns; row pointers advanced by "add" inside the asm
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%3, #256]       \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3] \n" // r10 r11 r12 r13
"mov v16.16b, %21.16b             \n" // sum00
"mov v17.16b, %21.16b             \n" // sum01
"shll v10.4s, v10.4h, #16         \n"
"shll v11.4s, v11.4h, #16         \n"
"mov v18.16b, %21.16b             \n" // sum10
"mov v19.16b, %21.16b             \n" // sum11
"fmla v16.4s, %15.4s, v10.4s      \n"
"fmla v17.4s, %15.4s, v11.4s      \n"
"shll v12.4s, v12.4h, #16         \n"
"fmla v18.4s, %12.4s, v10.4s      \n"
"fmla v19.4s, %12.4s, v11.4s      \n"
"shll v13.4s, v13.4h, #16         \n"
"fmla v16.4s, %16.4s, v11.4s      \n"
"fmla v17.4s, %16.4s, v12.4s      \n"
"prfm pldl1keep, [%4, #256]       \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4] \n" // r20 r21 r22 r23
"fmla v18.4s, %13.4s, v11.4s      \n"
"fmla v19.4s, %13.4s, v12.4s      \n"
"shll v20.4s, v20.4h, #16         \n"
"fmla v16.4s, %17.4s, v12.4s      \n"
"fmla v17.4s, %17.4s, v13.4s      \n"
"shll v21.4s, v21.4h, #16         \n"
"fmla v18.4s, %14.4s, v12.4s      \n"
"fmla v19.4s, %14.4s, v13.4s      \n"
"shll v22.4s, v22.4h, #16         \n"
"fmla v16.4s, %18.4s, v20.4s      \n"
"fmla v17.4s, %18.4s, v21.4s      \n"
"shll v23.4s, v23.4h, #16         \n"
"fmla v18.4s, %15.4s, v20.4s      \n"
"fmla v19.4s, %15.4s, v21.4s      \n"
"prfm pldl1keep, [%2, #256]       \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%2] \n" // r00 r01 r02 r03
"fmla v16.4s, %19.4s, v21.4s      \n"
"fmla v17.4s, %19.4s, v22.4s      \n"
"prfm pldl1keep, [%5, #256]       \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%5] \n" // r30 r31 r32 r33
"fmla v18.4s, %16.4s, v21.4s      \n"
"fmla v19.4s, %16.4s, v22.4s      \n"
"shll v10.4s, v10.4h, #16         \n"
"fmla v16.4s, %20.4s, v22.4s      \n"
"fmla v17.4s, %20.4s, v23.4s      \n"
"shll v24.4s, v24.4h, #16         \n"
"fmla v18.4s, %17.4s, v22.4s      \n"
"fmla v19.4s, %17.4s, v23.4s      \n"
"shll v11.4s, v11.4h, #16         \n"
"shll v25.4s, v25.4h, #16         \n"
"fmla v16.4s, %12.4s, v10.4s      \n"
"fmla v17.4s, %12.4s, v11.4s      \n"
"shll v12.4s, v12.4h, #16         \n"
"fmla v18.4s, %18.4s, v24.4s      \n"
"fmla v19.4s, %18.4s, v25.4s      \n"
"shll v26.4s, v26.4h, #16         \n"
"fmla v16.4s, %13.4s, v11.4s      \n"
"fmla v17.4s, %13.4s, v12.4s      \n"
"shll v13.4s, v13.4h, #16         \n"
"fmla v18.4s, %19.4s, v25.4s      \n"
"fmla v19.4s, %19.4s, v26.4s      \n"
"shll v27.4s, v27.4h, #16         \n"
"fmla v16.4s, %14.4s, v12.4s      \n"
"fmla v17.4s, %14.4s, v13.4s      \n"
"add %3, %3, #16                  \n"
"fmla v18.4s, %20.4s, v26.4s      \n"
"fmla v19.4s, %20.4s, v27.4s      \n"
"add %4, %4, #16                  \n"
"shrn v16.4h, v16.4s, #16         \n"
"shrn v17.4h, v17.4s, #16         \n"
"add %2, %2, #16                  \n"
"shrn v18.4h, v18.4s, #16         \n"
"shrn v19.4h, v19.4s, #16         \n"
"add %5, %5, #16                  \n"
"st1 {v16.4h, v17.4h}, [%0], #16  \n"
"st1 {v18.4h, v19.4h}, [%1], #16  \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0),      // %2
"=r"(r1),      // %3
"=r"(r2),      // %4
"=r"(r3)       // %5
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k02), // %14
"w"(_k10), // %15
"w"(_k11), // %16
"w"(_k12), // %17
"w"(_k20), // %18
"w"(_k21), // %19
"w"(_k22), // %20
"w"(_bias0) // %21
: "memory", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
// 2 rows x 1 output column tail
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%3, #192]       \n"
"ld1 {v10.4h, v11.4h, v12.4h}, [%3] \n" // r10 r11 r12
"mov v18.16b, %21.16b             \n" // sum0
"mov v19.16b, %21.16b             \n" // sum1
"shll v10.4s, v10.4h, #16         \n"
"shll v11.4s, v11.4h, #16         \n"
"fmul v16.4s, %15.4s, v10.4s      \n"
"fmul v17.4s, %12.4s, v10.4s      \n"
"shll v12.4s, v12.4h, #16         \n"
"fmla v18.4s, %16.4s, v11.4s      \n"
"fmla v19.4s, %13.4s, v11.4s      \n"
"prfm pldl1keep, [%4, #192]       \n"
"ld1 {v20.4h, v21.4h, v22.4h}, [%4] \n" // r20 r21 r22
"fmla v16.4s, %17.4s, v12.4s      \n"
"fmla v17.4s, %14.4s, v12.4s      \n"
"shll v20.4s, v20.4h, #16         \n"
"shll v21.4s, v21.4h, #16         \n"
"fmla v18.4s, %18.4s, v20.4s      \n"
"fmla v19.4s, %15.4s, v20.4s      \n"
"prfm pldl1keep, [%2, #192]       \n"
"ld1 {v10.4h, v11.4h, v12.4h}, [%2] \n" // r00 r01 r02
"shll v22.4s, v22.4h, #16         \n"
"prfm pldl1keep, [%5, #192]       \n"
"ld1 {v24.4h, v25.4h, v26.4h}, [%5] \n" // r30 r31 r32
"fmla v16.4s, %19.4s, v21.4s      \n"
"fmla v17.4s, %16.4s, v21.4s      \n"
"shll v10.4s, v10.4h, #16         \n"
"shll v24.4s, v24.4h, #16         \n"
"fmla v18.4s, %20.4s, v22.4s      \n"
"fmla v19.4s, %17.4s, v22.4s      \n"
"shll v11.4s, v11.4h, #16         \n"
"shll v25.4s, v25.4h, #16         \n"
"fmla v16.4s, %12.4s, v10.4s      \n"
"fmla v17.4s, %18.4s, v24.4s      \n"
"shll v12.4s, v12.4h, #16         \n"
"shll v26.4s, v26.4h, #16         \n"
"fmla v18.4s, %13.4s, v11.4s      \n"
"fmla v19.4s, %19.4s, v25.4s      \n"
"add %3, %3, #8                   \n"
"fmla v16.4s, %14.4s, v12.4s      \n"
"fmla v17.4s, %20.4s, v26.4s      \n"
"add %4, %4, #8                   \n"
"fadd v18.4s, v18.4s, v16.4s      \n"
"fadd v19.4s, v19.4s, v17.4s      \n"
"add %2, %2, #8                   \n"
"shrn v18.4h, v18.4s, #16         \n"
"shrn v19.4h, v19.4s, #16         \n"
"add %5, %5, #8                   \n"
"st1 {v18.4h}, [%0], #8           \n"
"st1 {v19.4h}, [%1], #8           \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0),      // %2
"=r"(r1),      // %3
"=r"(r2),      // %4
"=r"(r3)       // %5
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k02), // %14
"w"(_k10), // %15
"w"(_k11), // %16
"w"(_k12), // %17
"w"(_k20), // %18
"w"(_k21), // %19
"w"(_k22), // %20
"w"(_bias0) // %21
: "memory", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v24", "v25", "v26");
}
// skip the 2-pixel right apron plus one extra input row (two rows consumed per iteration); pack4 => x4 elements
r0 += 2 * 4 + w * 4;
r1 += 2 * 4 + w * 4;
r2 += 2 * 4 + w * 4;
r3 += 2 * 4 + w * 4;
// each outptr was advanced one full row by the asm stores; jump over the row the other pointer wrote
outptr0 += outw * 4;
outptr1 += outw * 4;
}
#endif // __aarch64__
// single-output-row path: remainder row on aarch64, all rows on armv7
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256]       \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n" // r00 r01 r02 r03
"mov v16.16b, %17.16b             \n" // sum00
"mov v17.16b, %17.16b             \n" // sum01
"mov v18.16b, %17.16b             \n" // sum02
"mov v19.16b, %17.16b             \n" // sum03
"shll v10.4s, v10.4h, #16         \n"
"shll v11.4s, v11.4h, #16         \n"
"fmla v16.4s, %8.4s, v10.4s       \n"
"fmla v17.4s, %8.4s, v11.4s       \n"
"shll v12.4s, v12.4h, #16         \n"
"shll v13.4s, v13.4h, #16         \n"
"fmla v18.4s, %8.4s, v12.4s       \n"
"fmla v19.4s, %8.4s, v13.4s       \n"
"prfm pldl1keep, [%1, #128]       \n"
"ld1 {v14.4h, v15.4h}, [%1]       \n" // r04 r05
"fmla v16.4s, %9.4s, v11.4s       \n"
"fmla v17.4s, %9.4s, v12.4s       \n"
"shll v14.4s, v14.4h, #16         \n"
"fmla v18.4s, %9.4s, v13.4s       \n"
"fmla v19.4s, %9.4s, v14.4s       \n"
"prfm pldl1keep, [%2, #256]       \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" // r10 r11 r12 r13
"fmla v16.4s, %10.4s, v12.4s      \n"
"fmla v17.4s, %10.4s, v13.4s      \n"
"shll v15.4s, v15.4h, #16         \n"
"fmla v18.4s, %10.4s, v14.4s      \n"
"fmla v19.4s, %10.4s, v15.4s      \n"
"shll v20.4s, v20.4h, #16         \n"
"shll v21.4s, v21.4h, #16         \n"
"fmla v16.4s, %11.4s, v20.4s      \n"
"fmla v17.4s, %11.4s, v21.4s      \n"
"shll v22.4s, v22.4h, #16         \n"
"shll v23.4s, v23.4h, #16         \n"
"fmla v18.4s, %11.4s, v22.4s      \n"
"fmla v19.4s, %11.4s, v23.4s      \n"
"prfm pldl1keep, [%2, #128]       \n"
"ld1 {v14.4h, v15.4h}, [%2]       \n" // r14 r15
"fmla v16.4s, %12.4s, v21.4s      \n"
"fmla v17.4s, %12.4s, v22.4s      \n"
"shll v14.4s, v14.4h, #16         \n"
"fmla v18.4s, %12.4s, v23.4s      \n"
"fmla v19.4s, %12.4s, v14.4s      \n"
"prfm pldl1keep, [%3, #256]       \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n" // r20 r21 r22 r23
"fmla v16.4s, %13.4s, v22.4s      \n"
"fmla v17.4s, %13.4s, v23.4s      \n"
"shll v15.4s, v15.4h, #16         \n"
"fmla v18.4s, %13.4s, v14.4s      \n"
"fmla v19.4s, %13.4s, v15.4s      \n"
"shll v10.4s, v10.4h, #16         \n"
"shll v11.4s, v11.4h, #16         \n"
"fmla v16.4s, %14.4s, v10.4s      \n"
"fmla v17.4s, %14.4s, v11.4s      \n"
"shll v12.4s, v12.4h, #16         \n"
"shll v13.4s, v13.4h, #16         \n"
"fmla v18.4s, %14.4s, v12.4s      \n"
"fmla v19.4s, %14.4s, v13.4s      \n"
"prfm pldl1keep, [%3, #128]       \n"
"ld1 {v14.4h, v15.4h}, [%3]       \n" // r24 r25
"fmla v16.4s, %15.4s, v11.4s      \n"
"fmla v17.4s, %15.4s, v12.4s      \n"
"shll v14.4s, v14.4h, #16         \n"
"fmla v18.4s, %15.4s, v13.4s      \n"
"fmla v19.4s, %15.4s, v14.4s      \n"
"fmla v16.4s, %16.4s, v12.4s      \n"
"fmla v17.4s, %16.4s, v13.4s      \n"
"shll v15.4s, v15.4h, #16         \n"
"fmla v18.4s, %16.4s, v14.4s      \n"
"fmla v19.4s, %16.4s, v15.4s      \n"
"shrn v16.4h, v16.4s, #16         \n"
"shrn v17.4h, v17.4s, #16         \n"
"shrn v18.4h, v18.4s, #16         \n"
"shrn v19.4h, v19.4s, #16         \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2)       // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
asm volatile(
"pld [%1, #128]                   \n"
"vld1.u16 {d30-d31}, [%1 :64]!    \n" // r00 r01
"vmov q10, %q17                   \n" // sum00
"vmov q11, %q17                   \n" // sum01
"vshll.u16 q14, d30, #16          \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q10, %q8, q14           \n"
"vmla.f32 q11, %q8, q15           \n"
"vmla.f32 q10, %q9, q15           \n"
"pld [%1, #128]                   \n"
"vld1.u16 {d30-d31}, [%1 :64]!    \n" // r02 r03
"vmov q12, %q17                   \n" // sum02
"vmov q13, %q17                   \n" // sum03
"vshll.u16 q14, d30, #16          \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q12, %q8, q14           \n"
"vmla.f32 q11, %q9, q14           \n"
"vmla.f32 q13, %q8, q15           \n"
"vmla.f32 q10, %q10, q14          \n"
"vmla.f32 q12, %q9, q15           \n"
"vmla.f32 q11, %q10, q15          \n"
// "pld [%1, #128]                   \n"
"vld1.u16 {d30-d31}, [%1 :64]     \n" // r04 r05
"vshll.u16 q14, d30, #16          \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q13, %q9, q14           \n"
"vmla.f32 q12, %q10, q14          \n"
"vmla.f32 q13, %q10, q15          \n"
"pld [%2, #128]                   \n"
"vld1.u16 {d30-d31}, [%2 :64]!    \n" // r10 r11
"vshll.u16 q14, d30, #16          \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q10, %q11, q14          \n"
"vmla.f32 q11, %q11, q15          \n"
"vmla.f32 q10, %q12, q15          \n"
"pld [%2, #128]                   \n"
"vld1.u16 {d30-d31}, [%2 :64]!    \n" // r12 r13
"vshll.u16 q14, d30, #16          \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q12, %q11, q14          \n"
"vmla.f32 q11, %q12, q14          \n"
"vmla.f32 q13, %q11, q15          \n"
"vmla.f32 q10, %q13, q14          \n"
"vmla.f32 q12, %q12, q15          \n"
"vmla.f32 q11, %q13, q15          \n"
// "pld [%2, #128]                   \n"
"vld1.u16 {d30-d31}, [%2 :64]     \n" // r14 r15
"vshll.u16 q14, d30, #16          \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q13, %q12, q14          \n"
"vmla.f32 q12, %q13, q14          \n"
"vmla.f32 q13, %q13, q15          \n"
"pld [%3, #128]                   \n"
"vld1.u16 {d30-d31}, [%3 :64]!    \n" // r20 r21
"vshll.u16 q14, d30, #16          \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q10, %q14, q14          \n"
"vmla.f32 q11, %q14, q15          \n"
"vmla.f32 q10, %q15, q15          \n"
"pld [%3, #128]                   \n"
"vld1.u16 {d30-d31}, [%3 :64]!    \n" // r22 r23
"vshll.u16 q14, d30, #16          \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q12, %q14, q14          \n"
"vmla.f32 q11, %q15, q14          \n"
"vmla.f32 q13, %q14, q15          \n"
"vmla.f32 q10, %q16, q14          \n"
"vmla.f32 q12, %q15, q15          \n"
"vmla.f32 q11, %q16, q15          \n"
// "pld [%3, #128]                   \n"
"vld1.u16 {d30-d31}, [%3 :64]     \n" // r24 r25
"vshll.u16 q14, d30, #16          \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q13, %q15, q14          \n"
"vmla.f32 q12, %q16, q14          \n"
"vmla.f32 q13, %q16, q15          \n"
"vshrn.u32 d20, q10, #16          \n"
"vshrn.u32 d21, q11, #16          \n"
"vshrn.u32 d22, q12, #16          \n"
"vshrn.u32 d23, q13, #16          \n"
"vst1.u16 {d20-d23}, [%0 :64]!    \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2)       // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256]       \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1] \n" // r00 r01 r02 r03
"mov v18.16b, %17.16b             \n" // sum00
"mov v19.16b, %17.16b             \n" // sum01
"shll v12.4s, v12.4h, #16         \n"
"shll v13.4s, v13.4h, #16         \n"
"fmul v16.4s, %8.4s, v12.4s       \n"
"fmul v17.4s, %8.4s, v13.4s       \n"
"shll v14.4s, v14.4h, #16         \n"
"shll v15.4s, v15.4h, #16         \n"
"fmla v18.4s, %9.4s, v13.4s       \n"
"fmla v19.4s, %9.4s, v14.4s       \n"
"prfm pldl1keep, [%2, #256]       \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2] \n" // r10 r11 r12 r13
"fmla v16.4s, %10.4s, v14.4s      \n"
"fmla v17.4s, %10.4s, v15.4s      \n"
"shll v20.4s, v20.4h, #16         \n"
"shll v21.4s, v21.4h, #16         \n"
"fmla v18.4s, %11.4s, v20.4s      \n"
"fmla v19.4s, %11.4s, v21.4s      \n"
"shll v22.4s, v22.4h, #16         \n"
"shll v23.4s, v23.4h, #16         \n"
"fmla v16.4s, %12.4s, v21.4s      \n"
"fmla v17.4s, %12.4s, v22.4s      \n"
"prfm pldl1keep, [%3, #256]       \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%3] \n" // r20 r21 r22 r23
"fmla v18.4s, %13.4s, v22.4s      \n"
"fmla v19.4s, %13.4s, v23.4s      \n"
"shll v12.4s, v12.4h, #16         \n"
"shll v13.4s, v13.4h, #16         \n"
"fmla v16.4s, %14.4s, v12.4s      \n"
"fmla v17.4s, %14.4s, v13.4s      \n"
"shll v14.4s, v14.4h, #16         \n"
"shll v15.4s, v15.4h, #16         \n"
"fmla v18.4s, %15.4s, v13.4s      \n"
"fmla v19.4s, %15.4s, v14.4s      \n"
"add %1, %1, #16                  \n"
"fmla v16.4s, %16.4s, v14.4s      \n"
"fmla v17.4s, %16.4s, v15.4s      \n"
"add %2, %2, #16                  \n"
"fadd v18.4s, v18.4s, v16.4s      \n"
"fadd v19.4s, v19.4s, v17.4s      \n"
"add %3, %3, #16                  \n"
"shrn v18.4h, v18.4s, #16         \n"
"shrn v19.4h, v19.4s, #16         \n"
"st1 {v18.4h, v19.4h}, [%0], #16  \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2)       // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
asm volatile(
"pld [%1, #256]                   \n"
"vld1.u16 {d28-d31}, [%1 :64]     \n" // r00 r01 r02 r03
"vmov q10, %q17                   \n" // sum00
"vmov q11, %q17                   \n" // sum01
"vshll.u16 q12, d28, #16          \n"
"vshll.u16 q13, d29, #16          \n"
"vmla.f32 q10, %q8, q12           \n"
"vmla.f32 q11, %q8, q13           \n"
"vshll.u16 q14, d30, #16          \n"
"vmla.f32 q10, %q9, q13           \n"
"vmla.f32 q11, %q9, q14           \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q10, %q10, q14          \n"
"vmla.f32 q11, %q10, q15          \n"
"pld [%2, #256]                   \n"
"vld1.u16 {d28-d31}, [%2 :64]     \n" // r10 r11 r12 r13
"vshll.u16 q12, d28, #16          \n"
"vshll.u16 q13, d29, #16          \n"
"vmla.f32 q10, %q11, q12          \n"
"vmla.f32 q11, %q11, q13          \n"
"vshll.u16 q14, d30, #16          \n"
"vmla.f32 q10, %q12, q13          \n"
"vmla.f32 q11, %q12, q14          \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q10, %q13, q14          \n"
"vmla.f32 q11, %q13, q15          \n"
"pld [%3, #256]                   \n"
"vld1.u16 {d28-d31}, [%3 :64]     \n" // r20 r21 r22 r23
"vshll.u16 q12, d28, #16          \n"
"vshll.u16 q13, d29, #16          \n"
"vmla.f32 q10, %q14, q12          \n"
"vmla.f32 q11, %q14, q13          \n"
"vshll.u16 q14, d30, #16          \n"
"vmla.f32 q10, %q15, q13          \n"
"vmla.f32 q11, %q15, q14          \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q10, %q16, q14          \n"
"vmla.f32 q11, %q16, q15          \n"
"add %1, %1, #16                  \n"
"add %2, %2, #16                  \n"
"vshrn.u32 d20, q10, #16          \n"
"vshrn.u32 d21, q11, #16          \n"
"add %3, %3, #16                  \n"
"vst1.u16 {d20-d21}, [%0 :64]!    \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2)       // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
}
// scalar (intrinsics) tail: one output pixel (4 packed channels) at a time
for (; j < outw; j++)
{
float32x4_t _sum0 = _bias0;
float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
float32x4_t _r10 = vcvt_f32_bf16(vld1_u16(r1));
float32x4_t _r11 = vcvt_f32_bf16(vld1_u16(r1 + 4));
float32x4_t _r12 = vcvt_f32_bf16(vld1_u16(r1 + 8));
float32x4_t _r20 = vcvt_f32_bf16(vld1_u16(r2));
float32x4_t _r21 = vcvt_f32_bf16(vld1_u16(r2 + 4));
float32x4_t _r22 = vcvt_f32_bf16(vld1_u16(r2 + 8));
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
vst1_u16(outptr0, vcvt_bf16_f32(_sum0));
r0 += 4;
r1 += 4;
r2 += 4;
outptr0 += 4;
}
// skip the 2-pixel right apron (stride 1 consumed outw of w = outw + 2 pixels)
r0 += 2 * 4;
r1 += 2 * 4;
r2 += 2 * 4;
}
}
}
// Depthwise 3x3 convolution, stride 2, pack4 channel layout, bf16 storage.
// Same bf16 convention as the stride-1 kernel above: unsigned short holds the
// upper 16 bits of fp32; "shll #16" widens bf16 -> fp32, "shrn #16" narrows
// fp32 -> bf16 (truncation). One group = one packed-4 channel slice.
static void convdw3x3s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c; // depthwise: channels processed independently
// after consuming 2*outw input pixels horizontally, skip to the start of the
// row two below (stride 2 vertically): (w - 2*outw) apron + one full row, x4 lanes
const int tailstep = (w - 2 * outw + w) * 4;
const float* bias = _bias; // fp32 bias, 4 lanes per group; may be empty
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
const unsigned short* k0 = kernel.row<const unsigned short>(g);
unsigned short* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
// three consecutive input rows feed one output row
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
// widen the nine bf16 kernel taps (4 packed channels each) to fp32 once per group
float32x4_t _k00 = vcvt_f32_bf16(vld1_u16(k0));
float32x4_t _k01 = vcvt_f32_bf16(vld1_u16(k0 + 4));
float32x4_t _k02 = vcvt_f32_bf16(vld1_u16(k0 + 8));
float32x4_t _k10 = vcvt_f32_bf16(vld1_u16(k0 + 12));
float32x4_t _k11 = vcvt_f32_bf16(vld1_u16(k0 + 16));
float32x4_t _k12 = vcvt_f32_bf16(vld1_u16(k0 + 20));
float32x4_t _k20 = vcvt_f32_bf16(vld1_u16(k0 + 24));
float32x4_t _k21 = vcvt_f32_bf16(vld1_u16(k0 + 28));
float32x4_t _k22 = vcvt_f32_bf16(vld1_u16(k0 + 32));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
#if __aarch64__
// 4 output columns per asm block (consumes 8 input pixels + 1 lookahead per row)
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%1, #256]       \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n" // r00 r01 r02 r03
"mov v28.16b, %17.16b             \n" // sum00
"mov v29.16b, %17.16b             \n" // sum01
"mov v30.16b, %17.16b             \n" // sum02
"mov v31.16b, %17.16b             \n" // sum03
"prfm pldl1keep, [%1, #256]       \n"
"ld1 {v14.4h, v15.4h, v16.4h, v17.4h}, [%1], #32 \n" // r04 r05 r06 r07
"shll v10.4s, v10.4h, #16         \n"
"shll v11.4s, v11.4h, #16         \n"
"shll v12.4s, v12.4h, #16         \n"
"shll v13.4s, v13.4h, #16         \n"
"prfm pldl1keep, [%1, #64]        \n"
"ld1 {v18.4h}, [%1]               \n" // r08
"shll v14.4s, v14.4h, #16         \n"
"shll v15.4s, v15.4h, #16         \n"
"fmla v28.4s, %8.4s, v10.4s       \n"
"fmla v29.4s, %8.4s, v12.4s       \n"
"shll v16.4s, v16.4h, #16         \n"
"fmla v30.4s, %8.4s, v14.4s       \n"
"fmla v31.4s, %8.4s, v16.4s       \n"
"shll v17.4s, v17.4h, #16         \n"
"fmla v28.4s, %9.4s, v11.4s       \n"
"fmla v29.4s, %9.4s, v13.4s       \n"
"fmla v30.4s, %9.4s, v15.4s       \n"
"fmla v31.4s, %9.4s, v17.4s       \n"
"prfm pldl1keep, [%2, #256]       \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" // r10 r11 r12 r13
"fmla v28.4s, %10.4s, v12.4s      \n"
"fmla v29.4s, %10.4s, v14.4s      \n"
"shll v18.4s, v18.4h, #16         \n"
"fmla v30.4s, %10.4s, v16.4s      \n"
"fmla v31.4s, %10.4s, v18.4s      \n"
"prfm pldl1keep, [%2, #256]       \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" // r14 r15 r16 r17
"shll v20.4s, v20.4h, #16         \n"
"shll v21.4s, v21.4h, #16         \n"
"shll v22.4s, v22.4h, #16         \n"
"shll v23.4s, v23.4h, #16         \n"
"prfm pldl1keep, [%2, #64]        \n"
"ld1 {v19.4h}, [%2]               \n" // r18
"shll v24.4s, v24.4h, #16         \n"
"shll v25.4s, v25.4h, #16         \n"
"fmla v28.4s, %11.4s, v20.4s      \n"
"fmla v29.4s, %11.4s, v22.4s      \n"
"shll v26.4s, v26.4h, #16         \n"
"fmla v30.4s, %11.4s, v24.4s      \n"
"fmla v31.4s, %11.4s, v26.4s      \n"
"shll v27.4s, v27.4h, #16         \n"
"fmla v28.4s, %12.4s, v21.4s      \n"
"fmla v29.4s, %12.4s, v23.4s      \n"
"fmla v30.4s, %12.4s, v25.4s      \n"
"fmla v31.4s, %12.4s, v27.4s      \n"
"prfm pldl1keep, [%3, #256]       \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n" // r20 r21 r22 r23
"fmla v28.4s, %13.4s, v22.4s      \n"
"fmla v29.4s, %13.4s, v24.4s      \n"
"shll v19.4s, v19.4h, #16         \n"
"fmla v30.4s, %13.4s, v26.4s      \n"
"fmla v31.4s, %13.4s, v19.4s      \n"
"prfm pldl1keep, [%3, #256]       \n"
"ld1 {v14.4h, v15.4h, v16.4h, v17.4h}, [%3], #32 \n" // r24 r25 r26 r27
"shll v10.4s, v10.4h, #16         \n"
"shll v11.4s, v11.4h, #16         \n"
"shll v12.4s, v12.4h, #16         \n"
"shll v13.4s, v13.4h, #16         \n"
"prfm pldl1keep, [%3, #64]        \n"
"ld1 {v18.4h}, [%3]               \n" // r28
"shll v14.4s, v14.4h, #16         \n"
"shll v15.4s, v15.4h, #16         \n"
"fmla v28.4s, %14.4s, v10.4s      \n"
"fmla v29.4s, %14.4s, v12.4s      \n"
"shll v16.4s, v16.4h, #16         \n"
"fmla v30.4s, %14.4s, v14.4s      \n"
"fmla v31.4s, %14.4s, v16.4s      \n"
"shll v17.4s, v17.4h, #16         \n"
"fmla v28.4s, %15.4s, v11.4s      \n"
"fmla v29.4s, %15.4s, v13.4s      \n"
"fmla v30.4s, %15.4s, v15.4s      \n"
"fmla v31.4s, %15.4s, v17.4s      \n"
"fmla v28.4s, %16.4s, v12.4s      \n"
"fmla v29.4s, %16.4s, v14.4s      \n"
"shll v18.4s, v18.4h, #16         \n"
"fmla v30.4s, %16.4s, v16.4s      \n"
"fmla v31.4s, %16.4s, v18.4s      \n"
"shrn v28.4h, v28.4s, #16         \n"
"shrn v29.4h, v29.4s, #16         \n"
"shrn v30.4h, v30.4s, #16         \n"
"shrn v31.4h, v31.4s, #16         \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2)       // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
#endif // __aarch64__
// 2 output columns per asm block
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256]       \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n" // r00 r01 r02 r03
"mov v22.16b, %17.16b             \n" // sum00
"mov v23.16b, %17.16b             \n" // sum01
"shll v10.4s, v10.4h, #16         \n"
"shll v11.4s, v11.4h, #16         \n"
"fmul v20.4s, %8.4s, v10.4s       \n"
"shll v12.4s, v12.4h, #16         \n"
"shll v13.4s, v13.4h, #16         \n"
"fmul v21.4s, %8.4s, v12.4s       \n"
"prfm pldl1keep, [%1, #64]        \n"
"ld1 {v14.4h}, [%1]               \n" // r04
"fmla v22.4s, %9.4s, v11.4s       \n"
"fmla v23.4s, %9.4s, v13.4s       \n"
"prfm pldl1keep, [%2, #256]       \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r10 r11 r12 r13
"shll v14.4s, v14.4h, #16         \n"
"fmla v20.4s, %10.4s, v12.4s      \n"
"fmla v21.4s, %10.4s, v14.4s      \n"
"shll v16.4s, v16.4h, #16         \n"
"shll v17.4s, v17.4h, #16         \n"
"fmla v22.4s, %11.4s, v16.4s      \n"
"shll v18.4s, v18.4h, #16         \n"
"shll v19.4s, v19.4h, #16         \n"
"fmla v23.4s, %11.4s, v18.4s      \n"
"prfm pldl1keep, [%2, #64]        \n"
"ld1 {v15.4h}, [%2]               \n" // r14
"fmla v20.4s, %12.4s, v17.4s      \n"
"fmla v21.4s, %12.4s, v19.4s      \n"
"prfm pldl1keep, [%3, #256]       \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n" // r20 r21 r22 r23
"shll v15.4s, v15.4h, #16         \n"
"fmla v22.4s, %13.4s, v18.4s      \n"
"fmla v23.4s, %13.4s, v15.4s      \n"
"shll v10.4s, v10.4h, #16         \n"
"shll v11.4s, v11.4h, #16         \n"
"fmla v20.4s, %14.4s, v10.4s      \n"
"shll v12.4s, v12.4h, #16         \n"
"shll v13.4s, v13.4h, #16         \n"
"fmla v21.4s, %14.4s, v12.4s      \n"
"prfm pldl1keep, [%3, #64]        \n"
"ld1 {v14.4h}, [%3]               \n" // r24
"fmla v22.4s, %15.4s, v11.4s      \n"
"fmla v23.4s, %15.4s, v13.4s      \n"
"shll v14.4s, v14.4h, #16         \n"
"fmla v20.4s, %16.4s, v12.4s      \n"
"fmla v21.4s, %16.4s, v14.4s      \n"
"fadd v22.4s, v20.4s, v22.4s      \n"
"fadd v23.4s, v21.4s, v23.4s      \n"
"shrn v22.4h, v22.4s, #16         \n"
"shrn v23.4h, v23.4s, #16         \n"
"st1 {v22.4h, v23.4h}, [%0], #16  \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2)       // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
asm volatile(
"pld [%1, #256]                   \n"
"vld1.u16 {d28-d31}, [%1 :64]!    \n" // r00 r01 r02 r03
"vmov q10, %q17                   \n" // sum00
"vmov q11, %q17                   \n" // sum01
"vshll.u16 q12, d28, #16          \n"
"vshll.u16 q13, d29, #16          \n"
"vmla.f32 q10, %q8, q12           \n"
"vshll.u16 q14, d30, #16          \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q11, %q8, q14           \n"
"vld1.u16 {d25}, [%1]             \n" // r04
"vmla.f32 q10, %q9, q13           \n"
"vmla.f32 q11, %q9, q15           \n"
"vshll.u16 q12, d25, #16          \n"
"vmla.f32 q10, %q10, q14          \n"
"pld [%2, #256]                   \n"
"vld1.u16 {d28-d31}, [%2 :64]!    \n" // r10 r11 r12 r13
"vmla.f32 q11, %q10, q12          \n"
"vshll.u16 q12, d28, #16          \n"
"vshll.u16 q13, d29, #16          \n"
"vmla.f32 q10, %q11, q12          \n"
"vshll.u16 q14, d30, #16          \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q11, %q11, q14          \n"
"vld1.u16 {d25}, [%2]             \n" // r14
"vmla.f32 q10, %q12, q13          \n"
"vmla.f32 q11, %q12, q15          \n"
"vshll.u16 q12, d25, #16          \n"
"vmla.f32 q10, %q13, q14          \n"
"pld [%3, #256]                   \n"
"vld1.u16 {d28-d31}, [%3 :64]!    \n" // r20 r21 r22 r23
"vmla.f32 q11, %q13, q12          \n"
"vshll.u16 q12, d28, #16          \n"
"vshll.u16 q13, d29, #16          \n"
"vmla.f32 q10, %q14, q12          \n"
"vshll.u16 q14, d30, #16          \n"
"vshll.u16 q15, d31, #16          \n"
"vmla.f32 q11, %q14, q14          \n"
"vld1.u16 {d25}, [%3]             \n" // r24
"vmla.f32 q10, %q15, q13          \n"
"vmla.f32 q11, %q15, q15          \n"
"vshll.u16 q12, d25, #16          \n"
"vmla.f32 q10, %q16, q14          \n"
"vmla.f32 q11, %q16, q12          \n"
"vshrn.u32 d20, q10, #16          \n"
"vshrn.u32 d21, q11, #16          \n"
"vst1.u16 {d20-d21}, [%0 :64]!    \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2)       // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
}
// scalar (intrinsics) tail: one output pixel (4 packed channels) at a time
for (; j < outw; j++)
{
float32x4_t _sum0 = _bias0;
float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
float32x4_t _r10 = vcvt_f32_bf16(vld1_u16(r1));
float32x4_t _r11 = vcvt_f32_bf16(vld1_u16(r1 + 4));
float32x4_t _r12 = vcvt_f32_bf16(vld1_u16(r1 + 8));
float32x4_t _r20 = vcvt_f32_bf16(vld1_u16(r2));
float32x4_t _r21 = vcvt_f32_bf16(vld1_u16(r2 + 4));
float32x4_t _r22 = vcvt_f32_bf16(vld1_u16(r2 + 8));
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
vst1_u16(outptr0, vcvt_bf16_f32(_sum0));
r0 += 2 * 4; // horizontal stride 2, pack4 => advance 8 shorts
r1 += 2 * 4;
r2 += 2 * 4;
outptr0 += 4;
}
// jump to the input row two below (vertical stride 2); see tailstep above
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
rectangle.h | // This code is modified from AutoMine and GraphZero
// Daniel Mawhirter and Bo Wu. SOSP 2019.
// AutoMine: Harmonizing High-Level Abstraction and High Performance for Graph Mining
// Count rectangles (4-cycles): choose v0 > v1 > v2 with v1, v2 both adjacent
// to v0, then add every common neighbour of v1 and v2 that closes the cycle.
// NOTE(review): the early "break"s assume each adjacency list g.N(x) is
// sorted ascending — confirm the graph loader guarantees this.
#pragma omp parallel for schedule(dynamic,1) reduction(+:counter)
for (vidType v0 = 0; v0 < g.V(); v0++) {
for (vidType v1 : g.N(v0)) {
if (v1 >= v0) break; // symmetry breaking: only v1 < v0 (early exit relies on sorted N(v0))
auto y1 = g.N(v1); // hoist v1's neighbour list out of the inner loop
for (vidType v2 : g.N(v0)) {
if (v2 >= v1) break; // enforce v2 < v1 < v0
// presumably counts w in N(v1) ∩ N(v2) with w bounded by v0 — verify intersection_num semantics
counter += intersection_num(y1, g.N(v2), v0);
}
}
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% snibgo (Alan Gibson) %
% January 2022 %
% %
% %
% %
% Copyright @ 2022 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
#define MaxTokenLen 100
#define RpnInit 100
#define TableExtend 0.1
#define InitNumOprStack 50
#define MinValStackSize 100
#define InitNumUserSymbols 50
typedef long double fxFltType;
typedef enum {
oAddEq,
oSubtractEq,
oMultiplyEq,
oDivideEq,
oPlusPlus,
oSubSub,
oAdd,
oSubtract,
oMultiply,
oDivide,
oModulus,
oUnaryPlus,
oUnaryMinus,
oLshift,
oRshift,
oEq,
oNotEq,
oLtEq,
oGtEq,
oLt,
oGt,
oLogAnd,
oLogOr,
oLogNot,
oBitAnd,
oBitOr,
oBitNot,
oPow,
oQuery,
oColon,
oOpenParen,
oCloseParen,
oOpenBracket,
oCloseBracket,
oOpenBrace,
oCloseBrace,
oAssign,
oNull
} OperatorE;
typedef struct {
OperatorE op;
const char * str;
int precedence; /* Higher number is higher precedence */
int nArgs;
} OperatorT;
static const OperatorT Operators[] = {
{oAddEq, "+=", 12, 1},
{oSubtractEq, "-=", 12, 1},
{oMultiplyEq, "*=", 13, 1},
{oDivideEq, "/=", 13, 1},
{oPlusPlus, "++", 12, 0},
{oSubSub, "--", 12, 0},
{oAdd, "+", 12, 2},
{oSubtract, "-", 12, 2},
{oMultiply, "*", 13, 2},
{oDivide, "/", 13, 2},
{oModulus, "%", 13, 2},
{oUnaryPlus, "+", 14, 1},
{oUnaryMinus, "-", 14, 1},
{oLshift, "<<", 11, 2},
{oRshift, ">>", 11, 2},
{oEq, "==", 9, 2},
{oNotEq, "!=", 9, 2},
{oLtEq, "<=", 10, 2},
{oGtEq, ">=", 10, 2},
{oLt, "<", 10, 2},
{oGt, ">", 10, 2},
{oLogAnd, "&&", 6, 2},
{oLogOr, "||", 5, 2},
{oLogNot, "!", 16, 1},
{oBitAnd, "&", 8, 2},
{oBitOr, "|", 7, 2},
{oBitNot, "~", 16, 1},
{oPow, "^", 15, 2},
{oQuery, "?", 4, 1},
{oColon, ":", 4, 1},
{oOpenParen, "(", 0, 0},
{oCloseParen, ")", 0, 0},
{oOpenBracket, "[", 0, 0},
{oCloseBracket,"]", 0, 0},
{oOpenBrace, "{", 0, 0},
{oCloseBrace, "}", 0, 0},
{oAssign, "=", 3, 1},
{oNull, "onull", 17, 0}
};
typedef enum {
cEpsilon,
cE,
cOpaque,
cPhi,
cPi,
cQuantumRange,
cQuantumScale,
cTransparent,
cMaxRgb,
cNull
} ConstantE;
typedef struct {
ConstantE cons;
fxFltType val;
const char * str;
} ConstantT;
static const ConstantT Constants[] = {
{cEpsilon, MagickEpsilon, "epsilon"},
{cE, 2.7182818284590452354, "e"},
{cOpaque, 1.0, "opaque"},
{cPhi, MagickPHI, "phi"},
{cPi, MagickPI, "pi"},
{cQuantumRange, QuantumRange, "quantumrange"},
{cQuantumScale, QuantumScale, "quantumscale"},
{cTransparent, 0.0, "transparent"},
{cMaxRgb, QuantumRange, "MaxRGB"},
{cNull, 0.0, "cnull"}
};
#define FirstFunc ((FunctionE) (oNull+1))
typedef enum {
fAbs = oNull+1,
#if defined(MAGICKCORE_HAVE_ACOSH)
fAcosh,
#endif
fAcos,
#if defined(MAGICKCORE_HAVE_J1)
fAiry,
#endif
fAlt,
#if defined(MAGICKCORE_HAVE_ASINH)
fAsinh,
#endif
fAsin,
#if defined(MAGICKCORE_HAVE_ATANH)
fAtanh,
#endif
fAtan2,
fAtan,
fCeil,
fChannel,
fClamp,
fCosh,
fCos,
fDebug,
fDrc,
#if defined(MAGICKCORE_HAVE_ERF)
fErf,
#endif
fExp,
fFloor,
fGauss,
fGcd,
fHypot,
fInt,
fIsnan,
#if defined(MAGICKCORE_HAVE_J0)
fJ0,
#endif
#if defined(MAGICKCORE_HAVE_J1)
fJ1,
#endif
#if defined(MAGICKCORE_HAVE_J1)
fJinc,
#endif
fLn,
fLogtwo,
fLog,
fMax,
fMin,
fMod,
fNot,
fPow,
fRand,
fRound,
fSign,
fSinc,
fSinh,
fSin,
fSqrt,
fSquish,
fTanh,
fTan,
fTrunc,
fDo,
fFor,
fIf,
fWhile,
fU,
fU0,
fUP,
fS,
fV,
fP,
fSP,
fVP,
fNull
} FunctionE;
typedef struct {
FunctionE func;
const char * str;
int nArgs;
} FunctionT;
static const FunctionT Functions[] = {
{fAbs, "abs" , 1},
#if defined(MAGICKCORE_HAVE_ACOSH)
{fAcosh, "acosh" , 1},
#endif
{fAcos, "acos" , 1},
#if defined(MAGICKCORE_HAVE_J1)
{fAiry, "airy" , 1},
#endif
{fAlt, "alt" , 1},
#if defined(MAGICKCORE_HAVE_ASINH)
{fAsinh, "asinh" , 1},
#endif
{fAsin, "asin" , 1},
#if defined(MAGICKCORE_HAVE_ATANH)
{fAtanh, "atanh" , 1},
#endif
{fAtan2, "atan2" , 2},
{fAtan, "atan" , 1},
{fCeil, "ceil" , 1},
{fChannel, "channel", 5}, /* Special case: allow zero to five arguments. */
{fClamp, "clamp" , 1},
{fCosh, "cosh" , 1},
{fCos, "cos" , 1},
{fDebug, "debug" , 1},
{fDrc, "drc" , 2},
#if defined(MAGICKCORE_HAVE_ERF)
{fErf, "erf" , 1},
#endif
{fExp, "exp" , 1},
{fFloor, "floor" , 1},
{fGauss, "gauss" , 2},
{fGcd, "gcd" , 2},
{fHypot, "hypot" , 2},
{fInt, "int" , 1},
{fIsnan, "isnan" , 1},
#if defined(MAGICKCORE_HAVE_J0)
{fJ0, "j0" , 1},
#endif
#if defined(MAGICKCORE_HAVE_J1)
{fJ1, "j1" , 1},
#endif
#if defined(MAGICKCORE_HAVE_J1)
{fJinc, "jinc" , 1},
#endif
{fLn, "ln" , 1},
{fLogtwo, "logtwo", 1},
{fLog, "log" , 1},
{fMax, "max" , 2},
{fMin, "min" , 2},
{fMod, "mod" , 2},
{fNot, "not" , 1},
{fPow, "pow" , 2},
{fRand, "rand" , 0},
{fRound, "round" , 1},
{fSign, "sign" , 1},
{fSinc, "sinc" , 1},
{fSinh, "sinh" , 1},
{fSin, "sin" , 1},
{fSqrt, "sqrt" , 1},
{fSquish, "squish", 1},
{fTanh, "tanh" , 1},
{fTan, "tan" , 1},
{fTrunc, "trunc" , 1},
{fDo, "do", 2},
{fFor, "for", 3},
{fIf, "if", 3},
{fWhile, "while", 2},
{fU, "u", 1},
{fU0, "u0", 0},
{fUP, "up", 3},
{fS, "s", 0},
{fV, "v", 0},
{fP, "p", 2},
{fSP, "sp", 2},
{fVP, "vp", 2},
{fNull, "fnull" , 0}
};
#define FirstImgAttr ((ImgAttrE) (fNull+1))
typedef enum {
aDepth = fNull+1,
aExtent,
aKurtosis,
aMaxima,
aMean,
aMedian,
aMinima,
aPage,
aPageX,
aPageY,
aPageWid,
aPageHt,
aPrintsize,
aPrintsizeX,
aPrintsizeY,
aQuality,
aRes,
aResX,
aResY,
aSkewness,
aStdDev,
aH,
aN,
aT,
aW,
aZ,
aNull
} ImgAttrE;
typedef struct {
ImgAttrE attr;
const char * str;
int NeedStats;
} ImgAttrT;
static const ImgAttrT ImgAttrs[] = {
{aDepth, "depth", 1},
{aExtent, "extent", 0},
{aKurtosis, "kurtosis", 1},
{aMaxima, "maxima", 1},
{aMean, "mean", 1},
{aMedian, "median", 1},
{aMinima, "minima", 1},
{aPage, "page", 0},
{aPageX, "page.x", 0},
{aPageY, "page.y", 0},
{aPageWid, "page.width", 0},
{aPageHt, "page.height", 0},
{aPrintsize, "printsize", 0},
{aPrintsizeX, "printsize.x", 0},
{aPrintsizeY, "printsize.y", 0},
{aQuality, "quality", 0},
{aRes, "resolution", 0},
{aResX, "resolution.x", 0},
{aResY, "resolution.y", 0},
{aSkewness, "skewness", 1},
{aStdDev, "standard_deviation", 1},
{aH, "h", 0},
{aN, "n", 0},
{aT, "t", 0},
{aW, "w", 0},
{aZ, "z", 0},
{aNull, "anull", 0},
{aNull, "anull", 0},
{aNull, "anull", 0},
{aNull, "anull", 0}
};
#define FirstSym ((SymbolE) (aNull+1))
typedef enum {
sHue = aNull+1,
sIntensity,
sLightness,
sLuma,
sLuminance,
sSaturation,
sA,
sB,
sC,
sG,
sI,
sJ,
sK,
sM,
sO,
sR,
sY,
sNull
} SymbolE;
typedef struct {
SymbolE sym;
const char * str;
} SymbolT;
static const SymbolT Symbols[] = {
{sHue, "hue"},
{sIntensity, "intensity"},
{sLightness, "lightness"},
{sLuma, "luma"},
{sLuminance, "luminance"},
{sSaturation, "saturation"},
{sA, "a"},
{sB, "b"},
{sC, "c"},
{sG, "g"},
{sI, "i"},
{sJ, "j"},
{sK, "k"},
{sM, "m"},
{sO, "o"},
{sR, "r"},
{sY, "y"},
{sNull, "snull"}
};
/*
There is no way to access new value of pixels. This might be a future enhancement, eg "q".
fP, oU and oV can have channel qualifier such as "u.r".
For meta channels, we might also allow numbered channels eg "u.2" or "u.16".
... or have extra argument to p[].
*/
#define FirstCont (sNull+1)
/* Run-time controls are in the RPN, not explicitly in the input string. */
typedef enum {
rGoto = FirstCont,
rIfZeroGoto,
rIfNotZeroGoto,
rCopyFrom,
rCopyTo,
rZerStk,
rNull
} ControlE;
typedef struct {
ControlE cont;
const char * str;
int nArgs;
} ControlT;
static const ControlT Controls[] = {
{rGoto, "goto", 0},
{rIfZeroGoto, "ifzerogoto", 1},
{rIfNotZeroGoto, "ifnotzerogoto", 1},
{rCopyFrom, "copyfrom", 0},
{rCopyTo, "copyto", 1},
{rZerStk, "zerstk", 0},
{rNull, "rnull", 0}
};
#define NULL_ADDRESS -2
typedef struct {
int addrQuery;
int addrColon;
} TernaryT;
typedef struct {
const char * str;
PixelChannel pixChan;
} ChannelT;
#define NO_CHAN_QUAL ((PixelChannel) (-1))
#define THIS_CHANNEL ((PixelChannel) (-2))
#define HUE_CHANNEL ((PixelChannel) (-3))
#define SAT_CHANNEL ((PixelChannel) (-4))
#define LIGHT_CHANNEL ((PixelChannel) (-5))
#define INTENSITY_CHANNEL ((PixelChannel) (-6))
static const ChannelT Channels[] = {
{"r", RedPixelChannel},
{"g", GreenPixelChannel},
{"b", BluePixelChannel},
{"c", CyanPixelChannel},
{"m", MagentaPixelChannel},
{"y", YellowPixelChannel},
{"k", BlackPixelChannel},
{"a", AlphaPixelChannel},
{"o", AlphaPixelChannel},
{"hue", HUE_CHANNEL},
{"saturation", SAT_CHANNEL},
{"lightness", LIGHT_CHANNEL},
{"intensity", INTENSITY_CHANNEL},
{"all", CompositePixelChannel},
{"this", THIS_CHANNEL},
{"", NO_CHAN_QUAL}
};
/* The index into UserSymbols is also the index into run-time UserSymVals.
*/
typedef struct {
char * pex;
size_t len;
} UserSymbolT;
typedef enum {
etOperator,
etConstant,
etFunction,
etImgAttr,
etSymbol,
etColourConstant,
etControl
} ElementTypeE;
static const char * sElementTypes[] = {
"Operator",
"Constant",
"Function",
"ImgAttr",
"Symbol",
"ColConst",
"Control"
};
typedef struct {
ElementTypeE type;
fxFltType
val, val1, val2;
int oprNum;
int nArgs;
MagickBooleanType IsRelative;
MagickBooleanType DoPush;
int EleNdx;
int nDest; /* Number of Elements that "goto" this element */
PixelChannel ChannelQual;
ImgAttrE ImgAttrQual;
char * pExpStart;
int lenExp;
} ElementT;
typedef enum {
rtUnknown,
rtEntireImage,
rtCornerOnly
} RunTypeE;
typedef struct {
CacheView *View;
/* Other per-image metadata could go here. */
} ImgT;
typedef struct {
RandomInfo * magick_restrict random_info;
int numValStack;
int usedValStack;
fxFltType * ValStack;
fxFltType * UserSymVals;
Quantum * thisPixel;
} fxRtT;
struct _FxInfo {
Image * image;
size_t ImgListLen;
ssize_t ImgNum;
MagickBooleanType NeedStats;
MagickBooleanType GotStats;
MagickBooleanType NeedHsl;
MagickBooleanType DebugOpt; /* Whether "-debug" option is in effect */
MagickBooleanType ContainsDebug; /* Whether expression contains "debug ()" function */
char * expression;
char * pex;
char ShortExp[MagickPathExtent]; /* for reporting */
int teDepth;
char token[MagickPathExtent];
size_t lenToken;
int numElements;
int usedElements;
ElementT * Elements; /* Elements is read-only at runtime. */
int numUserSymbols;
int usedUserSymbols;
UserSymbolT * UserSymbols;
int numOprStack;
int usedOprStack;
int maxUsedOprStack;
OperatorE * OperatorStack;
ChannelStatistics ** statistics;
int precision;
RunTypeE runType;
RandomInfo
**magick_restrict random_infos;
ImgT * Imgs;
Image ** Images;
ExceptionInfo * exception;
fxRtT * fxrts;
};
/* Forward declarations for recursion.
*/
static MagickBooleanType TranslateStatementList
(FxInfo * pfx, const char * strLimit, char * chLimit);
static MagickBooleanType TranslateExpression
(FxInfo * pfx, const char * strLimit, char * chLimit, MagickBooleanType * needPopAll);
static MagickBooleanType GetFunction (FxInfo * pfx, FunctionE fe);
static MagickBooleanType InitFx (FxInfo * pfx, const Image * img,
  MagickBooleanType CalcAllStats, ExceptionInfo *exception)
/* Initialize the image-related fields of *pfx: image-list bookkeeping,
   option flags, the per-image virtual cache views (pfx->Imgs) and the
   image array (pfx->Images). CalcAllStats selects whether the whole
   image (rtEntireImage) or just a corner (rtCornerOnly) is processed.
   On allocation failure an exception is recorded, any cache views
   acquired so far are destroyed, and MagickFalse is returned.
*/
{
  ssize_t i=0;
  const Image * next;
  pfx->ImgListLen = GetImageListLength (img);
  pfx->ImgNum = GetImageIndexInList (img);
  pfx->image = (Image *)img;
  pfx->NeedStats = MagickFalse;
  pfx->GotStats = MagickFalse;
  pfx->NeedHsl = MagickFalse;
  /* "fx:debug" artifact turns on expression debugging. */
  pfx->DebugOpt = IsStringTrue (GetImageArtifact (img, "fx:debug"));
  pfx->statistics = NULL;
  pfx->Imgs = NULL;
  pfx->Images = NULL;
  pfx->exception = exception;
  pfx->precision = GetMagickPrecision ();
  pfx->random_infos = AcquireRandomInfoTLS ();
  pfx->ContainsDebug = MagickFalse;
  pfx->runType = (CalcAllStats) ? rtEntireImage : rtCornerOnly;
  /* One ImgT (cache view holder) per image in the list. */
  pfx->Imgs = (ImgT *)AcquireQuantumMemory (pfx->ImgListLen, sizeof (ImgT));
  if (!pfx->Imgs) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "Imgs", "%lu",
      (unsigned long) pfx->ImgListLen);
    return MagickFalse;
  }
  next = GetFirstImageInList (img);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    ImgT * pimg = &pfx->Imgs[i];
    pimg->View = AcquireVirtualCacheView (next, pfx->exception);
    if (!pimg->View) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), ResourceLimitFatalError,
        "View", "[%li]",
        (long) i);
      /* dealloc any done so far, and Imgs */
      for ( ; i > 0; i--) {
        pimg = &pfx->Imgs[i-1];
        pimg->View = DestroyCacheView (pimg->View);
      }
      pfx->Imgs=(ImgT *) RelinquishMagickMemory (pfx->Imgs);
      return MagickFalse;
    }
    i++;
  }
  pfx->Images = ImageListToArray (img, pfx->exception);
  return MagickTrue;
}
static MagickBooleanType DeInitFx (FxInfo * pfx)
/* Release everything InitFx() acquired (image array, cache views,
   per-thread RNGs) plus any per-image statistics gathered later.
   Safe when some members were never allocated. Always returns
   MagickTrue.
*/
{
  ssize_t i;
  if (pfx->Images) pfx->Images = (Image**) RelinquishMagickMemory (pfx->Images);
  if (pfx->Imgs) {
    /* Destroy views in reverse order of acquisition. */
    for (i = (ssize_t)GetImageListLength(pfx->image); i > 0; i--) {
      ImgT * pimg = &pfx->Imgs[i-1];
      pimg->View = DestroyCacheView (pimg->View);
    }
    pfx->Imgs=(ImgT *) RelinquishMagickMemory (pfx->Imgs);
  }
  pfx->random_infos = DestroyRandomInfoTLS (pfx->random_infos);
  if (pfx->statistics) {
    for (i = (ssize_t)GetImageListLength(pfx->image); i > 0; i--) {
      pfx->statistics[i-1]=(ChannelStatistics *) RelinquishMagickMemory (pfx->statistics[i-1]);
    }
    pfx->statistics = (ChannelStatistics**) RelinquishMagickMemory(pfx->statistics);
  }
  return MagickTrue;
}
static ElementTypeE TypeOfOpr (int op)
{
if (op < oNull) return etOperator;
if (op == oNull) return etConstant;
if (op <= fNull) return etFunction;
if (op <= aNull) return etImgAttr;
if (op <= sNull) return etSymbol;
if (op <= rNull) return etControl;
return (ElementTypeE) 0;
}
static char * SetPtrShortExp (FxInfo * pfx, char * pExp, size_t len)
/* Copy up to len-1 chars of pExp into pfx->ShortExp for error
   reporting, truncating long or multi-line text with "...".
   Returns pfx->ShortExp (empty string when pExp is NULL or len is 0).
*/
{
#define MaxLen 20
  size_t slen;
  char * p;
  *pfx->ShortExp = '\0';
  if (pExp && len) {
    /* CopyMagickString returns the length of the source string. */
    slen = CopyMagickString (pfx->ShortExp, pExp, len);
    if (slen > MaxLen) {
      /* Longer than we want to show: truncate with an ellipsis. */
      (void) CopyMagickString (pfx->ShortExp+MaxLen, "...", 4);
    }
    /* Stop at the first line break, replacing the rest with "...". */
    p = strchr (pfx->ShortExp, '\n');
    if (p) (void) CopyMagickString (p, "...", 4);
    p = strchr (pfx->ShortExp, '\r');
    if (p) (void) CopyMagickString (p, "...", 4);
  }
  return pfx->ShortExp;
}
static char * SetShortExp (FxInfo * pfx)
/* Convenience wrapper: abbreviate the text at the current parse
   position (pfx->pex) into pfx->ShortExp for error messages. */
{
  char * at = pfx->pex;
  return SetPtrShortExp (pfx, at, MaxTokenLen-1);
}
static int FindUserSymbol (FxInfo * pfx, char * name)
/* Linear search of the user-symbol table. Returns the index (which is
   also the index into the run-time UserSymVals array), or NULL_ADDRESS
   when the name is not present. Comparison is case-insensitive and
   requires an exact length match. */
{
  const size_t lenName = strlen (name);
  int ndx;
  for (ndx = 0; ndx < pfx->usedUserSymbols; ndx++) {
    const UserSymbolT * pus = &pfx->UserSymbols[ndx];
    if (pus->len == lenName && LocaleNCompare (name, pus->pex, lenName) == 0)
      return ndx;
  }
  return NULL_ADDRESS;
}
static MagickBooleanType ExtendUserSymbols (FxInfo * pfx)
/* Grow the user-symbol table by the standard TableExtend factor.
   Records an exception and returns MagickFalse on failure. */
{
  const int newNum = (int) ceil (pfx->numUserSymbols * (1 + TableExtend));
  pfx->numUserSymbols = newNum;
  pfx->UserSymbols = (UserSymbolT*) ResizeMagickMemory (
    pfx->UserSymbols, newNum * sizeof(UserSymbolT));
  if (pfx->UserSymbols == NULL) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "UserSymbols", "%i",
      pfx->numUserSymbols);
    return MagickFalse;
  }
  return MagickTrue;
}
static int AddUserSymbol (FxInfo * pfx, char * pex, size_t len)
/* Append a user symbol, stored as a pointer/length pair into the
   expression text. Returns its index, or -1 when the table cannot
   be grown. */
{
  int ndx;
  pfx->usedUserSymbols++;
  if (pfx->usedUserSymbols >= pfx->numUserSymbols && !ExtendUserSymbols (pfx))
    return -1;
  ndx = pfx->usedUserSymbols - 1;
  pfx->UserSymbols[ndx].pex = pex;
  pfx->UserSymbols[ndx].len = len;
  return ndx;
}
static void DumpTables (FILE * fh)
/* Debugging aid: write every operator, function, image attribute,
   symbol and control name to fh, grouped under headings.
   Fix: the group headings were previously written to stderr even when
   the caller supplied a different stream; they now go to fh like the
   entries themselves. */
{
  int i;
  for (i=0; i <= rNull; i++) {
    const char * str = "";
    if ( i < oNull) str = Operators[i].str;
    if (i >= FirstFunc && i < fNull) str = Functions[i-FirstFunc].str;
    if (i >= FirstImgAttr && i < aNull) str = ImgAttrs[i-FirstImgAttr].str;
    if (i >= FirstSym && i < sNull) str = Symbols[i-FirstSym].str;
    if (i >= FirstCont && i < rNull) str = Controls[i-FirstCont].str;
    if (i==0 ) fprintf (fh, "Operators:\n ");
    else if (i==oNull) fprintf (fh, "\nFunctions:\n ");
    else if (i==fNull) fprintf (fh, "\nImage attributes:\n ");
    else if (i==aNull) fprintf (fh, "\nSymbols:\n ");
    else if (i==sNull) fprintf (fh, "\nControls:\n ");
    fprintf (fh, " %s", str);
  }
  fprintf (fh, "\n");
}
static char * NameOfUserSym (FxInfo * pfx, int ndx, char * buf)
/* Copy user symbol number ndx (a non-terminated slice of the
   expression text) into buf as a NUL-terminated string; returns buf. */
{
  const UserSymbolT * entry;
  assert (ndx >= 0 && ndx < pfx->usedUserSymbols);
  entry = &pfx->UserSymbols[ndx];
  (void) CopyMagickString (buf, entry->pex, entry->len+1);
  return buf;
}
static void DumpUserSymbols (FxInfo * pfx, FILE * fh)
/* Debugging aid: list every user symbol, one per line, to fh. */
{
  char name[MagickPathExtent];
  int ndx;
  fprintf (fh, "UserSymbols (%i)\n", pfx->usedUserSymbols);
  for (ndx = 0; ndx < pfx->usedUserSymbols; ndx++)
    fprintf (fh, "  %i: '%s'\n", ndx, NameOfUserSym (pfx, ndx, name));
}
static MagickBooleanType BuildRPN (FxInfo * pfx)
/* Allocate the initial parser tables: the user-symbol table, the RPN
   element array and the operator stack. On failure an exception is
   recorded and MagickFalse is returned; tables allocated before the
   failure are not freed here (DestroyRPN() handles cleanup).
*/
{
  pfx->numUserSymbols = InitNumUserSymbols;
  pfx->usedUserSymbols = 0;
  pfx->UserSymbols = (UserSymbolT*) AcquireMagickMemory (pfx->numUserSymbols * sizeof(UserSymbolT));
  if (!pfx->UserSymbols) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "UserSymbols", "%i",
      pfx->numUserSymbols);
    return MagickFalse;
  }
  pfx->numElements = RpnInit;
  pfx->usedElements = 0;
  pfx->Elements = NULL;
  pfx->Elements = (ElementT*) AcquireMagickMemory (pfx->numElements * sizeof(ElementT));
  if (!pfx->Elements) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "Elements", "%i",
      pfx->numElements);
    return MagickFalse;
  }
  pfx->usedOprStack = 0;
  pfx->maxUsedOprStack = 0;
  pfx->numOprStack = InitNumOprStack;
  pfx->OperatorStack = (OperatorE*) AcquireMagickMemory (pfx->numOprStack * sizeof(OperatorE));
  if (!pfx->OperatorStack) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "OperatorStack", "%i",
      pfx->numOprStack);
    return MagickFalse;
  }
  return MagickTrue;
}
static MagickBooleanType AllocFxRt (FxInfo * pfx, fxRtT * pfxrt)
/* Initialize one per-thread run-time context: its RNG, value stack and
   user-symbol value array. Records an exception and returns MagickFalse
   on allocation failure (earlier allocations are left for DestroyFxRt).
*/
{
  int nRnd;
  int i;
  pfxrt->random_info = AcquireRandomInfo ();
  pfxrt->thisPixel = NULL;
  /* Warm up the generator by discarding a small, randomly chosen number
     of values. GetPseudoRandomValue() returns a double in [0,1), so
     multiply BEFORE truncating to get 0..9 extra cycles; the previous
     code cast first, which always truncated to zero. */
  nRnd = 20 + (int) (10 * GetPseudoRandomValue (pfxrt->random_info));
  for (i=0; i < nRnd; i++) (void) GetPseudoRandomValue (pfxrt->random_info);
  pfxrt->usedValStack = 0;
  /* At most two operands can be pending per stacked operator. */
  pfxrt->numValStack = 2 * pfx->maxUsedOprStack;
  if (pfxrt->numValStack < MinValStackSize) pfxrt->numValStack = MinValStackSize;
  pfxrt->ValStack = (fxFltType*) AcquireMagickMemory (pfxrt->numValStack * sizeof(fxFltType));
  if (!pfxrt->ValStack) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "ValStack", "%i",
      pfxrt->numValStack);
    return MagickFalse;
  }
  pfxrt->UserSymVals = NULL;
  if (pfx->usedUserSymbols) {
    pfxrt->UserSymVals = (fxFltType*) AcquireMagickMemory (pfx->usedUserSymbols * sizeof(fxFltType));
    if (!pfxrt->UserSymVals) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), ResourceLimitFatalError,
        "UserSymVals", "%i",
        pfx->usedUserSymbols);
      return MagickFalse;
    }
    /* User symbols start at zero. */
    for (i = 0; i < pfx->usedUserSymbols; i++) pfxrt->UserSymVals[i] = (fxFltType) 0;
  }
  return MagickTrue;
}
static MagickBooleanType ExtendRPN (FxInfo * pfx)
/* Grow the RPN element table by the standard TableExtend factor.
   Records an exception and returns MagickFalse on failure. */
{
  const int newSize = (int) ceil (pfx->numElements * (1 + TableExtend));
  pfx->numElements = newSize;
  pfx->Elements = (ElementT*) ResizeMagickMemory (
    pfx->Elements, newSize * sizeof(ElementT));
  if (pfx->Elements == NULL) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "Elements", "%i",
      pfx->numElements);
    return MagickFalse;
  }
  return MagickTrue;
}
static MagickBooleanType inline OprInPlace (int op)
/* True for operators that modify a user symbol in place:
   +=, -=, *=, /=, ++ and -- (the contiguous oAddEq..oSubSub range). */
{
  if (op >= oAddEq && op <= oSubSub) return MagickTrue;
  return MagickFalse;
}
static const char * OprStr (int oprNum)
/* Printable name for any opcode (operator, function, attribute,
   symbol or control); "bad OprStr" when out of range. */
{
  if (oprNum < 0)      return "bad OprStr";
  if (oprNum <= oNull) return Operators[oprNum].str;
  if (oprNum <= fNull) return Functions[oprNum-FirstFunc].str;
  if (oprNum <= aNull) return ImgAttrs[oprNum-FirstImgAttr].str;
  if (oprNum <= sNull) return Symbols[oprNum-FirstSym].str;
  if (oprNum <= rNull) return Controls[oprNum-FirstCont].str;
  return "bad OprStr";
}
static MagickBooleanType DumpRPN (FxInfo * pfx, FILE * fh)
/* Debugging aid: dump the compiled RPN program, one element per line,
   to fh. Also (re)computes each element's nDest, the number of branch
   elements that target it. Always returns MagickTrue.
   Fix: several lines were previously written to stderr even when the
   caller supplied a different stream; all output now goes to fh. */
{
  int i;
  fprintf (fh, "DumpRPN:");
  fprintf (fh, " numElements=%i", pfx->numElements);
  fprintf (fh, " usedElements=%i", pfx->usedElements);
  fprintf (fh, " maxUsedOprStack=%i", pfx->maxUsedOprStack);
  fprintf (fh, " ImgListLen=%g", (double) pfx->ImgListLen);
  fprintf (fh, " NeedStats=%s", pfx->NeedStats ? "yes" : "no");
  fprintf (fh, " GotStats=%s", pfx->GotStats ? "yes" : "no");
  fprintf (fh, " NeedHsl=%s\n", pfx->NeedHsl ? "yes" : "no");
  if (pfx->runType==rtEntireImage) fprintf (fh, "EntireImage");
  else if (pfx->runType==rtCornerOnly) fprintf (fh, "CornerOnly");
  fprintf (fh, "\n");
  /* Pass 1: clear destination counts. */
  for (i=0; i < pfx->usedElements; i++) {
    ElementT * pel = &pfx->Elements[i];
    pel->nDest = 0;
  }
  /* Pass 2: count how many branches target each element. */
  for (i=0; i < pfx->usedElements; i++) {
    ElementT * pel = &pfx->Elements[i];
    if (pel->oprNum == rGoto || pel->oprNum == rIfZeroGoto || pel->oprNum == rIfNotZeroGoto) {
      if (pel->EleNdx >= 0 && pel->EleNdx < pfx->numElements) {
        ElementT * pelDest = &pfx->Elements[pel->EleNdx];
        pelDest->nDest++;
      }
    }
  }
  /* Pass 3: print each element. */
  for (i=0; i < pfx->usedElements; i++) {
    char UserSym[MagickPathExtent];
    ElementT * pel = &pfx->Elements[i];
    const char * str = OprStr (pel->oprNum);
    const char *sRelAbs = "";
    /* Pixel-addressing functions show whether they use relative []
       or absolute {} coordinates. */
    if (pel->oprNum == fP || pel->oprNum == fUP || pel->oprNum == fVP || pel->oprNum == fSP)
      sRelAbs = pel->IsRelative ? "[]" : "{}";
    if (pel->type == etColourConstant)
      fprintf (fh, " %i: %s vals=%.*Lg,%.*Lg,%.*Lg '%s%s' nArgs=%i ndx=%i %s",
               i, sElementTypes[pel->type],
               pfx->precision, pel->val, pfx->precision, pel->val1, pfx->precision, pel->val2,
               str, sRelAbs, pel->nArgs, pel->EleNdx,
               pel->DoPush ? "push" : "NO push");
    else
      fprintf (fh, " %i: %s val=%.*Lg '%s%s' nArgs=%i ndx=%i %s",
               i, sElementTypes[pel->type], pfx->precision, pel->val, str, sRelAbs,
               pel->nArgs, pel->EleNdx,
               pel->DoPush ? "push" : "NO push");
    if (pel->ImgAttrQual != aNull)
      fprintf (fh, " ia=%s", OprStr(pel->ImgAttrQual));
    if (pel->ChannelQual != NO_CHAN_QUAL) {
      if (pel->ChannelQual == THIS_CHANNEL) fprintf (fh, " ch=this");
      else fprintf (fh, " ch=%i", pel->ChannelQual);
    }
    if (pel->oprNum == rCopyTo) {
      fprintf (fh, " CopyTo ==> %s", NameOfUserSym (pfx, pel->EleNdx, UserSym));
    } else if (pel->oprNum == rCopyFrom) {
      fprintf (fh, " CopyFrom <== %s", NameOfUserSym (pfx, pel->EleNdx, UserSym));
    } else if (OprInPlace (pel->oprNum)) {
      fprintf (fh, " <==> %s", NameOfUserSym (pfx, pel->EleNdx, UserSym));
    }
    if (pel->nDest > 0) fprintf (fh, " <==dest(%i)", pel->nDest);
    fprintf (fh, "\n");
  }
  return MagickTrue;
}
static void DestroyRPN (FxInfo * pfx)
/* Free the parser tables built by BuildRPN(); safe to call even when
   some (or all) of them were never allocated. */
{
  if (pfx->OperatorStack != NULL)
    pfx->OperatorStack = (OperatorE*) RelinquishMagickMemory (pfx->OperatorStack);
  pfx->numOprStack = 0;
  pfx->usedOprStack = 0;
  if (pfx->Elements != NULL)
    pfx->Elements = (ElementT*) RelinquishMagickMemory (pfx->Elements);
  pfx->numElements = 0;
  pfx->usedElements = 0;
  if (pfx->UserSymbols != NULL)
    pfx->UserSymbols = (UserSymbolT*) RelinquishMagickMemory (pfx->UserSymbols);
  pfx->usedUserSymbols = 0;
}
static void DestroyFxRt (fxRtT * pfxrt)
/* Release one per-thread run-time context allocated by AllocFxRt(). */
{
  pfxrt->usedValStack = 0;
  if (pfxrt->ValStack != NULL)
    pfxrt->ValStack = (fxFltType*) RelinquishMagickMemory (pfxrt->ValStack);
  if (pfxrt->UserSymVals != NULL)
    pfxrt->UserSymVals = (fxFltType*) RelinquishMagickMemory (pfxrt->UserSymVals);
  pfxrt->random_info = DestroyRandomInfo (pfxrt->random_info);
}
static size_t GetToken (FxInfo * pfx)
/* Returns length of token that starts with an alpha,
   or 0 if it isn't a token that starts with an alpha.
   j0 and j1 have trailing digit.
   Also colours like "gray47" have more trailing digits.
   After initial alpha(s) also allow single "_", eg "standard_deviation".
   Does not advance pfx->pex.
   This splits "mean.r" etc.
   Fix: chars are cast through unsigned char before the ctype macros --
   passing a plain (possibly negative) char is undefined behavior.
*/
{
  char * p = pfx->pex;
  size_t len = 0;
  *pfx->token = '\0';
  pfx->lenToken = 0;
  if (!isalpha ((int) ((unsigned char) *p))) return 0;
  /* Regard strings that start "icc-" or "device-",
     followed by any number of alphas,
     as a token.
  */
  if (LocaleNCompare (p, "icc-", 4) == 0) {
    len = 4;
    p += 4;
    while (isalpha ((int) ((unsigned char) *p))) { len++; p++; }
  } else if (LocaleNCompare (p, "device-", 7) == 0) {
    len = 7;
    p += 7;
    while (isalpha ((int) ((unsigned char) *p))) { len++; p++; }
  } else {
    while (isalpha ((int) ((unsigned char) *p))) { len++; p++; }
    if (*p == '_') { len++; p++; }
    while (isalpha ((int) ((unsigned char) *p))) { len++; p++; }
    while (isdigit ((int) ((unsigned char) *p))) { len++; p++; }
  }
  if (len >= MaxTokenLen) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "GetToken: too long", "%g at '%s'",
      (double) len, SetShortExp(pfx));
    len = MaxTokenLen;
  }
  if (len) {
    (void) CopyMagickString (pfx->token, pfx->pex, (len+1<MaxTokenLen)?len+1:MaxTokenLen);
  }
  pfx->lenToken = strlen (pfx->token);
  return len;
}
static MagickBooleanType TokenMaybeUserSymbol (FxInfo * pfx)
/* A token may be a user symbol when it is at least two characters
   long and purely alphabetic.
   Fix: cast through unsigned char before isalpha() -- a negative char
   argument is undefined behavior. */
{
  char * p = pfx->token;
  int nChars = 0;
  while (*p) {
    if (!isalpha ((int) ((unsigned char) *p))) return MagickFalse;
    p++;
    nChars++;
  }
  if (nChars < 2) return MagickFalse;
  return MagickTrue;
}
static MagickBooleanType AddElement (FxInfo * pfx, fxFltType val, int oprNum)
/* Append one element to the RPN program, growing the table as needed.
   val is the element's constant value (meaningful for constants);
   oprNum selects the opcode, which determines the element type and
   argument count. Returns MagickFalse if the table cannot be grown.
*/
{
  ElementT * pel;
  assert (oprNum <= rNull);
  if (++pfx->usedElements >= pfx->numElements) {
    if (!ExtendRPN (pfx)) return MagickFalse;
  }
  pel = &pfx->Elements[pfx->usedElements-1];
  pel->type = TypeOfOpr (oprNum);
  pel->val = val;
  pel->val1 = (fxFltType) 0;
  pel->val2 = (fxFltType) 0;
  pel->oprNum = oprNum;
  pel->DoPush = MagickTrue;
  pel->EleNdx = 0;
  pel->ChannelQual = NO_CHAN_QUAL;
  pel->ImgAttrQual = aNull;
  pel->nDest = 0;
  pel->pExpStart = NULL;
  pel->lenExp = 0;
  /* nArgs comes from the table covering the opcode's range; image
     attributes and symbols take no arguments. */
  if (oprNum <= oNull) pel->nArgs = Operators[oprNum].nArgs;
  else if (oprNum <= fNull) pel->nArgs = Functions[oprNum-FirstFunc].nArgs;
  else if (oprNum <= aNull) pel->nArgs = 0;
  else if (oprNum <= sNull) pel->nArgs = 0;
  else pel->nArgs = Controls[oprNum-FirstCont].nArgs;
  return MagickTrue;
}
static MagickBooleanType AddAddressingElement (FxInfo * pfx, int oprNum, int EleNdx)
/* Append a control element whose EleNdx field addresses another
   element (branch target) or a user symbol (copy to/from). */
{
  ElementT * pel;
  if (!AddElement (pfx, (fxFltType) 0, oprNum)) return MagickFalse;
  pel = &pfx->Elements[pfx->usedElements-1];
  pel->EleNdx = EleNdx;
  switch (oprNum) {
    case rGoto:
    case rIfZeroGoto:
    case rIfNotZeroGoto:
    case rZerStk:
      /* Pure control transfers leave nothing on the value stack. */
      pel->DoPush = MagickFalse;
      break;
    default:
      break;
  }
  /* Note: for() may or may not need pushing,
     depending on whether the value is needed, eg "for(...)+2" or debug(for(...)).
  */
  return MagickTrue;
}
static MagickBooleanType AddColourElement (FxInfo * pfx, fxFltType val0, fxFltType val1, fxFltType val2)
/* Append a colour constant, which carries three channel values
   (val0 in the base element, val1 and val2 alongside it). */
{
  ElementT * pel;
  if (!AddElement (pfx, val0, oNull)) return MagickFalse;
  pel = &pfx->Elements[pfx->usedElements-1];
  pel->type = etColourConstant;
  pel->val1 = val1;
  pel->val2 = val2;
  return MagickTrue;
}
static void inline SkipSpaces (FxInfo * pfx)
/* Advance the parse pointer past any whitespace.
   Fix: cast through unsigned char before isspace() -- a negative char
   argument is undefined behavior. */
{
  while (isspace ((int) ((unsigned char) *pfx->pex))) pfx->pex++;
}
static char inline PeekChar (FxInfo * pfx)
/* Skip whitespace, then return (without consuming) the next character. */
{
  SkipSpaces (pfx);
  return *pfx->pex;
}
static MagickBooleanType inline PeekStr (FxInfo * pfx, const char * str)
/* True when str occurs (case-insensitively) at the parse position.
   Leading whitespace is consumed; the string itself is not. */
{
  SkipSpaces (pfx);
  if (LocaleNCompare (pfx->pex, str, strlen(str)) == 0) return MagickTrue;
  return MagickFalse;
}
static MagickBooleanType ExpectChar (FxInfo * pfx, char c)
/* Consume the next non-space character, which must be c; otherwise
   record an exception and return MagickFalse (nothing consumed). */
{
  if (PeekChar (pfx) == c) {
    pfx->pex++;
    return MagickTrue;
  }
  (void) ThrowMagickException (
    pfx->exception, GetMagickModule(), OptionError,
    "Expected char", "'%c' at '%s'", c, SetShortExp (pfx));
  return MagickFalse;
}
static int MaybeXYWH (FxInfo * pfx, ImgAttrE * pop)
/* If ".x" or ".y" or ".width" or ".height" increments *pop and returns 1 to 4 .
   Otherwise returns 0.
   Only applies when *pop is page, printsize or resolution; ".width"
   and ".height" are valid for page only. On an unrecognized or
   disallowed qualifier an exception is recorded, *pop is left
   unchanged, and the token is still consumed.
*/
{
  int ret=0;
  if (*pop != aPage && *pop != aPrintsize && *pop != aRes) return 0;
  if (PeekChar (pfx) != '.') return 0;
  if (!ExpectChar (pfx, '.')) return 0;
  (void) GetToken (pfx);
  if (LocaleCompare ("x", pfx->token)==0) ret=1;
  else if (LocaleCompare ("y", pfx->token)==0) ret=2;
  else if (LocaleCompare ("width", pfx->token)==0) ret=3;
  else if (LocaleCompare ("height", pfx->token)==0) ret=4;
  if (!ret)
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Invalid 'x' or 'y' or 'width' or 'height' token=", "'%s' at '%s'",
      pfx->token, SetShortExp(pfx));
  /* page accepts all four qualifiers; the others accept only .x/.y */
  if (*pop == aPage) (*pop) = (ImgAttrE) (*pop + ret);
  else {
    if (ret > 2) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Invalid 'width' or 'height' token=", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
    } else {
      (*pop) = (ImgAttrE) (*pop + ret);
    }
  }
  /* Consume the qualifier token even on error. */
  pfx->pex+=pfx->lenToken;
  return ret;
}
static MagickBooleanType ExtendOperatorStack (FxInfo * pfx)
/* Grow the operator stack by the standard TableExtend factor.
   Records an exception and returns MagickFalse on failure. */
{
  const int newSize = (int) ceil (pfx->numOprStack * (1 + TableExtend));
  pfx->numOprStack = newSize;
  pfx->OperatorStack = (OperatorE*) ResizeMagickMemory (
    pfx->OperatorStack, newSize * sizeof(OperatorE));
  if (pfx->OperatorStack == NULL) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "OprStack", "%i",
      pfx->numOprStack);
    return MagickFalse;
  }
  return MagickTrue;
}
static MagickBooleanType PushOperatorStack (FxInfo * pfx, int op)
/* Push op onto the operator stack, growing it when full. Tracks the
   high-water mark, later used to size the run-time value stack. */
{
  pfx->usedOprStack++;
  if (pfx->usedOprStack >= pfx->numOprStack && !ExtendOperatorStack (pfx))
    return MagickFalse;
  pfx->OperatorStack[pfx->usedOprStack-1] = (OperatorE) op;
  if (pfx->maxUsedOprStack < pfx->usedOprStack)
    pfx->maxUsedOprStack = pfx->usedOprStack;
  return MagickTrue;
}
static OperatorE GetLeadingOp (FxInfo * pfx)
/* Classify the character at the parse position as a unary prefix
   operator or open parenthesis; oNull when it is neither.
   Does not consume the character. */
{
  switch (*pfx->pex) {
    case '-': return oUnaryMinus;
    case '+': return oUnaryPlus;
    case '~': return oBitNot;
    case '!': return oLogNot;
    case '(': return oOpenParen;
    default:  return oNull;
  }
}
static MagickBooleanType inline OprIsUnaryPrefix (OperatorE op)
/* True for the four unary prefix operators: -, +, ~ and !. */
{
  switch (op) {
    case oUnaryMinus:
    case oUnaryPlus:
    case oBitNot:
    case oLogNot:
      return MagickTrue;
    default:
      return MagickFalse;
  }
}
static MagickBooleanType TopOprIsUnaryPrefix (FxInfo * pfx)
/* True when the operator stack is non-empty and its top entry is a
   unary prefix operator. */
{
  if (pfx->usedOprStack > 0)
    return OprIsUnaryPrefix (pfx->OperatorStack[pfx->usedOprStack-1]);
  return MagickFalse;
}
static MagickBooleanType PopOprOpenParen (FxInfo * pfx, OperatorE op)
/* Pop the top of the operator stack, but only when it is exactly op
   (an opening paren/bracket/brace); otherwise the stack is untouched
   and MagickFalse is returned. */
{
  MagickBooleanType matches =
    (pfx->usedOprStack > 0 &&
     pfx->OperatorStack[pfx->usedOprStack-1] == op) ? MagickTrue : MagickFalse;
  if (matches) pfx->usedOprStack--;
  return matches;
}
static int GetCoordQualifier (FxInfo * pfx, int op)
/* Returns -1 if invalid CoordQualifier, +1 if valid and appropriate.
   Only u, v and s accept a coordinate qualifier, which must be a
   single 'p' or 'P' token followed by a valid p() function form. */
{
  if (op != fU && op != fV && op != fS) return -1;
  (void) GetToken (pfx);
  if (pfx->lenToken != 1) return -1;
  if (*pfx->token != 'p' && *pfx->token != 'P') return -1;
  return GetFunction (pfx, fP) ? 1 : -1;
}
static PixelChannel GetChannelQualifier (FxInfo * pfx, int op)
/* Try to read a channel qualifier token (eg "r" from "u.r"; the '.'
   has already been consumed). Returns the matching PixelChannel and
   consumes the token, or NO_CHAN_QUAL when the token is not a channel
   name or op cannot take one. HLS pseudo-channels are rejected for
   image attributes (an exception is recorded).
*/
{
  if (op == fU || op == fV || op == fP ||
      op == fUP || op == fVP ||
      op == fS || (op >= FirstImgAttr && op <= aNull)
     )
  {
    const ChannelT * pch = &Channels[0];
    (void) GetToken (pfx);
    /* Channels[] ends with an empty-string sentinel. */
    while (*pch->str) {
      if (LocaleCompare (pch->str, pfx->token)==0) {
        if (op >= FirstImgAttr && op <= (OperatorE)aNull &&
             (pch->pixChan == HUE_CHANNEL ||
              pch->pixChan == SAT_CHANNEL ||
              pch->pixChan == LIGHT_CHANNEL)
           )
        {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "Can't have image attribute with HLS qualifier at", "'%s'",
            SetShortExp(pfx));
          return NO_CHAN_QUAL;
        }
        /* Matched: consume the token. */
        pfx->pex += pfx->lenToken;
        return pch->pixChan;
      }
      pch++;
    }
  }
  return NO_CHAN_QUAL;
}
static ImgAttrE GetImgAttrToken (FxInfo * pfx)
/* Resolve pfx->token as an image attribute name, consuming the text
   (and any .x/.y/.width/.height qualifier) on success. Returns aNull
   when the token is not an attribute. Attributes that require a
   qualifier (page, printsize, resolution) record an exception when
   none followed (MaybeXYWH then left ia unincremented).
*/
{
  ImgAttrE ia = aNull;
  const char * iaStr;
  for (ia = FirstImgAttr; ia < aNull; ia=(ImgAttrE) (ia+1)) {
    iaStr = ImgAttrs[ia-FirstImgAttr].str;
    if (LocaleCompare (iaStr, pfx->token)==0) {
      pfx->pex += strlen(pfx->token);
      /* Statistics-based attributes force a stats pass later. */
      if (ImgAttrs[ia-FirstImgAttr].NeedStats == 1) pfx->NeedStats = MagickTrue;
      MaybeXYWH (pfx, &ia);
      break;
    }
  }
  if (ia == aPage || ia == aPrintsize || ia == aRes) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Attribute", "'%s' needs qualifier at '%s'",
      iaStr, SetShortExp(pfx));
  }
  return ia;
}
static ImgAttrE GetImgAttrQualifier (FxInfo * pfx, int op)
/* After u, v, p or s followed by '.', reads a token and tries to parse it
   as an image attribute. Returns aNull when "op" cannot take an attribute
   qualifier, or when no token follows.
*/
{
  if (op != (OperatorE)fU && op != (OperatorE)fV &&
      op != (OperatorE)fP && op != (OperatorE)fS)
    return aNull;
  (void) GetToken (pfx);
  if (pfx->lenToken == 0)
    return aNull;
  return GetImgAttrToken (pfx);
}
static MagickBooleanType IsQualifier (FxInfo * pfx)
/* Tests for a qualifier separator: when the next character is '.' it is
   consumed and MagickTrue is returned; otherwise nothing is consumed.
*/
{
  if (PeekChar (pfx) != '.')
    return MagickFalse;
  pfx->pex++;
  return MagickTrue;
}
static ssize_t GetProperty (FxInfo * pfx, fxFltType *val)
/* Parses a "%[...]" image-property reference at the parse position,
   evaluates it with InterpretImageProperties(), and stores the numeric
   result in *val.
   returns number of character to swallow.
   "-1" means invalid input
   "0" means no relevant input (don't swallow, but not an error)
*/
{
  if (PeekStr (pfx, "%[")) {
    int level = 0;
    size_t len;
    char sProperty [MagickPathExtent];
    char * p = pfx->pex + 2;

    /* Find the ']' matching our '[', allowing nested "[...]" inside. */
    while (*p) {
      if (*p == '[') level++;
      else if (*p == ']') {
        if (level == 0) break;
        level--;
      }
      p++;
    }
    if (!*p || level != 0) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "After '%[' expected ']' at", "'%s'",
        SetShortExp(pfx));
      return -1;
    }
    len = (size_t) (p - pfx->pex + 1);
    if (len > MaxTokenLen) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Too much text between '%[' and ']' at", "'%s'",
        SetShortExp(pfx));
      return -1;
    }
    (void) CopyMagickString (sProperty, pfx->pex, len+1);
    sProperty[len] = '\0';
    {
      char * tailptr;
      char * text;
      text = InterpretImageProperties (pfx->image->image_info, pfx->image,
        sProperty, pfx->exception);
      if (!text || !*text) {
        /* DestroyString tolerates NULL and returns NULL. */
        text = DestroyString(text);
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Unknown property", "'%s' at '%s'",
          sProperty, SetShortExp(pfx));
        return -1;
      }

      *val = strtold (text, &tailptr);
      if (text == tailptr) {
        /* Bug fix: raise the exception BEFORE destroying "text". The
           previous code freed "text" first and then passed the resulting
           NULL/dangling pointer to the "%s" format — a use-after-free. */
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Property", "'%s' text '%s' is not a number at '%s'",
          sProperty, text, SetShortExp(pfx));
        text = DestroyString(text);
        return -1;
      }
      text = DestroyString(text);
    }
    return ((ssize_t) len);
  }
  return 0;
}
static ssize_t inline GetConstantColour (FxInfo * pfx, fxFltType *v0, fxFltType *v1, fxFltType *v2)
/* Finds named colour such as "blue" and colorspace function such as "lab(10,20,30)".
   Returns number of characters to swallow.
   Return -1 means apparantly a constant colour, but with an error.
   Return 0 means not a constant colour, but not an error.
   On success the colour is returned in *v0,*v1,*v2 scaled to 0..1
   (divided by QuantumRange).
*/
{
  PixelInfo
    colour;

  ExceptionInfo
    *dummy_exception = AcquireExceptionInfo ();

  char
    *p;

  MagickBooleanType
    IsGray,
    IsIcc,
    IsDev;

  char ColSp[MagickPathExtent];
  (void) CopyMagickString (ColSp, pfx->token, MaxTokenLen);
  /* Strip a trailing 'a'/'A' from the candidate colorspace name
     (presumably the alpha variant, eg "rgba" -> "rgb" — confirm). */
  p = ColSp + pfx->lenToken - 1;
  if (*p == 'a' || *p == 'A') *p = '\0';

  (void) GetPixelInfo (pfx->image, &colour);

  /* "gray" is both a colorspace and a named colour. */
  IsGray = (LocaleCompare (ColSp, "gray") == 0) ? MagickTrue : MagickFalse;
  IsIcc = (LocaleCompare (ColSp, "icc-color") == 0) ? MagickTrue : MagickFalse;
  IsDev = (LocaleNCompare (ColSp, "device-", 7) == 0) ? MagickTrue : MagickFalse;

  /* QueryColorCompliance will raise a warning if it isn't a colour, so we discard any exceptions.
  */
  if (!QueryColorCompliance (pfx->token, AllCompliance, &colour, dummy_exception) || IsGray) {
    /* Not a plain named colour: try "colorspace(c1,c2,c3)" syntax. */
    ssize_t type = ParseCommandOption (MagickColorspaceOptions, MagickFalse, ColSp);
    if (type >= 0 || IsIcc || IsDev) {
      char * q = pfx->pex + pfx->lenToken;
      while (isspace((int) ((unsigned char) *q))) q++;
      if (*q == '(') {
        size_t lenfun;
        char sFunc[MagickPathExtent];
        /* Swallow up to and including the closing ')'. */
        while (*q && *q != ')') q++;
        if (!*q) {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "constant color missing ')'", "at '%s'",
            SetShortExp(pfx));
          dummy_exception = DestroyExceptionInfo (dummy_exception);
          return -1;
        }
        lenfun = (size_t) (q - pfx->pex + 1);
        if (lenfun > MaxTokenLen) {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "lenfun too long", "'%lu' at '%s'",
            (unsigned long) lenfun, SetShortExp(pfx));
          dummy_exception = DestroyExceptionInfo (dummy_exception);
          return -1;
        }
        (void) CopyMagickString (sFunc, pfx->pex, lenfun+1);
        if (QueryColorCompliance (sFunc, AllCompliance, &colour, dummy_exception)) {
          *v0 = colour.red / QuantumRange;
          *v1 = colour.green / QuantumRange;
          *v2 = colour.blue / QuantumRange;
          dummy_exception = DestroyExceptionInfo (dummy_exception);
          return (ssize_t)lenfun;
        }
      } else {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "colorspace but not a valid color with '(...)' at", "'%s'",
          SetShortExp(pfx));
        dummy_exception = DestroyExceptionInfo (dummy_exception);
        return -1;
      }
    }
    if (!IsGray) {
      /* Neither a named colour nor a colorspace function: not our input. */
      dummy_exception = DestroyExceptionInfo (dummy_exception);
      return 0;
    }
  }

  /* Plain named colour (including "gray"). */
  *v0 = colour.red / QuantumRange;
  *v1 = colour.green / QuantumRange;
  *v2 = colour.blue / QuantumRange;

  dummy_exception = DestroyExceptionInfo (dummy_exception);
  return (ssize_t)strlen (pfx->token);
}
static ssize_t inline GetHexColour (FxInfo * pfx, fxFltType *v0, fxFltType *v1, fxFltType *v2)
/* Reads a hex colour such as "#rrggbb" at the parse position and returns
   its components in *v0,*v1,*v2 scaled to 0..1 (divided by QuantumRange).
   Returns number of characters to swallow.
   Negative return means it starts with '#', but invalid hex number.
   Returns 0 when there is no '#' (not an error).
*/
{
  char * p;
  size_t len;
  PixelInfo colour;

  if (*pfx->pex != '#') return 0;

  /* find end of hex digits. */
  p = pfx->pex + 1;
  /* Bug fix: the ctype functions require a value representable as
     unsigned char (a negative plain char is undefined behaviour), so cast
     through unsigned char as this file already does for isspace(). */
  while (isxdigit ((int) ((unsigned char) *p))) p++;
  if (isalpha ((int) ((unsigned char) *p))) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Bad hex number at", "'%s'",
      SetShortExp(pfx));
    return -1;
  }

  len = (size_t) (p - pfx->pex);
  if (len < 1) return 0;
  if (len >= MaxTokenLen) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Hex colour too long at", "'%s'",
      SetShortExp(pfx));
    return -1;
  }
  (void) CopyMagickString (pfx->token, pfx->pex, len+1);

  (void) GetPixelInfo (pfx->image, &colour);

  if (!QueryColorCompliance (pfx->token, AllCompliance, &colour, pfx->exception)) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "QueryColorCompliance rejected", "'%s' at '%s'",
      pfx->token, SetShortExp(pfx));
    return -1;
  }

  *v0 = colour.red / QuantumRange;
  *v1 = colour.green / QuantumRange;
  *v2 = colour.blue / QuantumRange;

  return (ssize_t) len;
}
static MagickBooleanType GetFunction (FxInfo * pfx, FunctionE fe)
/* Translates a function call "fe(arg1,...,argN)" — including the pixel
   forms p{..}/p[..], u[..], v and s, and the flow-control functions do(),
   for(), if() and while() — into elements. Flow control compiles to
   conditional/unconditional jumps (rIfZeroGoto / rGoto) whose target
   addresses (ndx0..ndx3) are patched in at the end.
   Returns MagickFalse (with an exception raised) on any parse error.
*/
{
  /* A function, so get open-parens, n args, close-parens
  */
  const char * funStr = Functions[fe-FirstFunc].str;
  int nArgs = Functions[fe-FirstFunc].nArgs;
  char chLimit = ')';
  char expChLimit = ')';
  const char *strLimit = ",)";
  OperatorE pushOp = oOpenParen;

  char * pExpStart;

  int lenExp = 0;

  int FndArgs = 0;
  /* ndx0..ndx3 record element addresses for jump patching (see end). */
  int ndx0 = NULL_ADDRESS, ndx1 = NULL_ADDRESS, ndx2 = NULL_ADDRESS, ndx3 = NULL_ADDRESS;

  MagickBooleanType coordQual = MagickFalse;
  PixelChannel chQual = NO_CHAN_QUAL;
  ImgAttrE iaQual = aNull;

  pfx->pex += pfx->lenToken;

  /* p may be written p{...} or p[...]; u may be written u[...]; v and s
     take no arguments. The bracket form sets expChLimit=']', which later
     marks the element IsRelative. */
  if (fe == fP) {
    char p = PeekChar (pfx);
    if (p=='{') {
      (void) ExpectChar (pfx, '{');
      pushOp = oOpenBrace;
      strLimit = ",}";
      chLimit = '}';
      expChLimit = '}';
    } else if (p=='[') {
      (void) ExpectChar (pfx, '[');
      pushOp = oOpenBracket;
      strLimit = ",]";
      chLimit = ']';
      expChLimit = ']';
    } else {
      nArgs = 0;
      chLimit = ']';
      expChLimit = ']';
    }
  } else if (fe == fU) {
    char p = PeekChar (pfx);
    if (p=='[') {
      (void) ExpectChar (pfx, '[');
      pushOp = oOpenBracket;
      strLimit = ",]";
      chLimit = ']';
      expChLimit = ']';
    } else {
      nArgs = 0;
      chLimit = ']';
      expChLimit = ']';
    }
  } else if (fe == fV || fe == fS) {
    nArgs = 0;
    pushOp = oOpenBracket;
    chLimit = ']';
    expChLimit = ']';
  } else {
    if (!ExpectChar (pfx, '(')) return MagickFalse;
  }
  if (!PushOperatorStack (pfx, pushOp)) return MagickFalse;

  pExpStart = pfx->pex;
  ndx0 = pfx->usedElements;
  if (fe==fDo) {
    (void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS); /* address will be ndx1+1 */
  }

  /* Translate each argument expression in turn; for flow-control
     functions, emit the jump scaffolding between arguments. */
  while (nArgs > 0) {
    int FndOne = 0;
    if (TranslateStatementList (pfx, strLimit, &chLimit)) {
      FndOne = 1;
    } else {
      /* Maybe don't break because other expressions may be not empty. */
      if (!chLimit) break;
      if (fe == fP || fe == fS|| fe == fIf) {
        /* Empty argument: substitute constant zero. */
        (void) AddElement (pfx, (fxFltType) 0, oNull);
        FndOne = 1;
      }
    }

    if (strchr (strLimit, chLimit)==NULL) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "For function", "'%s' expected one of '%s' after expression but found '%c' at '%s'",
        funStr, strLimit, chLimit ? chLimit : ' ', SetShortExp(pfx));
      return MagickFalse;
    }
    if (FndOne) {
      FndArgs++;
      nArgs--;
    }
    switch (FndArgs) {
      case 1:
        if (ndx1 != NULL_ADDRESS) {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "For function", "'%s' required argument is missing at '%s'",
            funStr, SetShortExp(pfx));
          return MagickFalse;
        }
        ndx1 = pfx->usedElements;
        if (fe==fWhile) {
          (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2+1 */
        } else if (fe==fDo) {
          (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2+1 */
        } else if (fe==fFor) {
          pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
        } else if (fe==fIf) {
          (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2 + 1 */
          pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue; /* we may need return from if() */
        }
        break;
      case 2:
        if (ndx2 != NULL_ADDRESS) {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "For function", "'%s' required argument is missing at '%s'",
            funStr, SetShortExp(pfx));
          return MagickFalse;
        }
        ndx2 = pfx->usedElements;
        if (fe==fWhile) {
          pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
          (void) AddAddressingElement (pfx, rGoto, ndx0);
        } else if (fe==fDo) {
          pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
          (void) AddAddressingElement (pfx, rGoto, ndx0 + 1);
        } else if (fe==fFor) {
          (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx3 */
          pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue; /* we may need return from for() */
          (void) AddAddressingElement (pfx, rZerStk, NULL_ADDRESS);
        } else if (fe==fIf) {
          (void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS); /* address will be ndx3 */
        }
        break;
      case 3:
        if (ndx3 != NULL_ADDRESS) {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "For function", "'%s' required argument is missing at '%s'",
            funStr, SetShortExp(pfx));
          return MagickFalse;
        }
        if (fe==fFor) {
          pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
          (void) AddAddressingElement (pfx, rGoto, ndx1);
        }
        ndx3 = pfx->usedElements;
        break;
      default:
        break;
    }
    if (chLimit == expChLimit) {
      lenExp = pfx->pex - pExpStart - 1;
      break;
    }
  } /* end while args of a function */
  if (chLimit && chLimit != expChLimit && chLimit != ',' ) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "For function", "'%s' expected '%c', found '%c' at '%s'",
      funStr, expChLimit, chLimit ? chLimit : ' ', SetShortExp(pfx));
    return MagickFalse;
  }

  /* p, s, u and channel() may legitimately receive fewer arguments than
     declared; pad with constant zeros. */
  if (fe == fP || fe == fS || fe == fU || fe == fChannel) {
    while (FndArgs < Functions[fe-FirstFunc].nArgs) {
      (void) AddElement (pfx, (fxFltType) 0, oNull);
      FndArgs++;
    }
  }

  if (FndArgs > Functions[fe-FirstFunc].nArgs)
  {
    if (fe==fChannel) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "For function", "'%s' expected up to %i arguments, found '%i' at '%s'",
        funStr, Functions[fe-FirstFunc].nArgs, FndArgs, SetShortExp(pfx));
    } else {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "For function", "'%s' expected %i arguments, found '%i' at '%s'",
        funStr, Functions[fe-FirstFunc].nArgs, FndArgs, SetShortExp(pfx));
    }
    return MagickFalse;
  }
  if (FndArgs < Functions[fe-FirstFunc].nArgs) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "For function", "'%s' expected %i arguments, found too few (%i) at '%s'",
      funStr, Functions[fe-FirstFunc].nArgs, FndArgs, SetShortExp(pfx));
    return MagickFalse;
  }
  if (fe != fS && fe != fV && FndArgs == 0 && Functions[fe-FirstFunc].nArgs == 0) {
    /* This is for "rand()" and similar. */
    chLimit = expChLimit;
    if (!ExpectChar (pfx, ')')) return MagickFalse;
  }

  if (chLimit != expChLimit) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "For function", "'%s', arguments don't end with '%c' at '%s'",
      funStr, expChLimit, SetShortExp(pfx));
    return MagickFalse;
  }
  if (!PopOprOpenParen (pfx, pushOp)) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Bug: For function", "'%s' tos not '%s' at '%s'",
      funStr, Operators[pushOp].str, SetShortExp(pfx));
    return MagickFalse;
  }

  /* Handle trailing qualifiers: coordinate ("u.p[...]"), channel (".r")
     and image-attribute ("u.mean") forms. */
  if (IsQualifier (pfx)) {

    if (fe == fU || fe == fV || fe == fS) {

      coordQual = (GetCoordQualifier (pfx, fe) == 1) ? MagickTrue : MagickFalse;

      if (coordQual) {

        /* Remove last element, which should be fP */
        ElementT * pel = &pfx->Elements[pfx->usedElements-1];
        if (pel->oprNum != fP) {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "Bug: For function", "'%s' last element not 'p' at '%s'",
            funStr, SetShortExp(pfx));
          return MagickFalse;
        }
        chQual = pel->ChannelQual;
        expChLimit = (pel->IsRelative) ? ']' : '}';
        pfx->usedElements--;
        /* Fold the coordinate form into the combined function (up/vp/sp). */
        if (fe == fU) fe = fUP;
        else if (fe == fV) fe = fVP;
        else if (fe == fS) fe = fSP;
        funStr = Functions[fe-FirstFunc].str;
      }
    }

    if ( chQual == NO_CHAN_QUAL &&
         (fe == fP || fe == fS || fe == fSP || fe == fU || fe == fUP || fe == fV || fe == fVP)
       )
    {
      chQual = GetChannelQualifier (pfx, fe);
    }

    if (chQual == NO_CHAN_QUAL && (fe == fU || fe == fV || fe == fS)) {
      /* Note: we don't allow "p.mean" etc. */
      iaQual = GetImgAttrQualifier (pfx, fe);
    }
    if (IsQualifier (pfx) && chQual == NO_CHAN_QUAL && iaQual != aNull) {
      chQual = GetChannelQualifier (pfx, fe);
    }
    if (coordQual && iaQual != aNull) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "For function", "'%s', can't have qualifiers 'p' and image attribute '%s' at '%s'",
        funStr, pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (!coordQual && chQual == NO_CHAN_QUAL && iaQual == aNull) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "For function", "'%s', bad qualifier '%s' at '%s'",
        funStr, pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (!coordQual && chQual == CompositePixelChannel && iaQual == aNull) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "For function", "'%s', bad composite qualifier '%s' at '%s'",
        funStr, pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }

    if (chQual == HUE_CHANNEL || chQual == SAT_CHANNEL || chQual == LIGHT_CHANNEL) {
      pfx->NeedHsl = MagickTrue;

      if (iaQual >= FirstImgAttr && iaQual < aNull) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Can't have image attribute with HLS qualifier at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
    }
  }

  /* Patch the jump targets recorded above. */
  if (fe==fWhile) {
    pfx->Elements[ndx1].EleNdx = ndx2+1;
  } else if (fe==fDo) {
    pfx->Elements[ndx0].EleNdx = ndx1+1;
    pfx->Elements[ndx1].EleNdx = ndx2+1;
  } else if (fe==fFor) {
    pfx->Elements[ndx2].EleNdx = ndx3;
  } else if (fe==fIf) {
    pfx->Elements[ndx1].EleNdx = ndx2 + 1;
    pfx->Elements[ndx2].EleNdx = ndx3;
  } else {
    /* "u" with a constant-zero argument and no attribute qualifier is
       replaced by the dedicated fU0 op. */
    if (fe == fU && iaQual == aNull) {
      ElementT * pel = &pfx->Elements[pfx->usedElements-1];
      if (pel->type == etConstant && pel->val == 0.0) {
        pfx->usedElements--;
        fe = fU0;
      }
    }
    (void) AddElement (pfx, (fxFltType) 0, fe);
    if (fe == fP || fe == fU  || fe == fU0 || fe == fUP ||
        fe == fV || fe == fVP || fe == fS || fe == fSP)
    {
      ElementT * pel = &pfx->Elements[pfx->usedElements-1];
      /* Bracket form means coordinates are relative to the current pixel. */
      pel->IsRelative = (expChLimit == ']' ? MagickTrue : MagickFalse);
      if (chQual >= 0) pel->ChannelQual = chQual;
      if (iaQual != aNull && (fe == fU || fe == fV || fe == fS)) {
        /* Note: we don't allow "p[2,3].mean" or "p.mean" etc. */
        pel->ImgAttrQual = iaQual;
      }
    }
  }

  if (pExpStart && lenExp) {
    /* Remember the argument text (used elsewhere, eg for debug output
       — TODO confirm). */
    ElementT * pel = &pfx->Elements[pfx->usedElements-1];
    pel->pExpStart = pExpStart;
    pel->lenExp = lenExp;
  }

  if (fe == fDebug)
    pfx->ContainsDebug = MagickTrue;

  return MagickTrue;
}
static MagickBooleanType IsStealth (int op)
/* True for ops that are internal to the translator (combined u/v/s+p forms
   and the control/addressing range FirstCont..rNull) and hence may not be
   written directly by the user.
*/
{
  if (op == fU0 || op == fUP || op == fSP || op == fVP)
    return MagickTrue;
  if (op >= FirstCont && op <= rNull)
    return MagickTrue;
  return MagickFalse;
}
static MagickBooleanType GetOperand (
  FxInfo * pfx, MagickBooleanType * UserSymbol, MagickBooleanType * NewUserSymbol, int * UserSymNdx,
  MagickBooleanType * needPopAll)
/* Translates one operand at the parse position. An operand can be a
   parenthesised sub-expression, a unary-prefixed operand, a hex colour,
   a numeric constant (with optional SI/binary suffix), a "%[...]"
   property, a named constant, a function call, an image attribute, a
   symbol, a named colour, an image artifact, or a user symbol.
   Outputs: *UserSymbol/*NewUserSymbol/*UserSymNdx describe a user-symbol
   operand (elements for it are NOT added here); *needPopAll is set when
   the operand requires the stack to be cleared at statement end.
   Returns MagickFalse (with an exception raised) when no operand found.
*/
{
  *NewUserSymbol = *UserSymbol = MagickFalse;
  *UserSymNdx = NULL_ADDRESS;

  SkipSpaces (pfx);
  if (!*pfx->pex) return MagickFalse;
  (void) GetToken (pfx);

  if (pfx->lenToken==0) {

    /* Try '(' or unary prefix
    */
    OperatorE op = GetLeadingOp (pfx);
    if (op==oOpenParen) {
      char chLimit = '\0';
      if (!PushOperatorStack (pfx, op)) return MagickFalse;
      pfx->pex++;
      if (!TranslateExpression (pfx, ")", &chLimit, needPopAll)) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Empty expression in parentheses at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      if (chLimit != ')') {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "'(' but no ')' at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      /* Top of opr stack should be '('. */
      if (!PopOprOpenParen (pfx, oOpenParen)) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Bug: tos not '(' at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      return MagickTrue;
    } else if (OprIsUnaryPrefix (op)) {
      if (!PushOperatorStack (pfx, op)) return MagickFalse;
      pfx->pex++;
      SkipSpaces (pfx);
      if (!*pfx->pex) return MagickFalse;

      /* Recurse for the operand the prefix applies to. */
      if (!GetOperand (pfx, UserSymbol, NewUserSymbol, UserSymNdx, needPopAll)) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "After unary, bad operand at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      if (*NewUserSymbol) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "After unary, NewUserSymbol at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      if (*UserSymbol) {
        /* Materialise the user symbol's value now so the prefix can act
           on it. */
        (void) AddAddressingElement (pfx, rCopyFrom, *UserSymNdx);
        *UserSymNdx = NULL_ADDRESS;

        *UserSymbol = MagickFalse;
        *NewUserSymbol = MagickFalse;
      }
      (void) GetToken (pfx);
      return MagickTrue;
    } else if (*pfx->pex == '#') {
      fxFltType v0=0, v1=0, v2=0;
      ssize_t lenToken = GetHexColour (pfx, &v0, &v1, &v2);
      if (lenToken < 0) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Bad hex number at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      } else if (lenToken > 0) {
        (void) AddColourElement (pfx, v0, v1, v2);
        pfx->pex+=lenToken;
      }
      return MagickTrue;
    }

    /* Try a constant number.
    */
    {
      char * tailptr;
      ssize_t lenOptArt;
      fxFltType val = strtold (pfx->pex, &tailptr);
      if (pfx->pex != tailptr) {
        pfx->pex = tailptr;
        if (*tailptr) {
          /* Could have "prefix" K, Ki, M etc.
             See https://en.wikipedia.org/wiki/Metric_prefix
             and https://en.wikipedia.org/wiki/Binary_prefix
          */
          double Pow = 0.0;

          /* Powers of 10 from yocto (-24) in steps of 3; '.' is a
             placeholder for "no prefix" and is excluded below. */
          const char Prefices[] = "yzafpnum.kMGTPEZY";
          const char * pSi = strchr (Prefices, *tailptr);
          if (pSi && *pSi != '.') Pow = (pSi - Prefices) * 3 - 24;
          else if (*tailptr == 'c') Pow = -2;
          else if (*tailptr == 'h') Pow = 2;
          else if (*tailptr == 'k') Pow = 3;
          if (Pow != 0.0) {
            /* Trailing 'i' selects the binary prefix, eg "Ki" = 2^10:
               2^(Pow/0.3) == 2^(10*Pow/3). */
            if (*(++pfx->pex) == 'i') {
              val *= pow (2.0, Pow/0.3);
              pfx->pex++;
            } else {
              val *= pow (10.0, Pow);
            }
          }
        }
        (void) AddElement (pfx, val, oNull);
        return MagickTrue;
      }

      val = (fxFltType) 0;
      lenOptArt = GetProperty (pfx, &val);
      if (lenOptArt < 0) return MagickFalse;
      if (lenOptArt > 0) {
        (void) AddElement (pfx, val, oNull);
        pfx->pex += lenOptArt;
        return MagickTrue;
      }
    }

  } /* end of lenToken==0 */

  if (pfx->lenToken > 0) {
    /* Try a constant
    */
    {
      ConstantE ce;
      for (ce = (ConstantE)0; ce < cNull; ce=(ConstantE) (ce+1)) {
        const char * ceStr = Constants[ce].str;
        if (LocaleCompare (ceStr, pfx->token)==0) {
          break;
        }
      }

      if (ce != cNull) {
        (void) AddElement (pfx, Constants[ce].val, oNull);
        pfx->pex += pfx->lenToken;
        return MagickTrue;
      }
    }

    /* Try a function
    */
    {
      FunctionE fe;
      for (fe = FirstFunc; fe < fNull; fe=(FunctionE) (fe+1)) {
        const char * feStr = Functions[fe-FirstFunc].str;
        if (LocaleCompare (feStr, pfx->token)==0) {
          break;
        }
      }

      /* 'v' refers to the second image, so needs at least two. */
      if (fe == fV && pfx->ImgListLen < 2) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Symbol 'v' but fewer than two images at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }

      if (IsStealth (fe)) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Function", "'%s' not permitted at '%s'",
          pfx->token, SetShortExp(pfx));
      }

      if (fe == fDo || fe == fFor || fe == fIf || fe == fWhile) {
        *needPopAll = MagickTrue;
      }

      if (fe != fNull) return (GetFunction (pfx, fe));
    }

    /* Try image attribute
    */
    {
      ImgAttrE ia = GetImgAttrToken (pfx);
      if (ia != aNull) {
        fxFltType val = 0;
        (void) AddElement (pfx, val, ia);

        if (ImgAttrs[ia-FirstImgAttr].NeedStats==1) {
          if (IsQualifier (pfx)) {
            PixelChannel chQual = GetChannelQualifier (pfx, ia);
            ElementT * pel;
            if (chQual == NO_CHAN_QUAL) {
              (void) ThrowMagickException (
                pfx->exception, GetMagickModule(), OptionError,
                "Bad channel qualifier at", "'%s'",
                SetShortExp(pfx));
              return MagickFalse;
            }
            /* Adjust the element */
            pel = &pfx->Elements[pfx->usedElements-1];
            pel->ChannelQual = chQual;
          }
        }
        return MagickTrue;
      }
    }

    /* Try symbol
    */
    {
      SymbolE se;
      for (se = FirstSym; se < sNull; se=(SymbolE) (se+1)) {
        const char * seStr = Symbols[se-FirstSym].str;
        if (LocaleCompare (seStr, pfx->token)==0) {
          break;
        }
      }
      if (se != sNull) {
        fxFltType val = 0;
        (void) AddElement (pfx, val, se);
        pfx->pex += pfx->lenToken;

        if (se==sHue || se==sSaturation || se==sLightness) pfx->NeedHsl = MagickTrue;
        return MagickTrue;
      }
    }

    /* Try constant colour.
    */
    {
      fxFltType v0, v1, v2;
      ssize_t ColLen = GetConstantColour (pfx, &v0, &v1, &v2);
      if (ColLen < 0) return MagickFalse;
      if (ColLen > 0) {
        (void) AddColourElement (pfx, v0, v1, v2);
        pfx->pex+=ColLen;
        return MagickTrue;
      }
    }

    /* Try image artifact.
    */
    {
      const char *artifact;
      artifact = GetImageArtifact (pfx->image, pfx->token);
      if (artifact != (const char *) NULL) {
        char * tailptr;
        fxFltType val = strtold (artifact, &tailptr);
        if (pfx->token == tailptr) {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "Artifact", "'%s' has value '%s', not a number, at '%s'",
            pfx->token, artifact, SetShortExp(pfx));
          return MagickFalse;
        }
        (void) AddElement (pfx, val, oNull);
        pfx->pex+=pfx->lenToken;
        return MagickTrue;
      }
    }

    /* Try user symbols. If it is, don't AddElement yet.
    */
    if (TokenMaybeUserSymbol (pfx)) {
      *UserSymbol = MagickTrue;
      *UserSymNdx = FindUserSymbol (pfx, pfx->token);
      if (*UserSymNdx == NULL_ADDRESS) {
        *UserSymNdx = AddUserSymbol (pfx, pfx->pex, pfx->lenToken);
        *NewUserSymbol = MagickTrue;
      } else {
      }
      pfx->pex += pfx->lenToken;

      return MagickTrue;
    }
  }

  (void) ThrowMagickException (
    pfx->exception, GetMagickModule(), OptionError,
    "Expected operand at", "'%s'",
    SetShortExp(pfx));

  return MagickFalse;
}
static MagickBooleanType inline IsRealOperator (OperatorE op)
/* True iff "op" performs computation, ie is not one of the bracket
   pseudo-operators in the range oOpenParen..oCloseBrace.
*/
{
  if (op >= oOpenParen && op <= oCloseBrace)
    return MagickFalse;
  return MagickTrue;
}
static MagickBooleanType inline ProcessTernaryOpr (FxInfo * pfx, TernaryT * ptern)
/* Ternary operator "... ? ... : ..."
   returns false iff we have exception
   When the top of the operator stack is '?' or ':', pops it and records
   the address of the conditional/unconditional jump element emitted here,
   so ResolveTernaryAddresses() can patch the targets later.
*/
{
  if (pfx->usedOprStack == 0)
    return MagickFalse;
  if (pfx->OperatorStack[pfx->usedOprStack-1] == oQuery) {
    /* Only one '?' (and no ':' yet) is allowed per sub-expression. */
    if (ptern->addrQuery != NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Already have '?' in sub-expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    if (ptern->addrColon != NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Already have ':' in sub-expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    pfx->usedOprStack--;
    ptern->addrQuery = pfx->usedElements;
    (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS);
    /* address will be one after the Colon address. */
  }
  else if (pfx->OperatorStack[pfx->usedOprStack-1] == oColon) {
    /* ':' must follow a '?', and only one ':' is allowed. */
    if (ptern->addrQuery == NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Need '?' in sub-expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    if (ptern->addrColon != NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Already have ':' in sub-expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    pfx->usedOprStack--;
    ptern->addrColon = pfx->usedElements;
    /* The then-branch result must stay on the stack. */
    pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue;
    (void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS);
    /* address will be after the subexpression */
  }
  return MagickTrue;
}
static MagickBooleanType GetOperator (
  FxInfo * pfx,
  MagickBooleanType * Assign, MagickBooleanType * Update, MagickBooleanType * IncrDecr)
/* Reads the operator at the parse position, reports its class through
   *Assign (=), *Update (+= etc, per OprInPlace) and *IncrDecr (++/--),
   pops any higher-precedence operators from the operator stack onto the
   element list, then pushes the new operator (or, for ')', pops the
   matching '('). Returns MagickFalse with an exception on error.
*/
{
  OperatorE op;
  size_t len = 0;
  MagickBooleanType DoneIt = MagickFalse;
  SkipSpaces (pfx);
  /* Longest-listed-first table scan; oNull means no operator matched. */
  for (op = (OperatorE)0; op != oNull; op=(OperatorE) (op+1)) {
    const char * opStr = Operators[op].str;
    len = strlen(opStr);
    if (LocaleNCompare (opStr, pfx->pex, len)==0) {
      break;
    }
  }

  if (!IsRealOperator (op)) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Not a real operator at", "'%s'",
      SetShortExp(pfx));
    return MagickFalse;
  }

  if (op==oNull) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Expected operator at", "'%s'",
      SetShortExp(pfx));
    return MagickFalse;
  }

  *Assign = (op==oAssign) ? MagickTrue : MagickFalse;
  *Update = OprInPlace (op);
  *IncrDecr = (op == oPlusPlus || op == oSubSub) ? MagickTrue : MagickFalse;

  /* while top of OperatorStack is not empty and is not open-parens or assign,
     and top of OperatorStack is higher precedence than new op,
     then move top of OperatorStack to Element list.
  */
  while (pfx->usedOprStack > 0) {
    OperatorE top = pfx->OperatorStack[pfx->usedOprStack-1];
    int precTop, precNew;
    if (top == oOpenParen || top == oAssign || OprInPlace (top)) break;
    precTop = Operators[top].precedence;
    precNew = Operators[op].precedence;
    /* Assume left associativity.
       If right assoc, this would be "<=".
    */
    if (precTop < precNew) break;
    (void) AddElement (pfx, (fxFltType) 0, top);
    pfx->usedOprStack--;
  }

  /* If new op is close paren, and stack top is open paren,
     remove stack top.
  */
  if (op==oCloseParen) {
    if (pfx->usedOprStack == 0) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Found ')' but nothing on stack at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }

    if (pfx->OperatorStack[pfx->usedOprStack-1] != oOpenParen) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Found ')' but no '(' on stack at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    pfx->usedOprStack--;
    DoneIt = MagickTrue;
  }

  if (!DoneIt) {
    if (!PushOperatorStack (pfx, op)) return MagickFalse;
  }

  pfx->pex += len;

  return MagickTrue;
}
static MagickBooleanType ResolveTernaryAddresses (FxInfo * pfx, TernaryT * ptern)
/* Patches the jump targets recorded for a "cond ? a : b" once the whole
   ternary has been translated, then clears the recorded addresses.
   Raises an exception (and returns MagickFalse) when only one of '?'/':'
   was seen.
*/
{
  MagickBooleanType haveQuery, haveColon;
  haveQuery = (ptern->addrQuery != NULL_ADDRESS) ? MagickTrue : MagickFalse;
  haveColon = (ptern->addrColon != NULL_ADDRESS) ? MagickTrue : MagickFalse;

  if (!haveQuery && !haveColon)
    return MagickTrue;

  if (haveQuery && haveColon) {
    /* '?' jumps just past the ':' goto; the ':' goto jumps past the
       whole sub-expression. */
    pfx->Elements[ptern->addrQuery].EleNdx = ptern->addrColon + 1;
    pfx->Elements[ptern->addrColon].EleNdx = pfx->usedElements;
    ptern->addrQuery = NULL_ADDRESS;
    ptern->addrColon = NULL_ADDRESS;
    return MagickTrue;
  }

  if (haveQuery) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "'?' with no corresponding ':'", "'%s' at '%s'",
      pfx->token, SetShortExp(pfx));
    return MagickFalse;
  }

  (void) ThrowMagickException (
    pfx->exception, GetMagickModule(), OptionError,
    "':' with no corresponding '?'", "'%s' at '%s'",
    pfx->token, SetShortExp(pfx));
  return MagickFalse;
}
static MagickBooleanType TranslateExpression (
  FxInfo * pfx, const char * strLimit, char * chLimit, MagickBooleanType * needPopAll)
/* Translates one expression, stopping at any character in strLimit (the
   terminating character is returned in *chLimit, '\0' when input ended).
   Alternates GetOperand()/GetOperator() and handles assignment (=),
   in-place update (+= etc), ++/--, the ternary ?:, and unary prefixes
   applied to user symbols. Returns MagickFalse on error or when the
   expression is empty.
*/
{
  /* There should be only one New per expression (oAssign), but can be many Old.
  */
  MagickBooleanType UserSymbol, NewUserSymbol;
  int UserSymNdx0, UserSymNdx1;

  MagickBooleanType
    Assign = MagickFalse,
    Update = MagickFalse,
    IncrDecr = MagickFalse;

  int StartEleNdx;

  TernaryT ternary;
  ternary.addrQuery = NULL_ADDRESS;
  ternary.addrColon = NULL_ADDRESS;

  pfx->teDepth++;

  *chLimit = '\0';

  StartEleNdx = pfx->usedElements-1;
  if (StartEleNdx < 0) StartEleNdx = 0;

  SkipSpaces (pfx);

  if (!*pfx->pex) {
    pfx->teDepth--;
    return MagickFalse;
  }

  /* An immediate terminator means an empty expression. */
  if (strchr(strLimit,*pfx->pex)!=NULL) {
    *chLimit = *pfx->pex;
    pfx->pex++;
    pfx->teDepth--;

    return MagickFalse;
  }

  if (!GetOperand (pfx, &UserSymbol, &NewUserSymbol, &UserSymNdx0, needPopAll)) return MagickFalse;
  SkipSpaces (pfx);

  /* Loop through Operator, Operand, Operator, Operand, ...
  */
  while (*pfx->pex && (!*strLimit || (strchr(strLimit,*pfx->pex)==NULL))) {
    if (!GetOperator (pfx, &Assign, &Update, &IncrDecr)) return MagickFalse;
    SkipSpaces (pfx);
    if (NewUserSymbol && !Assign) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Expected assignment after new UserSymbol", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (!UserSymbol && Assign) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Attempted assignment to non-UserSymbol", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (!UserSymbol && Update) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Attempted update to non-UserSymbol", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (UserSymbol && (Assign || Update) && !IncrDecr) {

      /* Translate the right-hand side as a sub-expression. */
      if (!TranslateExpression (pfx, strLimit, chLimit, needPopAll)) return MagickFalse;
      if (!*pfx->pex) break;
      if (!*strLimit) break;
      if (strchr(strLimit,*chLimit)!=NULL) break;
    }
    if (UserSymbol && !Assign && !Update && UserSymNdx0 != NULL_ADDRESS) {
      /* Reading (not writing) the symbol: emit a copy of its value. */
      ElementT * pel;
      (void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx0);
      UserSymNdx0 = NULL_ADDRESS;
      pel = &pfx->Elements[pfx->usedElements-1];
      pel->DoPush = MagickTrue;
    }

    if (UserSymbol) {
      /* Any pending unary prefixes now apply to the symbol's value. */
      while (TopOprIsUnaryPrefix (pfx)) {
        OperatorE op = pfx->OperatorStack[pfx->usedOprStack-1];
        (void) AddElement (pfx, (fxFltType) 0, op);
        pfx->usedOprStack--;
      }
    }

    if (!ProcessTernaryOpr (pfx, &ternary)) return MagickFalse;

    if (ternary.addrColon != NULL_ADDRESS) {
      /* The else-branch runs to the end of the (sub-)expression. */
      if (!TranslateExpression (pfx, ",);", chLimit, needPopAll)) return MagickFalse;
      break;
    }

    UserSymbol = NewUserSymbol = MagickFalse;

    if ( (!*pfx->pex) || (*strLimit && (strchr(strLimit,*pfx->pex)!=NULL) ) )
    {
      if (IncrDecr) break;

      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Expected operand after operator", "at '%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }

    if (IncrDecr) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "'++' and '--' must be the final operators in an expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }

    if (!GetOperand (pfx, &UserSymbol, &NewUserSymbol, &UserSymNdx1, needPopAll)) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Expected operand at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    SkipSpaces (pfx);
    if (NewUserSymbol && !Assign) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "NewUserSymbol", "'%s' after non-assignment operator at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (UserSymbol && !NewUserSymbol) {
      (void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx1);
      UserSymNdx1 = NULL_ADDRESS;
    }
    UserSymNdx0 = UserSymNdx1;
  }

  if (UserSymbol && !Assign && !Update && UserSymNdx0 != NULL_ADDRESS) {
    /* Expression ended on a bare user symbol: emit the read now. */
    ElementT * pel;
    if (NewUserSymbol) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "NewUserSymbol", "'%s' needs assignment operator at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    (void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx0);
    pel = &pfx->Elements[pfx->usedElements-1];
    pel->DoPush = MagickTrue;
  }

  if (*pfx->pex && !*chLimit && (strchr(strLimit,*pfx->pex)!=NULL)) {
    *chLimit = *pfx->pex;
    pfx->pex++;
  }

  /* Flush remaining operators to the element list, stopping at open
     brackets and at assignment/update operators that weren't used. */
  while (pfx->usedOprStack) {
    OperatorE op = pfx->OperatorStack[pfx->usedOprStack-1];
    if (op == oOpenParen || op == oOpenBracket || op == oOpenBrace) {
      break;
    }
    if ( (op==oAssign && !Assign) || (OprInPlace(op) && !Update) ) {
      break;
    }
    pfx->usedOprStack--;
    (void) AddElement (pfx, (fxFltType) 0, op);
    if (op == oAssign) {
      if (UserSymNdx0 < 0) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Assignment to unknown user symbol at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      /* Adjust last element, by deletion and add.
      */
      pfx->usedElements--;
      (void) AddAddressingElement (pfx, rCopyTo, UserSymNdx0);
      break;
    } else if (OprInPlace (op)) {
      if (UserSymNdx0 < 0) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Operator-in-place to unknown user symbol at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      /* Modify latest element.
      */
      pfx->Elements[pfx->usedElements-1].EleNdx = UserSymNdx0;
      break;
    }
  }

  if (ternary.addrQuery != NULL_ADDRESS) *needPopAll = MagickTrue;

  (void) ResolveTernaryAddresses (pfx, &ternary);

  pfx->teDepth--;

  if (!pfx->teDepth && *needPopAll) {
    (void) AddAddressingElement (pfx, rZerStk, NULL_ADDRESS);
    *needPopAll = MagickFalse;
  }

  if (pfx->exception->severity != UndefinedException)
    return MagickFalse;

  return MagickTrue;
}
static MagickBooleanType TranslateStatement (FxInfo * pfx, char * strLimit, char * chLimit)
{
  /* Translate one statement, stopping at any character in strLimit;
     the terminating character is reported through *chLimit.
     Returns MagickFalse on empty input or translation failure. */
  MagickBooleanType needPopAll;

  needPopAll = MagickFalse;
  SkipSpaces (pfx);
  if (*pfx->pex == '\0')
    return MagickFalse;
  if (TranslateExpression (pfx, strLimit, chLimit, &needPopAll) == MagickFalse)
    return MagickFalse;
  if ((pfx->usedElements > 0) && (*chLimit == ';')) {
    /* A trailing ';' discards the statement's value, so suppress the push
       on the final element.
       FIXME: not necessarily the last element,
       but the last _executed_ element, eg "goto" in a "for()".
       Pending a fix, we will use rZerStk. */
    ElementT * lastEl = &pfx->Elements[pfx->usedElements-1];
    lastEl->DoPush = MagickFalse;
  }
  return MagickTrue;
}
static MagickBooleanType TranslateStatementList (FxInfo * pfx, const char * strLimit, char * chLimit)
{
#define MAX_SLIMIT 10
  /* Translate a ';'-separated sequence of statements, each terminated by a
     character in strLimit (with ';' added to the limit set if absent). */
  char sLimits[MAX_SLIMIT];

  SkipSpaces (pfx);
  if (*pfx->pex == '\0')
    return MagickFalse;
  (void) CopyMagickString (sLimits, strLimit, MAX_SLIMIT-1);
  if (strchr (strLimit, ';') == (char *) NULL)
    (void) ConcatenateMagickString (sLimits, ";", MAX_SLIMIT);
  do {
    if (TranslateStatement (pfx, sLimits, chLimit) == MagickFalse)
      return MagickFalse;
    /* Keep going only while input remains and the last statement ended
       at a ';' separator. */
  } while ((*pfx->pex != '\0') && (*chLimit == ';'));
  if (pfx->exception->severity != UndefinedException)
    return MagickFalse;
  return MagickTrue;
}
/*--------------------------------------------------------------------
Run-time
*/
static ChannelStatistics *CollectOneImgStats (FxInfo * pfx, Image * img)
{
  /* Gather per-channel statistics for img, with all quantum-valued fields
     rescaled by QuantumScale to the [0,1] range.
     The caller owns the result and must release it with
     RelinquishMagickMemory(). */
  ChannelStatistics
    * stats;
  int
    chIdx;

  stats = GetImageStatistics (img, pfx->exception);
  if (stats == (ChannelStatistics *) NULL)
    return ((ChannelStatistics *) NULL);
  for (chIdx = 0; chIdx <= (int) MaxPixelChannels; chIdx++) {
    ChannelStatistics * pcs = stats + chIdx;
    pcs->mean               *= QuantumScale;
    pcs->median             *= QuantumScale;
    pcs->maxima             *= QuantumScale;
    pcs->minima             *= QuantumScale;
    pcs->standard_deviation *= QuantumScale;
    pcs->kurtosis           *= QuantumScale;
    pcs->skewness           *= QuantumScale;
    pcs->entropy            *= QuantumScale;
  }
  return stats;
}
static MagickBooleanType CollectStatistics (FxInfo * pfx)
{
  /* Pre-compute channel statistics for every image in the list, storing
     one ChannelStatistics array per image in pfx->statistics.
     Returns MagickFalse (with an exception) if the table cannot be
     allocated; individual entries may be NULL if stats collection for
     that image failed (consumers check for NULL). */
  Image * img = GetFirstImageInList (pfx->image);
  size_t imgNum;
  pfx->statistics = (ChannelStatistics**) AcquireMagickMemory (pfx->ImgListLen * sizeof (ChannelStatistics *));
  if (!pfx->statistics) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "Statistics", "%lu",
      (unsigned long) pfx->ImgListLen);
    return MagickFalse;
  }
  /* Bounded loop: the previous for(;;) with a post-increment break test
     would index past the allocation (and never terminate) if ImgListLen
     were ever zero. */
  for (imgNum = 0; imgNum < pfx->ImgListLen; imgNum++) {
    assert (img != (Image *) NULL);
    pfx->statistics[imgNum] = CollectOneImgStats (pfx, img);
    img = GetNextImageInList (img);
  }
  pfx->GotStats = MagickTrue;
  return MagickTrue;
}
static MagickBooleanType inline PushVal (FxInfo * pfx, fxRtT * pfxrt, fxFltType val, int addr)
{
  /* Push val onto this thread's run-time value stack.
     addr is the RPN element index, reported on overflow. */
  if (pfxrt->usedValStack < pfxrt->numValStack) {
    pfxrt->ValStack[pfxrt->usedValStack] = val;
    pfxrt->usedValStack++;
    return MagickTrue;
  }
  (void) ThrowMagickException (
    pfx->exception, GetMagickModule(), OptionError,
    "ValStack overflow at addr=", "%i",
    addr);
  return MagickFalse;
}
static inline fxFltType PopVal (FxInfo * pfx, fxRtT * pfxrt, int addr)
{
  /* Pop and return the top of the run-time value stack.
     On underflow, raises an exception and returns zero. */
  if (pfxrt->usedValStack > 0) {
    pfxrt->usedValStack--;
    return pfxrt->ValStack[pfxrt->usedValStack];
  }
  (void) ThrowMagickException (
    pfx->exception, GetMagickModule(), OptionError,
    "ValStack underflow at addr=", "%i",
    addr);
  return (fxFltType) 0;
}
static inline fxFltType ImageStat (
FxInfo * pfx, ssize_t ImgNum, PixelChannel channel, ImgAttrE ia)
{
/* Return the value of image attribute/statistic ia for image ImgNum,
   using channel where the attribute is per-channel.
   Statistics come from the pre-computed cache when available; otherwise
   they are collected (and released) on demand.  Statistic cases leave
   ret at 0 if no statistics are available. */
ChannelStatistics * cs = NULL;
fxFltType ret = 0;
MagickBooleanType NeedRelinq = MagickFalse;
assert (channel >= 0 && channel <= MaxPixelChannels);
if (pfx->GotStats) {
cs = pfx->statistics[ImgNum];
} else if (pfx->NeedStats) {
/* If we need more than one statistic per pixel, this is inefficient. */
cs = CollectOneImgStats (pfx, pfx->Images[ImgNum]);
NeedRelinq = MagickTrue;
}
switch (ia) {
case aDepth:
ret = (fxFltType) GetImageDepth (pfx->Images[ImgNum], pfx->exception);
break;
case aExtent:
/* NOTE(review): this reads pfx->image rather than pfx->Images[ImgNum];
   other cases index by ImgNum — confirm this is intended. */
ret = (fxFltType) GetBlobSize (pfx->image);
break;
case aKurtosis:
if (cs != (ChannelStatistics *) NULL)
ret = cs[channel].kurtosis;
break;
case aMaxima:
if (cs != (ChannelStatistics *) NULL)
ret = cs[channel].maxima;
break;
case aMean:
if (cs != (ChannelStatistics *) NULL)
ret = cs[channel].mean;
break;
case aMedian:
if (cs != (ChannelStatistics *) NULL)
ret = cs[channel].median;
break;
case aMinima:
if (cs != (ChannelStatistics *) NULL)
ret = cs[channel].minima;
break;
case aPage:
/* Do nothing */
break;
case aPageX:
ret = (fxFltType) pfx->Images[ImgNum]->page.x;
break;
case aPageY:
ret = (fxFltType) pfx->Images[ImgNum]->page.y;
break;
case aPageWid:
ret = (fxFltType) pfx->Images[ImgNum]->page.width;
break;
case aPageHt:
ret = (fxFltType) pfx->Images[ImgNum]->page.height;
break;
case aPrintsize:
/* Do nothing */
break;
case aPrintsizeX:
/* columns / resolution (safe against zero resolution). */
ret = (fxFltType) PerceptibleReciprocal (pfx->Images[ImgNum]->resolution.x)
* pfx->Images[ImgNum]->columns;
break;
case aPrintsizeY:
ret = (fxFltType) PerceptibleReciprocal (pfx->Images[ImgNum]->resolution.y)
* pfx->Images[ImgNum]->rows;
break;
case aQuality:
ret = (fxFltType) pfx->Images[ImgNum]->quality;
break;
case aRes:
/* Do nothing */
break;
case aResX:
ret = pfx->Images[ImgNum]->resolution.x;
break;
case aResY:
ret = pfx->Images[ImgNum]->resolution.y;
break;
case aSkewness:
if (cs != (ChannelStatistics *) NULL)
ret = cs[channel].skewness;
break;
case aStdDev:
if (cs != (ChannelStatistics *) NULL)
ret = cs[channel].standard_deviation;
break;
case aH:
ret = (fxFltType) pfx->Images[ImgNum]->rows;
break;
case aN:
ret = (fxFltType) pfx->ImgListLen;
break;
case aT: /* image index in list */
ret = (fxFltType) ImgNum;
break;
case aW:
ret = (fxFltType) pfx->Images[ImgNum]->columns;
break;
case aZ:
ret = (fxFltType) GetImageDepth (pfx->Images[ImgNum], pfx->exception);
break;
default:
(void) ThrowMagickException (pfx->exception,GetMagickModule(),OptionError,
"Unknown ia=","%i",ia);
}
/* Release on-demand statistics gathered above. */
if (NeedRelinq) cs = (ChannelStatistics *)RelinquishMagickMemory (cs);
return ret;
}
static fxFltType inline FxGcd (fxFltType x, fxFltType y, const size_t depth)
{
#define FxMaxFunctionDepth 200
  /* Greatest common divisor of (possibly fractional) magnitudes by
     Euclid's method.  Iterative form of the original tail recursion:
     each swap or reduction step counts against the same depth cap, and
     the arithmetic is identical. */
  size_t iter = depth;
  for (;;) {
    if (x < y) {
      fxFltType tmp = x;
      x = y;
      y = tmp;
      iter++;
      continue;
    }
    if ((fabs((double) y) < 0.001) || (iter >= FxMaxFunctionDepth))
      return (x);
    {
      fxFltType rem = x - y*floor((double) (x/y));
      x = y;
      y = rem;
      iter++;
    }
  }
}
static ssize_t inline ChkImgNum (FxInfo * pfx, fxFltType f)
/* Returns -1 if f is too large. */
{
  /* Round f to the nearest integer image index; a negative value counts
     back from the end of the list.  Out of range raises an exception
     and yields -1. */
  ssize_t ndx = (ssize_t) floor ((double) f + 0.5);
  if (ndx < 0)
    ndx += pfx->ImgListLen;
  if ((ndx >= 0) && (ndx < (ssize_t) pfx->ImgListLen))
    return ndx;
  (void) ThrowMagickException (
    pfx->exception, GetMagickModule(), OptionError,
    "ImgNum", "%lu bad for ImgListLen %lu",
    (unsigned long) ndx, (unsigned long) pfx->ImgListLen);
  return -1;
}
/* Channel to use when an element queries an image attribute/statistic:
   no qualifier -> composite; THIS_CHANNEL -> the channel currently being
   evaluated; otherwise the element's explicit qualifier. */
#define WHICH_ATTR_CHAN \
(pel->ChannelQual == NO_CHAN_QUAL) ? CompositePixelChannel : \
(pel->ChannelQual == THIS_CHANNEL) ? channel : pel->ChannelQual
/* Channel to use for direct pixel access: unqualified/this/composite map
   to the current channel (with composite falling back to red, since the
   pixel array has no composite entry); otherwise the explicit qualifier. */
#define WHICH_NON_ATTR_CHAN \
(pel->ChannelQual == NO_CHAN_QUAL || \
pel->ChannelQual == THIS_CHANNEL || \
pel->ChannelQual == CompositePixelChannel \
) ? (channel == CompositePixelChannel ? RedPixelChannel: channel) \
: pel->ChannelQual
static fxFltType GetHslFlt (FxInfo * pfx, ssize_t ImgNum, const fxFltType fx, const fxFltType fy,
int channel)
{
  /* Interpolated HSL component (channel is HUE_/SAT_/LIGHT_CHANNEL) at
     floating-point coordinates (fx,fy) of image ImgNum. */
  Image * img = pfx->Images[ImgNum];
  double rgb[3];
  double hue = 0, saturation = 0, lightness = 0;
  MagickBooleanType okay;

  okay = InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, RedPixelChannel, img->interpolate,
    (double) fx, (double) fy, &rgb[0], pfx->exception);
  if (!InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, GreenPixelChannel, img->interpolate,
    (double) fx, (double) fy, &rgb[1], pfx->exception)) okay = MagickFalse;
  if (!InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, BluePixelChannel, img->interpolate,
    (double) fx, (double) fy, &rgb[2], pfx->exception)) okay = MagickFalse;
  if (okay == MagickFalse)
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "GetHslFlt failure", "%lu %g,%g %i", (unsigned long) ImgNum,
      (double) fx, (double) fy, channel);
  ConvertRGBToHSL (
    rgb[0], rgb[1], rgb[2],
    &hue, &saturation, &lightness);
  switch (channel) {
    case HUE_CHANNEL:   return hue;
    case SAT_CHANNEL:   return saturation;
    case LIGHT_CHANNEL: return lightness;
    default:            return 0.0;
  }
}
static fxFltType GetHslInt (FxInfo * pfx, ssize_t ImgNum, const ssize_t imgx, const ssize_t imgy, int channel)
{
  /* HSL component (channel is HUE_/SAT_/LIGHT_CHANNEL) at integer pixel
     (imgx,imgy) of image ImgNum.  Returns 0.0 (plus an exception) if the
     pixel cache cannot be read. */
  double hue = 0, saturation = 0, lightness = 0;
  Image * img = pfx->Images[ImgNum];
  const Quantum * pix;

  pix = GetCacheViewVirtualPixels (pfx->Imgs[ImgNum].View, imgx, imgy, 1, 1, pfx->exception);
  if (pix == (const Quantum *) NULL)
    {
      (void) ThrowMagickException (pfx->exception,GetMagickModule(),
        OptionError,"GetHslInt failure","%lu %li,%li %i",(unsigned long) ImgNum,
        (long) imgx,(long) imgy,channel);
      return(0.0);
    }
  ConvertRGBToHSL (
    GetPixelRed (img, pix), GetPixelGreen (img, pix), GetPixelBlue (img, pix),
    &hue, &saturation, &lightness);
  switch (channel) {
    case HUE_CHANNEL:   return hue;
    case SAT_CHANNEL:   return saturation;
    case LIGHT_CHANNEL: return lightness;
    default:            return 0.0;
  }
}
static fxFltType inline GetIntensity (FxInfo * pfx, ssize_t ImgNum, const fxFltType fx, const fxFltType fy)
{
  /* Interpolated pixel intensity, scaled to [0,1], at floating-point
     coordinates (fx,fy) of image ImgNum. */
  Quantum
    quantum_pixel[MaxPixelChannels];
  PixelInfo
    pixelinf;
  Image * img = pfx->Images[ImgNum];
  (void) GetPixelInfo (img, &pixelinf);
  /* Bug fix: interpolate through the view of ImgNum (the image this call
     asks about), not pfx->ImgNum (the image currently being processed).
     The two differ when callers pass an explicit index, e.g. the fU and
     fUP handlers; the old code paired Images[ImgNum] with the wrong view. */
  if (!InterpolatePixelInfo (img, pfx->Imgs[ImgNum].View, img->interpolate,
    (double) fx, (double) fy, &pixelinf, pfx->exception))
  {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "GetIntensity failure", "%lu %g,%g", (unsigned long) ImgNum,
      (double) fx, (double) fy);
  }
  SetPixelViaPixelInfo (img, &pixelinf, quantum_pixel);
  return QuantumScale * GetPixelIntensity (img, quantum_pixel);
}
static MagickBooleanType ExecuteRPN (FxInfo * pfx, fxRtT * pfxrt, fxFltType *result,
const PixelChannel channel, const ssize_t imgx, const ssize_t imgy)
{
/* Interpret the compiled RPN element list for one channel of pixel
   (imgx,imgy) on this thread's run-time state pfxrt, storing the final
   value in *result.  For each element: pop its arguments (regA is the
   first argument and the result; regB..regE are further arguments),
   apply the operator/function/attribute, then push regA if the element's
   value feeds a later element. */
const Quantum * p = pfxrt->thisPixel;
fxFltType regA=0, regB=0, regC=0, regD=0, regE=0;
Image * img = pfx->image;
ChannelStatistics * cs = NULL;
MagickBooleanType NeedRelinq = MagickFalse;
double hue=0, saturation=0, lightness=0;
int i;
/* For -fx, this sets p to ImgNum 0.
for %[fx:...], this sets p to the current image.
Similarly img.
*/
if (!p) p = GetCacheViewVirtualPixels (
pfx->Imgs[pfx->ImgNum].View, imgx, imgy, 1, 1, pfx->exception);
if (p == (const Quantum *) NULL)
{
/* NOTE(review): the message text "GetHslInt failure" looks copy/pasted
   from GetHslInt(); the failing call here is GetCacheViewVirtualPixels().
   (Left unchanged: runtime string.) */
(void) ThrowMagickException (pfx->exception,GetMagickModule(),
OptionError,"GetHslInt failure","%lu %li,%li",(unsigned long)
pfx->ImgNum,(long) imgx,(long) imgy);
return(MagickFalse);
}
if (pfx->GotStats) {
cs = pfx->statistics[pfx->ImgNum];
} else if (pfx->NeedStats) {
/* Statistics gathered here are released at the end of this call. */
cs = CollectOneImgStats (pfx, pfx->Images[pfx->ImgNum]);
NeedRelinq = MagickTrue;
}
/* Following is only for expressions like "saturation", with no image specifier.
*/
if (pfx->NeedHsl) {
ConvertRGBToHSL (
GetPixelRed (img, p), GetPixelGreen (img, p), GetPixelBlue (img, p),
&hue, &saturation, &lightness);
}
for (i=0; i < pfx->usedElements; i++) {
ElementT *pel = &pfx->Elements[i];
/* Pop this element's arguments off the value stack; the most recently
   pushed value is the last argument. */
switch (pel->nArgs) {
case 0:
break;
case 1:
regA = PopVal (pfx, pfxrt, i);
break;
case 2:
regB = PopVal (pfx, pfxrt, i);
regA = PopVal (pfx, pfxrt, i);
break;
case 3:
regC = PopVal (pfx, pfxrt, i);
regB = PopVal (pfx, pfxrt, i);
regA = PopVal (pfx, pfxrt, i);
break;
case 4:
regD = PopVal (pfx, pfxrt, i);
regC = PopVal (pfx, pfxrt, i);
regB = PopVal (pfx, pfxrt, i);
regA = PopVal (pfx, pfxrt, i);
break;
case 5:
regE = PopVal (pfx, pfxrt, i);
regD = PopVal (pfx, pfxrt, i);
regC = PopVal (pfx, pfxrt, i);
regB = PopVal (pfx, pfxrt, i);
regA = PopVal (pfx, pfxrt, i);
break;
default:
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Too many args:", "%i", pel->nArgs);
break;
}
/* Dispatch on the operator/function/attribute this element encodes. */
switch (pel->oprNum) {
case oAddEq:
regA = (pfxrt->UserSymVals[pel->EleNdx] += regA);
break;
case oSubtractEq:
regA = (pfxrt->UserSymVals[pel->EleNdx] -= regA);
break;
case oMultiplyEq:
regA = (pfxrt->UserSymVals[pel->EleNdx] *= regA);
break;
case oDivideEq:
/* x /= a implemented as x *= 1/a, guarded against a near zero. */
regA = (pfxrt->UserSymVals[pel->EleNdx] *= PerceptibleReciprocal((double)regA));
break;
case oPlusPlus:
regA = pfxrt->UserSymVals[pel->EleNdx]++;
break;
case oSubSub:
regA = pfxrt->UserSymVals[pel->EleNdx]--;
break;
case oAdd:
regA += regB;
break;
case oSubtract:
regA -= regB;
break;
case oMultiply:
regA *= regB;
break;
case oDivide:
regA *= PerceptibleReciprocal((double)regB);
break;
case oModulus:
regA = fmod ((double) regA, fabs(floor((double) regB+0.5)));
break;
case oUnaryPlus:
/* Do nothing. */
break;
case oUnaryMinus:
regA = -regA;
break;
case oLshift:
/* Shifting by >= the bit width of size_t is undefined in C. */
if ((size_t) (regB+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException ( pfx->exception, GetMagickModule(),
OptionError, "undefined shift", "%g", (double) regB);
regA = (fxFltType) 0.0;
break;
}
regA = (fxFltType) ((size_t)(regA+0.5) << (size_t)(regB+0.5));
break;
case oRshift:
if ((size_t) (regB+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException ( pfx->exception, GetMagickModule(),
OptionError, "undefined shift", "%g", (double) regB);
regA = (fxFltType) 0.0;
break;
}
regA = (fxFltType) ((size_t)(regA+0.5) >> (size_t)(regB+0.5));
break;
case oEq:
/* Comparisons yield 1.0/0.0; equality is within MagickEpsilon. */
regA = fabs((double) (regA-regB)) < MagickEpsilon ? 1.0 : 0.0;
break;
case oNotEq:
regA = fabs((double) (regA-regB)) >= MagickEpsilon ? 1.0 : 0.0;
break;
case oLtEq:
regA = (regA <= regB) ? 1.0 : 0.0;
break;
case oGtEq:
regA = (regA >= regB) ? 1.0 : 0.0;
break;
case oLt:
regA = (regA < regB) ? 1.0 : 0.0;
break;
case oGt:
regA = (regA > regB) ? 1.0 : 0.0;
break;
case oLogAnd:
regA = (regA<=0) ? 0.0 : (regB > 0) ? 1.0 : 0.0;
break;
case oLogOr:
regA = (regA>0) ? 1.0 : (regB > 0.0) ? 1.0 : 0.0;
break;
case oLogNot:
regA = (regA==0) ? 1.0 : 0.0;
break;
case oBitAnd:
regA = (fxFltType) ((size_t)(regA+0.5) & (size_t)(regB+0.5));
break;
case oBitOr:
regA = (fxFltType) ((size_t)(regA+0.5) | (size_t)(regB+0.5));
break;
case oBitNot:
/* Old fx doesn't add 0.5. */
regA = (fxFltType) (~(size_t)(regA+0.5));
break;
case oPow:
regA = pow ((double) regA, (double) regB);
break;
case oQuery:
case oColon:
break;
case oOpenParen:
case oCloseParen:
case oOpenBracket:
case oCloseBracket:
case oOpenBrace:
case oCloseBrace:
break;
case oAssign:
pel->val = regA;
break;
case oNull: {
/* Constant element; colour constants carry one value per RGB channel. */
if (pel->type == etColourConstant) {
switch (channel) { default:
case (PixelChannel) 0:
regA = pel->val;
break;
case (PixelChannel) 1:
regA = pel->val1;
break;
case (PixelChannel) 2:
regA = pel->val2;
break;
}
} else {
regA = pel->val;
}
break;
}
case fAbs:
regA = fabs ((double) regA);
break;
#if defined(MAGICKCORE_HAVE_ACOSH)
case fAcosh:
regA = acosh ((double) regA);
break;
#endif
case fAcos:
regA = acos ((double) regA);
break;
#if defined(MAGICKCORE_HAVE_J1)
case fAiry:
/* airy(x) = (2*j1(pi x)/(pi x))^2, with airy(0) = 1. */
if (regA==0) regA = 1.0;
else {
fxFltType gamma = 2.0 * j1 ((MagickPI*regA)) / (MagickPI*regA);
regA = gamma * gamma;
}
break;
#endif
case fAlt:
regA = (fxFltType) (((ssize_t) regA) & 0x01 ? -1.0 : 1.0);
break;
#if defined(MAGICKCORE_HAVE_ASINH)
case fAsinh:
regA = asinh ((double) regA);
break;
#endif
case fAsin:
regA = asin ((double) regA);
break;
#if defined(MAGICKCORE_HAVE_ATANH)
case fAtanh:
regA = atanh ((double) regA);
break;
#endif
case fAtan2:
regA = atan2 ((double) regA, (double) regB);
break;
case fAtan:
regA = atan ((double) regA);
break;
case fCeil:
regA = ceil ((double) regA);
break;
case fChannel:
/* channel(r,g,b,a,k): pick the argument matching the current channel. */
switch (channel) {
case (PixelChannel) 0: break;
case (PixelChannel) 1: regA = regB; break;
case (PixelChannel) 2: regA = regC; break;
case (PixelChannel) 3: regA = regD; break;
case (PixelChannel) 4: regA = regE; break;
default: regA = 0.0;
}
break;
case fClamp:
if (regA < 0) regA = 0.0;
else if (regA > 1.0) regA = 1.0;
break;
case fCosh:
regA = cosh ((double) regA);
break;
case fCos:
regA = cos ((double) regA);
break;
case fDebug:
/* FIXME: debug() should give channel name. */
(void) fprintf (stderr, "%s[%g,%g].[%i]: %s=%.*g\n",
img->filename, (double) imgx, (double) imgy,
channel, SetPtrShortExp (pfx, pel->pExpStart, (size_t) (pel->lenExp+1)),
pfx->precision, (double) regA);
break;
case fDrc:
regA = regA / (regB*(regA-1.0) + 1.0);
break;
#if defined(MAGICKCORE_HAVE_ERF)
case fErf:
regA = erf ((double) regA);
break;
#endif
case fExp:
regA = exp ((double) regA);
break;
case fFloor:
regA = floor ((double) regA);
break;
case fGauss:
regA = exp((double) (-regA*regA/2.0))/sqrt(2.0*MagickPI);
break;
case fGcd:
if (!IsNaN(regA))
regA = FxGcd (regA, regB, 0);
break;
case fHypot:
regA = hypot ((double) regA, (double) regB);
break;
case fInt:
regA = floor ((double) regA);
break;
case fIsnan:
regA = (fxFltType) (!!IsNaN (regA));
break;
#if defined(MAGICKCORE_HAVE_J0)
case fJ0:
regA = j0 ((double) regA);
break;
#endif
#if defined(MAGICKCORE_HAVE_J1)
case fJ1:
regA = j1 ((double) regA);
break;
#endif
#if defined(MAGICKCORE_HAVE_J1)
case fJinc:
if (regA==0) regA = 1.0;
else regA = 2.0 * j1 ((MagickPI*regA))/(MagickPI*regA);
break;
#endif
case fLn:
regA = log ((double) regA);
break;
case fLogtwo:
regA = log10((double) regA) / log10(2.0);
break;
case fLog:
regA = log10 ((double) regA);
break;
case fMax:
regA = (regA > regB) ? regA : regB;
break;
case fMin:
regA = (regA < regB) ? regA : regB;
break;
case fMod:
regA = regA - floor((double) (regA*PerceptibleReciprocal((double) regB)))*regB;
break;
case fNot:
regA = (fxFltType) (regA < MagickEpsilon);
break;
case fPow:
regA = pow ((double) regA, (double) regB);
break;
case fRand: {
/* Serialized: the per-thread random_info update is guarded when OpenMP
   is active. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ExecuteRPN)
#endif
regA = GetPseudoRandomValue (pfxrt->random_info);
break;
}
case fRound:
regA = floor ((double) regA + 0.5);
break;
case fSign:
regA = (regA < 0) ? -1.0 : 1.0;
break;
case fSinc:
/* NOTE(review): unlike fJinc, the regA==0 case is not special-cased
   here, so sinc(0) divides by zero — confirm whether intended. */
regA = sin ((double) (MagickPI*regA)) / (MagickPI*regA);
break;
case fSinh:
regA = sinh ((double) regA);
break;
case fSin:
regA = sin ((double) regA);
break;
case fSqrt:
regA = sqrt ((double) regA);
break;
case fSquish:
regA = 1.0 / (1.0 + exp ((double) -regA));
break;
case fTanh:
regA = tanh ((double) regA);
break;
case fTan:
regA = tan ((double) regA);
break;
case fTrunc:
if (regA >= 0) regA = floor ((double) regA);
else regA = ceil ((double) regA);
break;
case fDo:
case fFor:
case fIf:
case fWhile:
break;
case fU: {
/* Note: 1 value is available, index into image list.
May have ImgAttr qualifier or channel qualifier or both.
*/
ssize_t ImgNum = ChkImgNum (pfx, regA);
if (ImgNum < 0) break;
regA = (fxFltType) 0;
if (ImgNum == 0) {
Image * pimg = pfx->Images[0];
int pech = (int)pel->ChannelQual;
if (pel->ImgAttrQual == aNull) {
if (pech < 0) {
if (pech == NO_CHAN_QUAL || pech == THIS_CHANNEL) {
if (pfx->ImgNum==0) {
regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
} else {
const Quantum * pv = GetCacheViewVirtualPixels (
pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception);
if (!pv) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fU can't get cache", "%lu", (unsigned long) ImgNum);
break;
}
regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
}
} else if (pech == HUE_CHANNEL || pech == SAT_CHANNEL ||
pech == LIGHT_CHANNEL) {
regA = GetHslInt (pfx, ImgNum, imgx, imgy, pech);
break;
} else if (pech == INTENSITY_CHANNEL) {
regA = GetIntensity (pfx, 0, (double) imgx, (double) imgy);
break;
}
} else {
if (pfx->ImgNum==0) {
regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
} else {
const Quantum * pv = GetCacheViewVirtualPixels (
pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception);
if (!pv) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fU can't get cache", "%lu", (unsigned long) ImgNum);
break;
}
regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
}
}
} else {
/* we have an image attribute */
regA = ImageStat (pfx, 0, WHICH_ATTR_CHAN, pel->ImgAttrQual);
}
} else {
/* We have non-zero ImgNum. */
if (pel->ImgAttrQual == aNull) {
const Quantum * pv;
if ((int)pel->ChannelQual < 0) {
if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL ||
pel->ChannelQual == LIGHT_CHANNEL)
{
regA = GetHslInt (pfx, ImgNum, imgx, imgy, pel->ChannelQual);
break;
} else if (pel->ChannelQual == INTENSITY_CHANNEL)
{
regA = GetIntensity (pfx, ImgNum, (fxFltType) imgx, (fxFltType) imgy);
break;
}
}
pv = GetCacheViewVirtualPixels (
pfx->Imgs[ImgNum].View, imgx, imgy, 1,1, pfx->exception);
if (!pv) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fU can't get cache", "%lu", (unsigned long) ImgNum);
break;
}
regA = QuantumScale *
pv[pfx->Images[ImgNum]->channel_map[WHICH_NON_ATTR_CHAN].offset];
} else {
regA = ImageStat (pfx, ImgNum, WHICH_ATTR_CHAN, pel->ImgAttrQual);
}
}
break;
}
case fU0: {
/* No args. No image attribute. We may have a ChannelQual.
If called from %[fx:...], ChannelQual will be CompositePixelChannel.
*/
Image * pimg = pfx->Images[0];
int pech = (int)pel->ChannelQual;
if (pech < 0) {
if (pech == NO_CHAN_QUAL || pech == THIS_CHANNEL) {
if (pfx->ImgNum==0) {
regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
} else {
const Quantum * pv = GetCacheViewVirtualPixels (
pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception);
if (!pv) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fU0 can't get cache", "%i", 0);
break;
}
regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
}
} else if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL ||
pel->ChannelQual == LIGHT_CHANNEL) {
regA = GetHslInt (pfx, 0, imgx, imgy, pel->ChannelQual);
break;
} else if (pel->ChannelQual == INTENSITY_CHANNEL) {
regA = GetIntensity (pfx, 0, (fxFltType) imgx, (fxFltType) imgy);
}
} else {
if (pfx->ImgNum==0) {
regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
} else {
const Quantum * pv = GetCacheViewVirtualPixels (
pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception);
if (!pv) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fU0 can't get cache", "%i", 0);
break;
}
regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
}
}
break;
}
case fUP: {
/* 3 args are: ImgNum, x, y */
ssize_t ImgNum = ChkImgNum (pfx, regA);
fxFltType fx, fy;
if (ImgNum < 0) break;
if (pel->IsRelative) {
fx = imgx + regB;
fy = imgy + regC;
} else {
fx = regB;
fy = regC;
}
if ((int)pel->ChannelQual < 0) {
if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL
|| pel->ChannelQual == LIGHT_CHANNEL) {
regA = GetHslFlt (pfx, ImgNum, fx, fy, pel->ChannelQual);
break;
} else if (pel->ChannelQual == INTENSITY_CHANNEL) {
regA = GetIntensity (pfx, ImgNum, fx, fy);
break;
}
}
{
double v;
Image * imUP = pfx->Images[ImgNum];
if (! InterpolatePixelChannel (imUP, pfx->Imgs[ImgNum].View, WHICH_NON_ATTR_CHAN,
imUP->interpolate, (double) fx, (double) fy, &v, pfx->exception))
{
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fUP can't get interpolate", "%lu", (unsigned long) ImgNum);
break;
}
regA = v * QuantumScale;
}
break;
}
case fS:
case fV: {
/* No args.  fS refers to the current image; fV to image 1. */
ssize_t ImgNum = 1;
if (pel->oprNum == fS) ImgNum = pfx->ImgNum;
if (pel->ImgAttrQual == aNull) {
const Quantum * pv = GetCacheViewVirtualPixels (
pfx->Imgs[ImgNum].View, imgx, imgy, 1,1, pfx->exception);
if (!pv) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fV can't get cache", "%lu", (unsigned long) ImgNum);
break;
}
if ((int)pel->ChannelQual < 0) {
if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL ||
pel->ChannelQual == LIGHT_CHANNEL) {
regA = GetHslInt (pfx, ImgNum, imgx, imgy, pel->ChannelQual);
break;
} else if (pel->ChannelQual == INTENSITY_CHANNEL) {
regA = GetIntensity (pfx, ImgNum, (double) imgx, (double) imgy);
break;
}
}
regA = QuantumScale *
pv[pfx->Images[ImgNum]->channel_map[WHICH_NON_ATTR_CHAN].offset];
} else {
regA = ImageStat (pfx, ImgNum, WHICH_ATTR_CHAN, pel->ImgAttrQual);
}
break;
}
case fP:
case fSP:
case fVP: {
/* 2 args are: x, y */
fxFltType fx, fy;
ssize_t ImgNum = pfx->ImgNum;
if (pel->oprNum == fVP) ImgNum = 1;
if (pel->IsRelative) {
fx = imgx + regA;
fy = imgy + regB;
} else {
fx = regA;
fy = regB;
}
if ((int)pel->ChannelQual < 0) {
if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL ||
pel->ChannelQual == LIGHT_CHANNEL) {
regA = GetHslFlt (pfx, ImgNum, fx, fy, pel->ChannelQual);
break;
} else if (pel->ChannelQual == INTENSITY_CHANNEL) {
regA = GetIntensity (pfx, ImgNum, fx, fy);
}
}
{
double v;
if (! InterpolatePixelChannel (pfx->Images[ImgNum], pfx->Imgs[ImgNum].View,
WHICH_NON_ATTR_CHAN, pfx->Images[ImgNum]->interpolate,
(double) fx, (double) fy, &v, pfx->exception)
)
{
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fSP or fVP can't get interp", "%lu", (unsigned long) ImgNum);
break;
}
regA = v * (fxFltType)QuantumScale;
}
break;
}
case fNull:
break;
case aDepth:
regA = (fxFltType) GetImageDepth (img, pfx->exception);
break;
case aExtent:
regA = (fxFltType) img->extent;
break;
case aKurtosis:
if (cs != (ChannelStatistics *) NULL)
regA = cs[WHICH_ATTR_CHAN].kurtosis;
break;
case aMaxima:
if (cs != (ChannelStatistics *) NULL)
regA = cs[WHICH_ATTR_CHAN].maxima;
break;
case aMean:
if (cs != (ChannelStatistics *) NULL)
regA = cs[WHICH_ATTR_CHAN].mean;
break;
case aMedian:
if (cs != (ChannelStatistics *) NULL)
regA = cs[WHICH_ATTR_CHAN].median;
break;
case aMinima:
if (cs != (ChannelStatistics *) NULL)
regA = cs[WHICH_ATTR_CHAN].minima;
break;
case aPage:
break;
case aPageX:
regA = (fxFltType) img->page.x;
break;
case aPageY:
regA = (fxFltType) img->page.y;
break;
case aPageWid:
regA = (fxFltType) img->page.width;
break;
case aPageHt:
regA = (fxFltType) img->page.height;
break;
case aPrintsize:
break;
case aPrintsizeX:
regA = (fxFltType) PerceptibleReciprocal (img->resolution.x) * img->columns;
break;
case aPrintsizeY:
regA = (fxFltType) PerceptibleReciprocal (img->resolution.y) * img->rows;
break;
case aQuality:
regA = (fxFltType) img->quality;
break;
case aRes:
break;
case aResX:
regA = (fxFltType) img->resolution.x;
break;
case aResY:
regA = (fxFltType) img->resolution.y;
break;
case aSkewness:
if (cs != (ChannelStatistics *) NULL)
regA = cs[WHICH_ATTR_CHAN].skewness;
break;
case aStdDev:
if (cs != (ChannelStatistics *) NULL)
regA = cs[WHICH_ATTR_CHAN].standard_deviation;
break;
case aH: /* image->rows */
regA = (fxFltType) img->rows;
break;
case aN: /* image list length */
regA = (fxFltType) pfx->ImgListLen;
break;
case aT: /* image index in list */
regA = (fxFltType) pfx->ImgNum;
break;
case aW: /* image->columns */
regA = (fxFltType) img->columns;
break;
case aZ: /* image depth */
regA = (fxFltType) GetImageDepth (img, pfx->exception);
break;
case aNull:
break;
case sHue: /* of conversion to HSL */
regA = hue;
break;
case sIntensity:
regA = GetIntensity (pfx, pfx->ImgNum, (double) imgx, (double) imgy);
break;
case sLightness: /* of conversion to HSL */
regA = lightness;
break;
case sLuma: /* calculation */
case sLuminance: /* as Luma */
regA = QuantumScale * (0.212656 * GetPixelRed (img,p) +
0.715158 * GetPixelGreen (img,p) +
0.072186 * GetPixelBlue (img,p));
break;
case sSaturation: /* from conversion to HSL */
regA = saturation;
break;
case sA: /* alpha */
regA = QuantumScale * GetPixelAlpha (img, p);
break;
case sB: /* blue */
regA = QuantumScale * GetPixelBlue (img, p);
break;
case sC: /* red (ie cyan) */
regA = QuantumScale * GetPixelCyan (img, p);
break;
case sG: /* green */
regA = QuantumScale * GetPixelGreen (img, p);
break;
case sI: /* current x-coordinate */
regA = (fxFltType) imgx;
break;
case sJ: /* current y-coordinate */
regA = (fxFltType) imgy;
break;
case sK: /* black of CMYK */
regA = QuantumScale * GetPixelBlack (img, p);
break;
case sM: /* green (ie magenta) */
regA = QuantumScale * GetPixelGreen (img, p);
break;
case sO: /* alpha */
regA = QuantumScale * GetPixelAlpha (img, p);
break;
case sR:
regA = QuantumScale * GetPixelRed (img, p);
break;
case sY:
regA = QuantumScale * GetPixelYellow (img, p);
break;
case sNull:
break;
case rGoto:
assert (pel->EleNdx >= 0);
i = pel->EleNdx-1; /* -1 because 'for' loop will increment. */
break;
case rIfZeroGoto:
assert (pel->EleNdx >= 0);
if (fabs((double) regA) < MagickEpsilon) i = pel->EleNdx-1;
break;
case rIfNotZeroGoto:
assert (pel->EleNdx >= 0);
if (fabs((double) regA) > MagickEpsilon) i = pel->EleNdx-1;
break;
case rCopyFrom:
assert (pel->EleNdx >= 0);
regA = pfxrt->UserSymVals[pel->EleNdx];
break;
case rCopyTo:
assert (pel->EleNdx >= 0);
pfxrt->UserSymVals[pel->EleNdx] = regA;
break;
case rZerStk:
pfxrt->usedValStack = 0;
break;
case rNull:
break;
default:
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"pel->oprNum", "%i '%s' not yet implemented",
(int)pel->oprNum, OprStr(pel->oprNum));
break;
}
/* i can only go negative via a goto whose target EleNdx was 0. */
if (i < 0) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Bad run-time address", "%i", i);
}
/* A push failure (stack overflow) aborts the whole program: this break
   leaves the element loop. */
if (pel->DoPush)
if (!PushVal (pfx, pfxrt, regA, i)) break;
}
/* The statement list's final value is whatever remains on top of the
   stack (or the last regA if the stack is empty). */
if (pfxrt->usedValStack > 0) regA = PopVal (pfx, pfxrt, 9999);
*result = regA;
if (NeedRelinq) cs = (ChannelStatistics *)RelinquishMagickMemory (cs);
if (pfx->exception->severity != UndefinedException) {
return MagickFalse;
}
if (pfxrt->usedValStack != 0) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"ValStack not empty", "(%i)", pfxrt->usedValStack);
return MagickFalse;
}
return MagickTrue;
}
/* Following is substitute for FxEvaluateChannelExpression().
*/
MagickPrivate MagickBooleanType FxEvaluateChannelExpression (
FxInfo *pfx,
const PixelChannel channel, const ssize_t x, const ssize_t y,
double *result, ExceptionInfo *exception)
{
  /* Substitute for the public FxEvaluateChannelExpression(): evaluate the
     compiled RPN program for one channel of pixel (x,y) on this thread's
     private run-time state.  On success returns MagickTrue and stores the
     value in *result; on failure raises an exception and returns
     MagickFalse. */
  const int
    id = GetOpenMPThreadId();
  fxFltType ret;
  assert (pfx != NULL);
  assert (pfx->image != NULL);
  assert (pfx->Images != NULL);
  assert (pfx->Imgs != NULL);
  assert (pfx->fxrts != NULL);
  /* NULL forces ExecuteRPN to fetch the pixel from the cache view. */
  pfx->fxrts[id].thisPixel = NULL;
  if (!ExecuteRPN (pfx, &pfx->fxrts[id], &ret, channel, x, y)) {
    (void) ThrowMagickException (
      exception, GetMagickModule(), OptionError,
      "ExecuteRPN failed", " ");   /* fixed typo: was "ExcuteRPN" */
    return MagickFalse;
  }
  *result = (double) ret;
  return MagickTrue;
}
/* Allocates and fully initialises an FxInfo for `expression` over `images`.
   When CalcAllStats is true, channel statistics for all images are
   pre-computed.  An expression of the form "@filename" is read from the
   named file (subject to policy authorisation).

   Returns NULL on any failure; all partially-acquired resources are
   released before returning.
*/
static FxInfo *AcquireFxInfoPrivate (const Image * images, const char * expression,
  MagickBooleanType CalcAllStats, ExceptionInfo *exception)
{
  char chLimit;

  FxInfo * pfx = (FxInfo*) AcquireCriticalMemory (sizeof (*pfx));

  memset (pfx, 0, sizeof (*pfx));

  if (!InitFx (pfx, images, CalcAllStats, exception)) {
    pfx = (FxInfo*) RelinquishMagickMemory(pfx);
    return NULL;
  }

  if (!BuildRPN (pfx)) {
    (void) DeInitFx (pfx);
    pfx = (FxInfo*) RelinquishMagickMemory(pfx);
    return((FxInfo *) NULL);
  }

  if ((*expression == '@') && (strlen(expression) > 1))
    {
      MagickBooleanType
        status;

      /*
        Read expression from a file.
      */
      status=IsRightsAuthorized(PathPolicyDomain,ReadPolicyRights,expression);
      if (status != MagickFalse)
        pfx->expression=FileToString(expression+1,~0UL,exception);
      else
        {
          errno=EPERM;
          (void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
            "NotAuthorized","`%s'",expression);
        }
    }
  /* Fall back to the literal expression text (also covers a denied "@"). */
  if (pfx->expression == (char *) NULL)
    pfx->expression=ConstantString(expression);
  pfx->pex = (char *) pfx->expression;

  pfx->teDepth = 0;
  if (!TranslateStatementList (pfx, ";", &chLimit)) {
    (void) DestroyRPN (pfx);
    pfx->expression = DestroyString (pfx->expression);
    pfx->pex = NULL;
    (void) DeInitFx (pfx);
    pfx = (FxInfo*) RelinquishMagickMemory(pfx);
    return NULL;
  }

  /* A non-zero depth means unbalanced parentheses/ternaries in the input. */
  if (pfx->teDepth) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Translate expression depth", "(%i) not 0",
      pfx->teDepth);

    (void) DestroyRPN (pfx);
    pfx->expression = DestroyString (pfx->expression);
    pfx->pex = NULL;
    (void) DeInitFx (pfx);
    pfx = (FxInfo*) RelinquishMagickMemory(pfx);
    return NULL;
  }

  /* Translation must consume the whole expression (terminated by NUL or ';'). */
  if (chLimit != '\0' && chLimit != ';') {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "AcquireFxInfo: TranslateExpression did not exhaust input", "(chLimit=%i) at'%s'",
      (int)chLimit, pfx->pex);

    (void) DestroyRPN (pfx);
    pfx->expression = DestroyString (pfx->expression);
    pfx->pex = NULL;
    (void) DeInitFx (pfx);
    pfx = (FxInfo*) RelinquishMagickMemory(pfx);
    return NULL;
  }

  if (pfx->NeedStats && pfx->runType == rtEntireImage && !pfx->statistics) {
    if (!CollectStatistics (pfx)) {
      (void) DestroyRPN (pfx);
      pfx->expression = DestroyString (pfx->expression);
      pfx->pex = NULL;
      (void) DeInitFx (pfx);
      pfx = (FxInfo*) RelinquishMagickMemory(pfx);
      return NULL;
    }
  }

  if (pfx->DebugOpt) {
    DumpTables (stderr);
    DumpUserSymbols (pfx, stderr);
    (void) DumpRPN (pfx, stderr);
  }

  /* Acquire one run-time structure per potential worker thread. */
  {
    size_t number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
    ssize_t t;

    pfx->fxrts = (fxRtT *)AcquireQuantumMemory (number_threads, sizeof(fxRtT));
    if (!pfx->fxrts) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), ResourceLimitFatalError,
        "fxrts", "%lu",
        (unsigned long) number_threads);
      (void) DestroyRPN (pfx);
      pfx->expression = DestroyString (pfx->expression);
      pfx->pex = NULL;
      (void) DeInitFx (pfx);
      pfx = (FxInfo*) RelinquishMagickMemory(pfx);
      return NULL;
    }
    for (t=0; t < (ssize_t) number_threads; t++) {
      if (!AllocFxRt (pfx, &pfx->fxrts[t])) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), ResourceLimitFatalError,
          "AllocFxRt t=", "%g",
          (double) t);
        {
          ssize_t t2;
          /* BUG FIX: was DestroyFxRt (&pfx->fxrts[t]) — that destroyed the
             (never-allocated) failed slot t repeatedly and leaked every
             successfully-allocated slot.  Destroy slots t-1 .. 0 instead. */
          for (t2 = t-1; t2 >= 0; t2--) {
            DestroyFxRt (&pfx->fxrts[t2]);
          }
        }
        pfx->fxrts = (fxRtT *) RelinquishMagickMemory (pfx->fxrts);
        (void) DestroyRPN (pfx);
        pfx->expression = DestroyString (pfx->expression);
        pfx->pex = NULL;
        (void) DeInitFx (pfx);
        pfx = (FxInfo*) RelinquishMagickMemory(pfx);
        return NULL;
      }
    }
  }
  return pfx;
}
/* Public wrapper around AcquireFxInfoPrivate(): acquires an FxInfo without
   forcing the pre-computation of all channel statistics. */
FxInfo *AcquireFxInfo (const Image * images, const char * expression, ExceptionInfo *exception)
{
  FxInfo
    *pfx;

  pfx = AcquireFxInfoPrivate (images, expression, MagickFalse, exception);
  return pfx;
}
/* Releases everything AcquireFxInfoPrivate() allocated: the per-thread
   run-time structures, the RPN program, the expression string, and the
   FxInfo itself.  Always returns NULL so callers can write
   "pfx = DestroyFxInfo (pfx);".

   NOTE(review): assumes the ThreadResource limit has not changed since
   acquisition, so the loop bound matches the number of allocated fxrts —
   verify.
*/
FxInfo *DestroyFxInfo (FxInfo * pfx)
{
  ssize_t t;

  assert (pfx != NULL);
  assert (pfx->image != NULL);
  assert (pfx->Images != NULL);
  assert (pfx->Imgs != NULL);
  assert (pfx->fxrts != NULL);

  /* Tear down each per-thread run-time structure, then the array itself. */
  for (t=0; t < (ssize_t) GetMagickResourceLimit(ThreadResource); t++) {
    DestroyFxRt (&pfx->fxrts[t]);
  }
  pfx->fxrts = (fxRtT *) RelinquishMagickMemory (pfx->fxrts);

  DestroyRPN (pfx);

  pfx->expression = DestroyString (pfx->expression);
  pfx->pex = NULL;

  (void) DeInitFx (pfx);

  pfx = (FxInfo*) RelinquishMagickMemory(pfx);

  return NULL;
}
/* Following is substitute for FxImage().
*/
/* Substitute for the public FxImage(): evaluates `expression` once per
   channel per pixel of `image` and returns the results as a new image.

   A NULL expression yields a plain clone.  Rows are processed in parallel
   (one RPN run-time per OpenMP thread); returns NULL on failure.
*/
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag  "FxNew/Image"

  CacheView
    *fx_view,
    *image_view;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  FxInfo
    *pfx;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);

  if (expression == (const char *) NULL)
    return(CloneImage(image,0,0,MagickTrue,exception));
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (!fx_image) return NULL;
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) {
    fx_image=DestroyImage(fx_image);
    return NULL;
  }

  /* CalcAllStats=MagickTrue: statistics may be needed by the expression. */
  pfx = AcquireFxInfoPrivate (image, expression, MagickTrue, exception);

  if (!pfx) {
    fx_image=DestroyImage(fx_image);
    return NULL;
  }

  assert (pfx->image != NULL);
  assert (pfx->Images != NULL);
  assert (pfx->Imgs != NULL);
  assert (pfx->fxrts != NULL);

  status=MagickTrue;
  progress=0;
  image_view = AcquireVirtualCacheView (image, pfx->exception);
  fx_view = AcquireAuthenticCacheView (fx_image, pfx->exception);

  /* Debug expressions are forced single-threaded so their output is coherent. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows, \
      pfx->ContainsDebug ? 0 : 1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    fxFltType
      result = 0.0;

    if (status == MagickFalse)
      continue;
    p = GetCacheViewVirtualPixels (image_view, 0, y, image->columns, 1, pfx->exception);
    q = QueueCacheViewAuthenticPixels (fx_view, 0, y, fx_image->columns, 1, pfx->exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) {
      status=MagickFalse;
      continue;
    }
    for (x=0; x < (ssize_t) fx_image->columns; x++) {
      ssize_t i;

      /* Make the source pixel visible to this thread's RPN run-time. */
      pfx->fxrts[id].thisPixel = (Quantum *)p;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel (image, i);
        PixelTrait traits = GetPixelChannelTraits (image, channel);
        PixelTrait fx_traits = GetPixelChannelTraits (fx_image, channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        /* Channels marked copy-through bypass expression evaluation. */
        if ((fx_traits & CopyPixelTrait) != 0) {
          SetPixelChannel (fx_image, channel, p[i], q);
          continue;
        }

        if (!ExecuteRPN (pfx, &pfx->fxrts[id], &result, channel, x, y)) {
          status=MagickFalse;
          break;
        }

        /* Result is normalised [0,1]; scale to the quantum range and clamp. */
        q[i] = ClampToQuantum ((MagickRealType) (QuantumRange*result));
      }
      p+=GetPixelChannels (image);
      q+=GetPixelChannels (fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view, pfx->exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed = SetImageProgress (image, FxImageTag, progress, image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }

  fx_view = DestroyCacheView (fx_view);
  image_view = DestroyCacheView (image_view);

  /* Before destroying the user symbol values, dump them to stderr.
  */
  if (pfx->DebugOpt && pfx->usedUserSymbols) {
    int t, i;
    char UserSym[MagickPathExtent];

    /* Dump user symbols, per thread. */
    fprintf (stderr, "User symbols (%i):\n", pfx->usedUserSymbols);
    for (t=0; t < (int) GetMagickResourceLimit(ThreadResource); t++) {
      for (i = 0; i < (int) pfx->usedUserSymbols; i++) {
        fprintf (stderr, "th=%i us=%i '%s': %.*Lg\n",
                 t, i, NameOfUserSym (pfx, i, UserSym), pfx->precision, pfx->fxrts[t].UserSymVals[i]);
      }
    }
  }

  if (pfx->exception->severity != UndefinedException) {
    status = MagickFalse;
  }

  if (status == MagickFalse)
    fx_image = DestroyImage (fx_image);

  pfx = DestroyFxInfo (pfx);

  return(fx_image);
}
|
image_random-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file image_random-inl.h
* \brief
* \author
*/
#ifndef MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#define MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#include <algorithm>
#include <cmath>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>
#include "mxnet/base.h"
#include "../mxnet_op.h"
#include "../operator_common.h"
#if MXNET_USE_OPENCV
#include <opencv2/opencv.hpp>
#endif // MXNET_USE_OPENCV
namespace mxnet {
namespace op {
namespace image {
// There are no parameters for this operator.
// Hence, no parameter registration.
// Shape and Type inference for image to tensor operator
// Shape inference for to_tensor: moves the trailing channel axis to the
// front, i.e. HWC -> CHW for a single image, NHWC -> NCHW for a batch.
// Returns false while the input shape is still unknown.
inline bool ToTensorShape(const nnvm::NodeAttrs& attrs,
                          std::vector<TShape> *in_attrs,
                          std::vector<TShape> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TShape &in_shape = (*in_attrs)[0];
  if (!in_shape.ndim()) {
    return false;
  }
  CHECK((in_shape.ndim() == 3) || (in_shape.ndim() == 4))
      << "Input image must have shape (height, width, channels), or "
      << "(N, height, width, channels) but got " << in_shape;
  if (in_shape.ndim() == 3) {
    const TShape chw({in_shape[2], in_shape[0], in_shape[1]});
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, chw);
  } else if (in_shape.ndim() == 4) {
    const TShape nchw({in_shape[0], in_shape[3], in_shape[1], in_shape[2]});
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, nchw);
  }
  return true;
}
// Type inference for to_tensor: the output is always float32; inference
// succeeds once the input dtype is known (i.e. not -1).
inline bool ToTensorType(const nnvm::NodeAttrs& attrs,
                         std::vector<int> *in_attrs,
                         std::vector<int> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat32);
  const int in_dtype = (*in_attrs)[0];
  return in_dtype != -1;
}
// Operator Implementation
// Kernel for to_tensor: each Map() call handles one channel `c`, copying
// the interleaved (HWC) input plane into planar (CHW) output while
// dividing by normalize_factor (default 255 -> values in [0, 1]).
// length = H*W, channel = C, step = flat offset of this image in a batch.
template<int req>
struct totensor_forward {
  template<typename DType>
  MSHADOW_XINLINE static void Map(uint32_t c, float* out_data, const DType* in_data,
                                  const int length, const int channel, const int step,
                                  const float normalize_factor = 255.0f) {
    // NOTE(review): `omp parallel for` inside a Map() that Kernel::Launch
    // already distributes over channels — presumably intended to parallelise
    // the per-channel copy; verify it does not oversubscribe threads.
    #pragma omp parallel for
    for (int i = 0; i < length; ++i) {
      KERNEL_ASSIGN(out_data[step + c*length + i], req,
                    (in_data[step + i*channel + c]) / normalize_factor);
    }
  }
};
// Launches totensor_forward for one image: dispatches on the input dtype
// and the write request type, then runs one kernel instance per channel.
// `step` selects the image inside a batched buffer (0 for unbatched).
template<typename xpu>
void ToTensorImpl(const OpContext &ctx,
                  const std::vector<TBlob> &inputs,
                  const std::vector<TBlob> &outputs,
                  const std::vector<OpReqType> &req,
                  const int length,
                  const uint32_t channel,
                  const int step = 0) {
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();

  MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      float* output = outputs[0].dptr<float>();
      DType* input = inputs[0].dptr<DType>();

      // Launch over `channel` work items; each handles one output plane.
      mxnet_op::Kernel<totensor_forward<req_type>, xpu>::Launch(
          s, channel, output, input, length, channel, step);
    });
  });
}
// Forward entry point for to_tensor.  Accepts a single HWC image or an
// NHWC batch; rejects in-place execution because input and output layouts
// differ.  Batched images are processed in parallel, one per iteration.
template<typename xpu>
void ToTensorOpForward(const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  CHECK_EQ(req[0], kWriteTo)
    << "`to_tensor` does not support inplace updates";

  // 3D Input - (h, w, c)
  if (inputs[0].ndim() == 3) {
    const int length = inputs[0].shape_[0] * inputs[0].shape_[1];
    const uint32_t channel = inputs[0].shape_[2];
    ToTensorImpl<xpu>(ctx, inputs, outputs, req, length, channel);
  } else if (inputs[0].ndim() == 4) {
    // 4D input (n, h, w, c)
    const int batch_size = inputs[0].shape_[0];
    const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
    const uint32_t channel = inputs[0].shape_[3];
    // step = elements per image; offsets each batch item into the buffer.
    const int step = channel * length;

    #pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      ToTensorImpl<xpu>(ctx, inputs, outputs, req, length, channel, n*step);
    }
  }
}
// Parameters for the Normalize operator, applied per channel as
// out = (in - mean[c]) / std[c].  Each tuple may hold one value
// (broadcast to all channels) or one value per channel.
struct NormalizeParam : public dmlc::Parameter<NormalizeParam> {
  // Per-channel means subtracted from the input.
  nnvm::Tuple<float> mean;
  // Per-channel standard deviations the result is divided by.
  nnvm::Tuple<float> std;
  DMLC_DECLARE_PARAMETER(NormalizeParam) {
    DMLC_DECLARE_FIELD(mean)
    .set_default(nnvm::Tuple<float> {0.0f, 0.0f, 0.0f, 0.0f})
    .describe("Sequence of means for each channel. "
              "Default value is 0.");
    DMLC_DECLARE_FIELD(std)
    .set_default(nnvm::Tuple<float> {1.0f, 1.0f, 1.0f, 1.0f})
    .describe("Sequence of standard deviations for each channel. "
              "Default value is 1.");
  }
};
// Shape and Type inference for image Normalize operator
// Shape inference
// Shape inference for Normalize: input must be CHW or NCHW with 1 or 3
// channels; output shape equals input shape.  Also validates that the
// mean/std tuples have either 1 element or one per channel.
inline bool NormalizeOpShape(const nnvm::NodeAttrs& attrs,
                             std::vector<TShape> *in_attrs,
                             std::vector<TShape> *out_attrs) {
  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  const auto& dshape = (*in_attrs)[0];
  if (!dshape.ndim()) return false;

  CHECK((dshape.ndim() == 3) || (dshape.ndim() == 4))
      << "Input tensor must have shape (channels, height, width), or "
      << "(N, channels, height, width), but got " << dshape;

  // Guaranteed to be assigned below: ndim is 3 or 4 after the CHECK above.
  uint32_t nchannels;
  if (dshape.ndim() == 3) {
    nchannels = dshape[0];
    CHECK(nchannels == 3 || nchannels == 1)
      << "The first dimension of input tensor must be the channel dimension with "
      << "either 1 or 3 elements, but got input with shape " << dshape;
  } else if (dshape.ndim() == 4) {
    nchannels = dshape[1];
    CHECK(nchannels == 3 || nchannels == 1)
      << "The second dimension of input tensor must be the channel dimension with "
      << "either 1 or 3 elements, but got input with shape " << dshape;
  }

  CHECK((param.mean.ndim() == 1) || (param.mean.ndim() == nchannels))
    << "Invalid mean for input with shape " << dshape
    << ". mean must have either 1 or " << nchannels
    << " elements, but got " << param.mean;

  CHECK(param.std.ndim() == 1 || param.std.ndim() == nchannels)
    << "Invalid std for input with shape " << dshape
    << ". std must have either 1 or " << nchannels
    << " elements, but got " << param.std;

  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Type Inference
// Type inference for Normalize: input and output share the same dtype
// (propagated in both directions); succeeds once that dtype is known.
inline bool NormalizeOpType(const nnvm::NodeAttrs& attrs,
                            std::vector<int>* in_attrs,
                            std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);

  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;
}
// Kernel for Normalize forward: element j of channel plane i becomes
// (in - mean) / std_dev.  `step` offsets into a batched buffer.
template<int req>
struct normalize_forward {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int j, DType* out_data, const DType* in_data,
                                  const int i, const int length, const int step,
                                  const DType mean, const DType std_dev) {
    KERNEL_ASSIGN(out_data[step + i*length + j], req,
                  (in_data[step + i*length + j] - mean) / std_dev);
  }
};
// Normalizes one image: for each channel plane, launches a kernel over its
// `length` elements with that channel's mean/std (a 1-element tuple is
// broadcast to all channels).  `step` selects the image within a batch.
template<typename xpu>
void NormalizeImpl(const OpContext &ctx,
                   const std::vector<TBlob> &inputs,
                   const std::vector<TBlob> &outputs,
                   const std::vector<OpReqType> &req,
                   const NormalizeParam &param,
                   const int length,
                   const uint32_t channel,
                   const int step = 0) {
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();

  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      DType* input = inputs[0].dptr<DType>();
      DType* output = outputs[0].dptr<DType>();

      for (uint32_t i = 0; i < channel; ++i) {
        // Index 0 is reused when the tuple holds a single broadcast value.
        DType mean = param.mean[param.mean.ndim() > i ? i : 0];
        DType std_dev = param.std[param.std.ndim() > i ? i : 0];
        mxnet_op::Kernel<normalize_forward<req_type>, xpu>::Launch(
            s, length, output, input,
            i, length, step, mean, std_dev);
      }
    });
  });
}
// Forward entry point for Normalize.  Accepts a CHW image or an NCHW
// batch; batch items are normalized in parallel, one per iteration.
template<typename xpu>
void NormalizeOpForward(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<TBlob> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);

  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);

  // 3D input (c, h, w)
  if (inputs[0].ndim() == 3) {
    const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
    const uint32_t channel = inputs[0].shape_[0];
    NormalizeImpl<xpu>(ctx, inputs, outputs, req, param, length, channel);
  } else if (inputs[0].ndim() == 4) {
    // 4D input (n, c, h, w)
    const int batch_size = inputs[0].shape_[0];
    const int length = inputs[0].shape_[2] * inputs[0].shape_[3];
    const uint32_t channel = inputs[0].shape_[1];
    // step = elements per image; offsets each batch item into the buffer.
    const int step = channel * length;

    #pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      NormalizeImpl<xpu>(ctx, inputs, outputs, req, param, length, channel, n*step);
    }
  }
}
// Backward function
// Kernel for Normalize backward: the forward is affine in the input, so
// the gradient is simply out_grad scaled by 1/std_dev (mean drops out).
template<int req>
struct normalize_backward {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int j, DType* in_grad, const DType* out_grad,
                                  const int i, const int length,
                                  const int step, const DType std_dev) {
    // d/dx{(x - mean) / std_dev} => (1 / std_dev)
    KERNEL_ASSIGN(in_grad[step + i*length + j], req,
                  out_grad[step + i*length + j] * (1.0 / std_dev));
  }
};
// Backward pass for one image: per channel plane, scales the incoming
// gradient by 1/std (1-element std tuple is broadcast).  `step` selects
// the image within a batched buffer.
template<typename xpu>
void NormalizeBackwardImpl(const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<TBlob> &outputs,
                           const std::vector<OpReqType> &req,
                           const NormalizeParam &param,
                           const int length,
                           const uint32_t channel,
                           const int step = 0) {
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    const TBlob& out_grad = inputs[0];
    const TBlob& in_grad = outputs[0];
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        for (uint32_t i = 0; i < channel; ++i) {
          // Index 0 is reused when std holds a single broadcast value.
          DType std_dev = param.std[param.std.ndim() > i ? i : 0];
          mxnet_op::Kernel<normalize_backward<req_type>, xpu>::Launch(
              s, length, in_grad.dptr<DType>(), out_grad.dptr<DType>(),
              i, length, step, std_dev);
        }
      });
    });
}
// Backward entry point for Normalize.  inputs[0] is the output gradient,
// inputs[1] the original input data (used only for its shape).
template<typename xpu>
void NormalizeOpBackward(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);

  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);

  // Note: inputs[0] is out_grad
  const TBlob& in_data = inputs[1];

  // 3D input (c, h, w)
  if (in_data.ndim() == 3) {
    const int length = in_data.shape_[1] * in_data.shape_[2];
    const uint32_t channel = in_data.shape_[0];
    NormalizeBackwardImpl<xpu>(ctx, inputs, outputs, req, param, length, channel);
  } else if (in_data.ndim() == 4) {
    // 4D input (n, c, h, w)
    const int batch_size = in_data.shape_[0];
    const int length = in_data.shape_[2] * in_data.shape_[3];
    const uint32_t channel = in_data.shape_[1];
    // step = elements per image; offsets each batch item into the buffer.
    const int step = channel * length;

    #pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      NormalizeBackwardImpl<xpu>(ctx, inputs, outputs, req, param, length, channel, n*step);
    }
  }
}
// Default float conversion: a plain (possibly truncating) static_cast,
// with no clamping.
template<typename DType>
inline DType saturate_cast(const float& src) {
  return static_cast<DType>(src);
}

// uint8_t specialization: clamp to [0, 255] before the truncating
// conversion so out-of-range values saturate instead of wrapping.
template<>
inline uint8_t saturate_cast(const float& src) {
  float clamped = src;
  if (clamped < 0.f) clamped = 0.f;
  if (clamped > 255.f) clamped = 255.f;
  return clamped;
}
// Shape inference shared by the colour-augmentation ops: the input must be
// a single HWC image whose last axis holds 1 or 3 channels; the output
// shape equals the input shape.
inline bool ImageShape(const nnvm::NodeAttrs& attrs,
                       std::vector<TShape> *in_attrs,
                       std::vector<TShape> *out_attrs) {
  TShape& dshape = (*in_attrs)[0];
  CHECK_EQ(dshape.ndim(), 3)
      << "Input image must have shape (height, width, channels), but got " << dshape;

  const auto nchannels = dshape[dshape.ndim()-1];
  const bool channels_ok = (nchannels == 3) || (nchannels == 1);
  CHECK(channels_ok)
      << "The last dimension of input image must be the channel dimension with "
      << "either 1 or 3 elements, but got input with shape " << dshape;

  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Reverse the tensor along `axis`, writing into dst (src == dst is fine
// since mirrored elements are swapped pairwise).  The buffer is viewed as
// (head, mid, tail) with mid == shape[axis]; slice j swaps with mid-1-j.
template<typename DType, int axis>
void FlipImpl(const TShape &shape, DType *src, DType *dst) {
  int head = 1;
  int tail = 1;
  const int mid = shape[axis];
  for (int d = 0; d < axis; ++d) {
    head *= shape[d];
  }
  for (uint32_t d = axis + 1; d < shape.ndim(); ++d) {
    tail *= shape[d];
  }

  for (int outer = 0; outer < head; ++outer) {
    for (int j = 0; j < mid / 2; ++j) {
      int lo = (outer * mid + j) * tail;
      int hi = lo + (mid - 2 * j - 1) * tail;
      for (int k = 0; k < tail; ++k, ++lo, ++hi) {
        const DType saved = src[lo];
        dst[lo] = src[hi];
        dst[hi] = saved;
      }
    }
  }
}
// Horizontal flip of an HWC image: reverses axis 1 (width).
inline void FlipLeftRight(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
                       outputs[0].dptr<DType>());
  });
}
// Vertical flip of an HWC image: reverses axis 0 (height).
inline void FlipTopBottom(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
                       outputs[0].dptr<DType>());
  });
}
// With probability 0.5 (default bernoulli_distribution), passes the image
// through unchanged (copying if out-of-place); otherwise flips horizontally.
inline void RandomFlipLeftRight(
    const nnvm::NodeAttrs &attrs,
    const OpContext &ctx,
    const std::vector<TBlob> &inputs,
    const std::vector<OpReqType> &req,
    const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    if (std::bernoulli_distribution()(prnd->GetRndEngine())) {
      // Identity branch: only copy when input and output buffers differ.
      if (outputs[0].dptr_ != inputs[0].dptr_) {
        std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
      }
    } else {
      FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
                         outputs[0].dptr<DType>());
    }
  });
}
// With probability 0.5 (default bernoulli_distribution), passes the image
// through unchanged (copying if out-of-place); otherwise flips vertically.
inline void RandomFlipTopBottom(
    const nnvm::NodeAttrs &attrs,
    const OpContext &ctx,
    const std::vector<TBlob> &inputs,
    const std::vector<OpReqType> &req,
    const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    if (std::bernoulli_distribution()(prnd->GetRndEngine())) {
      // Identity branch: only copy when input and output buffers differ.
      if (outputs[0].dptr_ != inputs[0].dptr_) {
        std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
      }
    } else {
      FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
                         outputs[0].dptr<DType>());
    }
  });
}
// Shared parameter struct for the random enhancement ops (brightness,
// contrast, saturation, hue): the augmentation factor is drawn uniformly
// from [min_factor, max_factor].
struct RandomEnhanceParam : public dmlc::Parameter<RandomEnhanceParam> {
  // Lower bound of the uniformly-sampled factor (>= 0).
  float min_factor;
  // Upper bound of the uniformly-sampled factor (>= 0).
  float max_factor;
  DMLC_DECLARE_PARAMETER(RandomEnhanceParam) {
    DMLC_DECLARE_FIELD(min_factor)
    .set_lower_bound(0.0)
    .describe("Minimum factor.");
    DMLC_DECLARE_FIELD(max_factor)
    .set_lower_bound(0.0)
    .describe("Maximum factor.");
  }
};
inline void AdjustBrightnessImpl(const float& alpha_b,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
int length = inputs[0].Size();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
for (int l = 0; l < length; ++l) {
float val = static_cast<float>(input[l]) * alpha_b;
output[l] = saturate_cast<DType>(val);
}
});
}
// Draw alpha_b uniformly from [min_factor, max_factor] and scale the
// image's values by it via AdjustBrightnessImpl.
inline void RandomBrightness(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);

  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  float alpha_b = std::uniform_real_distribution<float>(
      param.min_factor, param.max_factor)(prnd->GetRndEngine());

  AdjustBrightnessImpl(alpha_b, ctx, inputs, req, outputs);
}
// Blend the image towards its mean grayscale value:
//   out = in * alpha_c + (1 - alpha_c) * mean(gray)
// alpha_c = 1 is identity, 0 flattens to the mean; BT.601 luma weights
// are used for multi-channel input, raw values for single-channel.
inline void AdjustContrastImpl(const float& alpha_c,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  static const float coef[] = { 0.299f, 0.587f, 0.114f };

  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  int nchannels = inputs[0].shape_[2];
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();

    float sum = 0.f;
    if (nchannels > 1) {
      // NOTE(review): presumably nchannels > 1 implies exactly 3 here
      // (shape inference allows only 1 or 3 channels) — verify.
      for (int l = 0; l < length; ++l) {
        for (int c = 0; c < 3; ++c) sum += input[l*3 + c] * coef[c];
      }
    } else {
      for (int l = 0; l < length; ++l) sum += input[l];
    }
    float gray_mean = sum / static_cast<float>(length);
    float beta = (1 - alpha_c) * gray_mean;

    for (int l = 0; l < length * nchannels; ++l) {
      float val = input[l] * alpha_c + beta;
      output[l] = saturate_cast<DType>(val);
    }
  });
}
// Draw alpha_c uniformly from [min_factor, max_factor] and adjust the
// image's contrast by it via AdjustContrastImpl.
inline void RandomContrast(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);

  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  float alpha_c = std::uniform_real_distribution<float>(
      param.min_factor, param.max_factor)(prnd->GetRndEngine());

  AdjustContrastImpl(alpha_c, ctx, inputs, req, outputs);
}
// Blend each pixel with its own grayscale (BT.601 luma) value:
//   out = gray * (1 - alpha_s) + in * alpha_s
// alpha_s = 1 is identity, 0 gives full grayscale, > 1 boosts saturation.
// Single-channel images are copied through unchanged.
inline void AdjustSaturationImpl(const float& alpha_s,
                                 const OpContext &ctx,
                                 const std::vector<TBlob> &inputs,
                                 const std::vector<OpReqType> &req,
                                 const std::vector<TBlob> &outputs) {
  static const float coef[] = { 0.299f, 0.587f, 0.114f };

  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  int nchannels = inputs[0].shape_[2];

  float alpha_o = 1.f - alpha_s;

  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();

    if (nchannels == 1) {
      for (int l = 0; l < length; ++l) output[l] = input[l];
      return;
    }

    for (int l = 0; l < length; ++l) {
      float gray = 0.f;
      for (int c = 0; c < 3; ++c) {
        // BUG FIX: was `gray = ...`, which discarded the previously
        // accumulated channels and left gray equal to the blue term only;
        // the luma must be the weighted SUM of all three channels.
        gray += input[l*3 + c] * coef[c];
      }
      gray *= alpha_o;
      for (int c = 0; c < 3; ++c) {
        float val = gray + input[l*3 + c] * alpha_s;
        output[l*3 + c] = saturate_cast<DType>(val);
      }
    }
  });
}
// Draw alpha_s uniformly from [min_factor, max_factor] and adjust the
// image's saturation by it via AdjustSaturationImpl.
inline void RandomSaturation(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);

  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  float alpha_s = std::uniform_real_distribution<float>(
      param.min_factor, param.max_factor)(prnd->GetRndEngine());

  AdjustSaturationImpl(alpha_s, ctx, inputs, req, outputs);
}
// Convert one 8-bit RGB pixel (components in [0, 255]) to HLS:
// hue in [0, 360), lightness and saturation in [0, 1].
// Achromatic pixels (max == min) get hue = saturation = 0.
inline void RGB2HLSConvert(const float& src_r,
                           const float& src_g,
                           const float& src_b,
                           float *dst_h,
                           float *dst_l,
                           float *dst_s) {
  const float r = src_r / 255.f;
  const float g = src_g / 255.f;
  const float b = src_b / 255.f;

  const float vmax = std::fmax(r, std::fmax(g, b));
  const float vmin = std::fmin(r, std::fmin(g, b));
  float diff = vmax - vmin;
  const float l = (vmax + vmin) * 0.5f;

  float h = 0.f;
  float s = 0.f;
  if (diff > std::numeric_limits<float>::epsilon()) {
    // Saturation is normalised differently below and above mid lightness.
    if (l < 0.5f) {
      s = diff / (vmax + vmin);
    } else {
      s = diff / (2.0f - vmax - vmin);
    }

    diff = 60.f / diff;
    // Hue sector depends on which channel is dominant.
    if (vmax == r) {
      h = (g - b) * diff;
    } else if (vmax == g) {
      h = (b - r) * diff + 120.f;
    } else {
      h = (r - g) * diff + 240.f;
    }
    if (h < 0.f) {
      h += 360.f;
    }
  }

  *dst_h = h;
  *dst_l = l;
  *dst_s = s;
}
// Inverse of RGB2HLSConvert: hue in degrees, lightness/saturation in
// [0, 1]; outputs are scaled back to [0, 255].  Zero saturation yields
// the achromatic value l on all three channels.
inline void HLS2RGBConvert(const float& src_h,
                           const float& src_l,
                           const float& src_s,
                           float *dst_r,
                           float *dst_g,
                           float *dst_b) {
  // For each 60-degree hue sector: which tab[] entry feeds b, g and r.
  static const int c_HlsSectorData[6][3] = {
    { 1, 3, 0 },
    { 1, 0, 2 },
    { 3, 0, 1 },
    { 0, 2, 1 },
    { 0, 1, 3 },
    { 2, 1, 0 }
  };

  const float l = src_l;
  const float s = src_s;
  float r = l;
  float g = l;
  float b = l;

  if (s != 0) {
    float p2;
    if (l <= 0.5f) {
      p2 = l * (1 + s);
    } else {
      p2 = l + s - l * s;
    }
    const float p1 = 2 * l - p2;

    // Wrap hue into [0, 6) sectors of 60 degrees each.
    float h = src_h * (1.f / 60.f);
    while (h < 0) h += 6;
    while (h >= 6) h -= 6;

    const int sector = static_cast<int>(h);
    h -= sector;

    float tab[4];
    tab[0] = p2;
    tab[1] = p1;
    tab[2] = p1 + (p2 - p1) * (1 - h);
    tab[3] = p1 + (p2 - p1) * h;

    b = tab[c_HlsSectorData[sector][0]];
    g = tab[c_HlsSectorData[sector][1]];
    r = tab[c_HlsSectorData[sector][2]];
  }

  *dst_b = b * 255.f;
  *dst_g = g * 255.f;
  *dst_r = r * 255.f;
}
// Rotate every pixel's hue by alpha * 360 degrees via an RGB -> HLS -> RGB
// round trip.
// NOTE(review): for single-channel input this returns without writing
// outputs[0] — correct only if the op runs in place or the output is
// pre-filled; verify against the operator registration.
inline void AdjustHueImpl(float alpha,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  if (inputs[0].shape_[2] == 1) return;

  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* input = inputs[0].dptr<DType>();
    DType* output = outputs[0].dptr<DType>();

    for (int i = 0; i < length; ++i) {
      float h, l, s;
      // Pointers advance three components per pixel (interleaved RGB).
      float r = static_cast<float>(*(input++));
      float g = static_cast<float>(*(input++));
      float b = static_cast<float>(*(input++));
      RGB2HLSConvert(r, g, b, &h, &l, &s);
      h += alpha * 360.f;
      HLS2RGBConvert(h, l, s, &r, &g, &b);
      *(output++) = saturate_cast<DType>(r);
      *(output++) = saturate_cast<DType>(g);
      *(output++) = saturate_cast<DType>(b);
    }
  });
}
// Draw alpha uniformly from [min_factor, max_factor] and rotate the
// image's hue by alpha * 360 degrees via AdjustHueImpl.
inline void RandomHue(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);

  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  float alpha = std::uniform_real_distribution<float>(
      param.min_factor, param.max_factor)(prnd->GetRndEngine());

  AdjustHueImpl(alpha, ctx, inputs, req, outputs);
}
// Parameters for random colour jitter: each field is the half-width of
// the uniform jitter range for the corresponding adjustment (0 disables
// that adjustment).
struct RandomColorJitterParam : public dmlc::Parameter<RandomColorJitterParam> {
  float brightness;
  float contrast;
  float saturation;
  float hue;
  DMLC_DECLARE_PARAMETER(RandomColorJitterParam) {
    DMLC_DECLARE_FIELD(brightness)
    .describe("How much to jitter brightness.");
    DMLC_DECLARE_FIELD(contrast)
    .describe("How much to jitter contrast.");
    DMLC_DECLARE_FIELD(saturation)
    .describe("How much to jitter saturation.");
    DMLC_DECLARE_FIELD(hue)
    .describe("How much to jitter hue.");
  }
};
// Apply the four jitters (brightness, contrast, saturation, hue) in a
// random order.  The first enabled adjustment reads from `inputs`; once
// `flag` is set, later adjustments read from `outputs`, chaining the
// effects in place.
inline void RandomColorJitter(const nnvm::NodeAttrs &attrs,
                              const OpContext &ctx,
                              const std::vector<TBlob> &inputs,
                              const std::vector<OpReqType> &req,
                              const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomColorJitterParam &param = nnvm::get<RandomColorJitterParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);

  int order[4] = {0, 1, 2, 3};
  std::shuffle(order, order + 4, prnd->GetRndEngine());
  // flag: true once some adjustment has already written to outputs.
  bool flag = false;

  for (int i = 0; i < 4; ++i) {
    switch (order[i]) {
      case 0:
        if (param.brightness > 0) {
          // Multiplicative factor jittered around 1.
          float alpha_b = 1.0 + std::uniform_real_distribution<float>(
              -param.brightness, param.brightness)(prnd->GetRndEngine());
          AdjustBrightnessImpl(alpha_b, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 1:
        if (param.contrast > 0) {
          float alpha_c = 1.0 + std::uniform_real_distribution<float>(
              -param.contrast, param.contrast)(prnd->GetRndEngine());
          AdjustContrastImpl(alpha_c, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 2:
        if (param.saturation > 0) {
          float alpha_s = 1.f + std::uniform_real_distribution<float>(
              -param.saturation, param.saturation)(prnd->GetRndEngine());
          AdjustSaturationImpl(alpha_s, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 3:
        if (param.hue > 0) {
          // Hue shift is additive (fraction of a full 360-degree turn).
          float alpha_h = std::uniform_real_distribution<float>(
              -param.hue, param.hue)(prnd->GetRndEngine());
          AdjustHueImpl(alpha_h, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
    }
  }
}
// Parameters for the deterministic lighting adjustment: one alpha per
// RGB channel, combined with the fixed eigen matrix in AdjustLightingImpl.
struct AdjustLightingParam : public dmlc::Parameter<AdjustLightingParam> {
  nnvm::Tuple<float> alpha;
  DMLC_DECLARE_PARAMETER(AdjustLightingParam) {
    DMLC_DECLARE_FIELD(alpha)
    .describe("The lighting alphas for the R, G, B channels.");
  }
};
// Parameters for the randomized lighting adjustment: the per-channel
// alphas are drawn from N(0, alpha_std).
struct RandomLightingParam : public dmlc::Parameter<RandomLightingParam> {
  float alpha_std;
  DMLC_DECLARE_PARAMETER(RandomLightingParam) {
    DMLC_DECLARE_FIELD(alpha_std)
    .set_default(0.05)
    .describe("Level of the lighting noise.");
  }
};
// Add a per-channel offset (eig * alpha) to every pixel of an interleaved
// RGB image.  The `eig` matrix is a fixed scaled eigen basis — presumably
// the PCA of RGB values in the AlexNet-style lighting augmentation; the
// three alphas weight its columns.
// NOTE(review): for single-channel input this returns without writing
// outputs[0] — correct only if the op runs in place; verify.
inline void AdjustLightingImpl(const nnvm::Tuple<float>& alpha,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
  static const float eig[3][3] = {
      { 55.46 * -0.5675, 4.794 * 0.7192,  1.148 * 0.4009 },
      { 55.46 * -0.5808, 4.794 * -0.0045, 1.148 * -0.8140 },
      { 55.46 * -0.5836, 4.794 * -0.6948, 1.148 * 0.4203 }
    };

  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  int channels = inputs[0].shape_[2];
  if (channels == 1) return;

  // The same (pca_r, pca_g, pca_b) offset is added to every pixel.
  float pca_r = eig[0][0] * alpha[0] + eig[0][1] * alpha[1] + eig[0][2] * alpha[2];
  float pca_g = eig[1][0] * alpha[0] + eig[1][1] * alpha[1] + eig[1][2] * alpha[2];
  float pca_b = eig[2][0] * alpha[0] + eig[2][1] * alpha[1] + eig[2][2] * alpha[2];
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();

    for (int i = 0; i < length; i++) {
      int base_ind = 3 * i;
      float in_r = static_cast<float>(input[base_ind]);
      float in_g = static_cast<float>(input[base_ind + 1]);
      float in_b = static_cast<float>(input[base_ind + 2]);
      output[base_ind] = saturate_cast<DType>(in_r + pca_r);
      output[base_ind + 1] = saturate_cast<DType>(in_g + pca_g);
      output[base_ind + 2] = saturate_cast<DType>(in_b + pca_b);
    }
  });
}
// Forward entry point for the deterministic AdjustLighting operator: unpacks
// the parsed parameter struct and delegates to AdjustLightingImpl.
inline void AdjustLighting(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const auto &param = nnvm::get<AdjustLightingParam>(attrs.parsed);
  AdjustLightingImpl(param.alpha, ctx, inputs, req, outputs);
}
// Forward entry point for the randomized lighting operator: draws the three
// channel alphas from N(0, alpha_std) using the operator's random resource,
// then applies the same PCA lighting shift as AdjustLighting.
inline void RandomLighting(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomLightingParam &param = nnvm::get<RandomLightingParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  std::normal_distribution<float> dist(0, param.alpha_std);
  auto &rng = prnd->GetRndEngine();
  // Draw order matters for RNG reproducibility: r, then g, then b.
  const float a_r = dist(rng);
  const float a_g = dist(rng);
  const float a_b = dist(rng);
  AdjustLightingImpl({a_r, a_g, a_b}, ctx, inputs, req, outputs);
}
// Registers a deterministic image-augmentation operator: one data input, one
// output, in-place capable, shape/type inference via ImageShape/ElemwiseType,
// and an identity ("_copy") gradient (augmentations are data transforms).
#define MXNET_REGISTER_IMAGE_AUG_OP(name) \
  NNVM_REGISTER_OP(name) \
  .set_num_inputs(1) \
  .set_num_outputs(1) \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption", \
    [](const NodeAttrs& attrs){ \
      return std::vector<std::pair<int, int> >{{0, 0}}; \
    }) \
  .set_attr<nnvm::FInferShape>("FInferShape", ImageShape) \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
  .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{ "_copy" }) \
  .add_argument("data", "NDArray-or-Symbol", "The input.")
// Same as MXNET_REGISTER_IMAGE_AUG_OP, plus a kRandom resource request for
// stochastic augmentations that need an engine from ctx.requested[0].
#define MXNET_REGISTER_IMAGE_RND_AUG_OP(name) \
  MXNET_REGISTER_IMAGE_AUG_OP(name) \
  .set_attr<FResourceRequest>("FResourceRequest", \
    [](const NodeAttrs& attrs) { \
      return std::vector<ResourceRequest>{ResourceRequest::kRandom}; \
    })
} // namespace image
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
|
flexConcatOperator.h | #ifndef flexConcatOperator_H
#define flexConcatOperator_H
#include "vector"
#include "tools.h"
#include "flexLinearOperator.h"
//! represents a concatenation operator
template<typename T>
class flexConcatOperator : public flexLinearOperator<T>
{
#ifdef __CUDACC__
    typedef thrust::device_vector<T> Tdata;
#else
    typedef std::vector<T> Tdata;
#endif
private:
    // NOTE(review): A and B are raw, non-owning pointers (no destructor is
    // defined here) -- confirm their lifetime is managed by the caller.
    flexLinearOperator<T>* A;   // left-hand operand
    flexLinearOperator<T>* B;   // right-hand operand
    mySign s;                   // PLUS, MINUS or COMPOSE
    // Scratch buffer for COMPOSE: holds the intermediate product.  Sized
    // A->getNumCols(), which for a valid composition equals B->getNumRows()
    // -- this is assumed, not checked, here.
    Tdata tmpVec;
public:
    //! initializes the concatenation operator
    /*!
        \param aA left hand side operator
        \param aB right hand side operator
        \param aS type of concatenation. Possible values are PLUS, SUBTRACT and COMPOSE.
        \param aMinus determines if operator is negated \sa isMinus
    */
    // The base class receives (aA rows, aB cols); for PLUS/MINUS this assumes
    // A and B have identical dimensions -- TODO confirm upstream validation.
    // (Members are initialized in declaration order, not init-list order, so
    // listing the base last is harmless, though compilers may warn.)
    flexConcatOperator(flexLinearOperator<T>* aA, flexLinearOperator<T>* aB, mySign aS, bool aMinus) : A(aA), B(aB), s(aS), tmpVec(aA->getNumCols()), flexLinearOperator<T>(aA->getNumRows(), aB->getNumCols(), concatOp, aMinus)
    {
    }

    // NOTE(review): shallow copy -- the new object shares the A and B
    // pointers with this one; confirm this matches the other operators'
    // copy() semantics.
    flexConcatOperator<T>* copy()
    {
        auto cpOp = new flexConcatOperator<T>(this->A, this->B, this->s, this->isMinus);
        return cpOp;
    }

    //to implement
    // TODO: not implemented -- output is left untouched.
    void times(bool transposed, const Tdata &input, Tdata &output)
    {
    }

    // output += Op * input (or Op^T * input when transposed), honoring the
    // isMinus negation of the whole operator.  PLUS/MINUS feed the same input
    // to A and B; COMPOSE applies B then A (A^T then B^T when transposed),
    // staging the intermediate result in tmpVec.
    void timesPlus(bool transposed, const Tdata &input, Tdata &output)
    {
        switch (this->s)
        {
            case PLUS:
            {
                if (this->isMinus)
                {
                    // -(A + B) = -A - B
                    A->timesMinus(transposed, input, output);
                    B->timesMinus(transposed, input, output);
                }
                else
                {
                    A->timesPlus(transposed, input, output);
                    B->timesPlus(transposed, input, output);
                }
                break;
            }
            case MINUS:
            {
                if (this->isMinus)
                {
                    // -(A - B) = -A + B
                    A->timesMinus(transposed, input, output);
                    B->timesPlus(transposed, input, output);
                }
                else
                {
                    A->timesPlus(transposed, input, output);
                    B->timesMinus(transposed, input, output);
                }
                break;
            }
            case COMPOSE:
            {
                if (transposed)
                {
                    //apply A first
                    // (A B)^T = B^T A^T: stage A^T * input, then apply B^T.
#ifdef __CUDACC__
                    thrust::fill(this->tmpVec.begin(), this->tmpVec.end(), (T)0);
#else
                    std::fill(this->tmpVec.begin(), this->tmpVec.end(), (T)0);
#endif
                    A->timesPlus(true, input, this->tmpVec);
                    if (this->isMinus)
                    {
                        B->timesMinus(true, this->tmpVec, output);
                    }
                    else
                    {
                        B->timesPlus(true, this->tmpVec, output);
                    }
                }
                else
                {
                    //apply B first
#ifdef __CUDACC__
                    thrust::fill(this->tmpVec.begin(), this->tmpVec.end(), (T)0);
#else
                    std::fill(this->tmpVec.begin(), this->tmpVec.end(), (T)0);
#endif
                    B->timesPlus(false, input, this->tmpVec);
                    if (this->isMinus)
                    {
                        A->timesMinus(false, this->tmpVec, output);
                    }
                    else
                    {
                        A->timesPlus(false, this->tmpVec, output);
                    }
                }
                break;
            }
        }
    }

    // output -= Op * input: exact mirror of timesPlus with the signs of the
    // leaf calls swapped.
    void timesMinus(bool transposed, const Tdata &input, Tdata &output)
    {
        switch (this->s)
        {
            case PLUS:
            {
                if (this->isMinus)
                {
                    A->timesPlus(transposed, input, output);
                    B->timesPlus(transposed, input, output);
                }
                else
                {
                    A->timesMinus(transposed, input, output);
                    B->timesMinus(transposed, input, output);
                }
                break;
            }
            case MINUS:
            {
                if (this->isMinus)
                {
                    A->timesPlus(transposed, input, output);
                    B->timesMinus(transposed, input, output);
                }
                else
                {
                    A->timesMinus(transposed, input, output);
                    B->timesPlus(transposed, input, output);
                }
                break;
            }
            case COMPOSE:
            {
                if (transposed)
                {
                    //apply A first
                    // The intermediate A^T * input is always accumulated with
                    // timesPlus; the subtraction happens in the outer B call.
#ifdef __CUDACC__
                    thrust::fill(this->tmpVec.begin(), this->tmpVec.end(), (T)0);
#else
                    std::fill(this->tmpVec.begin(), this->tmpVec.end(), (T)0);
#endif
                    A->timesPlus(true, input, tmpVec);
                    if (this->isMinus)
                    {
                        B->timesPlus(true, this->tmpVec, output);
                    }
                    else
                    {
                        B->timesMinus(true, this->tmpVec, output);
                    }
                }
                else
                {
                    //apply B first
#ifdef __CUDACC__
                    thrust::fill(this->tmpVec.begin(), this->tmpVec.end(), (T)0);
#else
                    std::fill(this->tmpVec.begin(), this->tmpVec.end(), (T)0);
#endif
                    B->timesPlus(false, input, this->tmpVec);
                    if (this->isMinus)
                    {
                        A->timesPlus(false, this->tmpVec, output);
                    }
                    else
                    {
                        A->timesMinus(false, this->tmpVec, output);
                    }
                }
                break;
            }
        }
    }

    //TODO
    // Stub: returns a trivial bound of 1 regardless of the operands.
    T getMaxRowSumAbs(bool transposed)
    {
        return static_cast<T>(1);
    }

    // Per-row absolute sums used as operator-norm estimates.  PLUS and MINUS
    // both use |a| + |b| (a valid bound for either sign); COMPOSE returns the
    // uniform bound max(rowSumA) * max(rowSumB) for every row.
    std::vector<T> getAbsRowSum(bool transposed)
    {
        std::vector<T> result;
        auto rowSumA = A->getAbsRowSum(transposed);
        auto rowSumB = B->getAbsRowSum(transposed);
        switch (this->s)
        {
            case PLUS:
                result.resize(rowSumA.size());
                #pragma omp parallel for
                for (int k = 0; k < result.size(); ++k)
                {
                    result[k] = rowSumA[k] + rowSumB[k];
                }
                break;
            case MINUS:
            {
                result.resize(rowSumA.size());
                #pragma omp parallel for
                for (int k = 0; k < result.size(); ++k)
                {
                    result[k] = rowSumA[k] + rowSumB[k];
                }
                break;
            }
            case COMPOSE:
            {
                T maxA = *std::max_element(rowSumA.begin(), rowSumA.end());
                T maxB = *std::max_element(rowSumB.begin(), rowSumB.end());
                T maxProd = maxA * maxB;
                if(transposed)
                    result.resize(this->B->getNumCols(), maxProd);
                else
                    result.resize(this->A->getNumRows(), maxProd);
                break;
            }
        }
        return result;
    }

#ifdef __CUDACC__
    // CUDA variant of getAbsRowSum.  NOTE(review): the OpenMP loops index
    // thrust::device_vector element-by-element from the host, which performs
    // a device transfer per access -- confirm this path is not hot.
    thrust::device_vector<T> getAbsRowSumCUDA(bool transposed)
    {
        Tdata result;
        auto rowSumA = A->getAbsRowSumCUDA(transposed);
        auto rowSumB = B->getAbsRowSumCUDA(transposed);
        switch (this->s)
        {
            case PLUS:
            {
                result.resize(rowSumA.size());
                #pragma omp parallel for
                for (int k = 0; k < result.size(); ++k)
                {
                    result[k] = rowSumA[k] + rowSumB[k];
                }
                break;
            }
            case MINUS:
            {
                result.resize(rowSumA.size());
                #pragma omp parallel for
                for (int k = 0; k < result.size(); ++k)
                {
                    result[k] = rowSumA[k] + rowSumB[k];
                }
                break;
            }
            case COMPOSE:
            {
                T maxA = *thrust::max_element(rowSumA.begin(), rowSumA.end());
                T maxB = *thrust::max_element(rowSumB.begin(), rowSumB.end());
                T maxProd = maxA * maxB;
                if(transposed)
                    result.resize(this->B->getNumCols(), maxProd);
                else
                    result.resize(this->A->getNumRows(), maxProd);
                break;
            }
        }
        return result;
    }
#endif
};
#endif
|
positive_solver.c | #include "q_incs.h"
#include "matrix_helpers.h"
#include "positive_solver.h"
#define DEFAULT_EPS 0.001
/* Andrew Winkler
It has the virtue of dramatic simplicity - there's no need to explicitly construct the cholesky decomposition, no need to do the explicit backsubstitutions.
Yet it's essentially equivalent to that more labored approach, so its performance/stability/memory, etc. should be at least as good.
*/
/*
 * Recursively solves A x = b for a symmetric matrix A stored in packed
 * upper-triangular ("symm") layout: row i holds the n-i entries
 * A(i,i), A(i,i+1), ..., so A[i][j] is full-matrix entry (i, i+j).
 *
 * One elimination step removes row/column 0, then the routine recurses on
 * the trailing (n-1)x(n-1) submatrix and back-substitutes for x[0]; this is
 * equivalent to a Cholesky solve without forming the factor (see the note
 * at the top of this file).
 *
 * NOTE: A and b are modified in place; callers needing them preserved must
 * copy first (see posdef_positive_solver).
 *
 * Returns 0 on success, negative on failure (n < 1, or a zero pivot with a
 * non-negligible right-hand side).
 */
static int _positive_solver_rec(
    double ** A,
    double * x,
    double * b,
    int n
    )
{
  int status = 0;
  if (n < 1) { go_BYE(-1); }
  if (n == 1) {
    if (A[0][0] == 0.0) {
      /* Singular 1x1 system: consistent only when b[0] is numerically zero.
       * Bug fix: abs() is the integer function and truncated the double
       * (abs(0.5) == 0); fabs() is required here. */
      if (fabs(b[0]) > DEFAULT_EPS) { go_BYE(-1); }
      x[0] = 0.0;
      return 0;
    }
    x[0] = b[0] / A[0][0];
    return 0;
  }
  /* Views of the trailing system (still in symm layout). */
  double * bvec = b + 1;
  double * Avec = A[0] + 1;    /* off-diagonal entries of row 0 */
  double ** Asub = A + 1;
  double * xvec = x + 1;
  int m = n - 1;
  if (A[0][0] != 0.0) {
    /* Rank-1 update eliminating column 0 from rows 1..n-1. */
    int nT = sysconf(_SC_NPROCESSORS_ONLN);
#pragma omp parallel for
    for (int t = 0; t < nT; t++) {
      /* Each row i is handled by exactly one thread (the t with
       * t == (i - i/nT) % nT); the skewed assignment appears intended to
       * balance the triangular row lengths -- confirm if load matters. */
      for(int i=0; i < m; i++){
        if ((i - i / nT) % nT != t) continue;
        bvec[i] -= Avec[i] * b[0] / A[0][0];
        for(int j=0; j < m - i; j++)
          Asub[i][j] -= Avec[i] * Avec[i+j] / A[0][0];
      }
    }
  } /* else check that Avec is 0 */
  status = _positive_solver_rec(Asub, xvec, bvec, m);
  cBYE(status);
  if ( status < 0 ) { return status; } /* defensive: cBYE should already bail */
  if (A[0][0] == 0.0) {
    /* Zero pivot: solvable only when b[0] is (numerically) zero.  Changed
     * from an exact != 0.0 test to the same tolerance as the base case,
     * as the original comment ("or close enough") intended. */
    if (fabs(b[0]) > DEFAULT_EPS) { go_BYE(-1); }
    x[0] = 0.0;
    return status;
  }
  /* Back-substitution for x[0]. */
  double p = 0;
  for ( int k = 0; k < m; k++ ) {
    p += Avec[k] * xvec[k];
  }
  x[0] = (b[0] - p) / A[0][0];
BYE:
  return status;
}
/*
 * Verifies a candidate solution: recomputes b' = A x (using the symm or the
 * full multiply, per is_symm) and returns true iff every |b'[i] - b[i]| is
 * within eps.  Returns false on allocation failure.
 */
static bool _positive_solver_check(
    double **A,
    double *x,
    double *b,
    int n,
    double eps,
    bool is_symm
    )
{
  int status = 0;
  bool result = false;
  double *b_prime = NULL;
  b_prime = malloc(n * sizeof(double)); return_if_malloc_failed(b_prime);
  if (is_symm) {
    multiply_symm_matrix_vector(A, x, n, b_prime);
  } else {
    multiply_matrix_vector(A, x, n, b_prime);
  }
  result = true;
  for (int i = 0; i < n; i++) {
    /* Bug fix: abs() truncated the double residual to an int, so any
     * residual below 1.0 passed regardless of eps; fabs() is required. */
    if (fabs(b_prime[i] - b[i]) > eps) {
      result = false;
      break;
    }
  }
BYE:
  free_if_non_null(b_prime);
  if (status != 0) result = false;
  return result;
}
/*
 * Checks x against A x = b for a FULL (square, row-major) matrix A.
 * A non-positive eps selects the default tolerance.
 */
bool full_positive_solver_check(
    double **A,
    double *x,
    double *b,
    int n,
    double eps
    )
{
  bool is_symm = false;
  /* Bug fix: the original tested `0 < eps`, which overwrote every valid
   * caller-supplied tolerance with the default and kept invalid ones. */
  if (eps <= 0) eps = DEFAULT_EPS;
  return _positive_solver_check(A, x, b, n, eps, is_symm);
}
/*
 * Checks x against A x = b for a matrix A in packed symm layout.
 * A non-positive eps selects the default tolerance.
 */
bool symm_positive_solver_check(
    double **A,
    double *x,
    double *b,
    int n,
    double eps
    )
{
  bool is_symm = true;
  /* Bug fix: the original tested `0 < eps`, which overwrote every valid
   * caller-supplied tolerance with the default and kept invalid ones. */
  if (eps <= 0) eps = DEFAULT_EPS;
  return _positive_solver_check(A, x, b, n, eps, is_symm);
}
/*
 * Solves A x = b for a positive-(semi)definite A in packed symm layout.
 * "fast" variant: works in place, so A and b are DESTROYED (overwritten by
 * the elimination).  Returns 0 on success, negative on failure.
 */
int posdef_positive_solver_fast(
    double ** A,
    double * x,
    double * b,
    int n
    )
{
  return _positive_solver_rec(A, x, b, n);
}
/*
 * Solves A x = b for a positive-(semi)definite A in packed symm layout,
 * preserving the caller's A and b (the solver mutates its inputs, so this
 * wrapper works on copies).  Returns 0 on success, negative on failure.
 */
int posdef_positive_solver(
    double ** A,
    double * x,
    double * b,
    int n
    )
{
  int status = 0;
  // copies of A and b to preserve input.
  double ** A_copy = NULL;
  double * b_copy = NULL;
  status = alloc_symm_matrix(&A_copy, n); cBYE(status);
  b_copy = malloc(n * sizeof(double));
  return_if_malloc_failed(b_copy);
  /* symm layout: row i holds the n-i entries A(i,i..n-1) */
  for ( int i = 0; i < n; i++ ) {
    for ( int j = 0; j < n-i; j++ ) {
      A_copy[i][j] = A[i][j];
    }
    b_copy[i] = b[i];
  }
  status = _positive_solver_rec(A_copy, x, b_copy, n);
BYE:
  free_matrix(A_copy, n);
  free_if_non_null(b_copy);
  return status;
}
/*
 * Solves A x = b for a positive-(semi)definite A stored as a FULL n x n
 * matrix, in place.  Trick: shifting each row pointer by its index
 * (A[i] += i) makes the full rows alias the upper triangle in the packed
 * symm layout the recursive solver expects; the pointers are restored on
 * exit, but the upper-triangular VALUES of A (and b) are destroyed.
 */
int full_posdef_positive_solver_fast(
    double ** A,
    double * x,
    double * b,
    int n
    )
{
  int status = 0;
  for (int i = 0; i < n; i++) {
    A[i] += i;   /* row i now starts at the diagonal entry A(i,i) */
  }
  status = _positive_solver_rec(A, x, b, n); cBYE(status);
BYE:
  /* always undo the pointer shift, even on error */
  for (int i = 0; i < n; i++) {
    A[i] -= i;
  }
  return status;
}
/*
 * Solves A x = b for a positive-(semi)definite A stored as a FULL n x n
 * matrix, preserving the caller's A and b: copies the upper triangle into a
 * freshly allocated packed symm matrix and solves on the copy.
 */
int full_posdef_positive_solver(
    double ** A,
    double * x,
    double * b,
    int n
    )
{
  int status = 0;
  // copies of A and b in order to preserve input.
  double ** A_copy = NULL;
  double * b_copy = NULL;
  status = alloc_symm_matrix(&A_copy, n); cBYE(status);
  b_copy = malloc(n * sizeof(double)); return_if_malloc_failed(b_copy);
  for ( int i = 0; i < n; i++ ) {
    for ( int j = 0; j < n-i; j++ ) {
      /* full (i, j+i) -> packed (i, j) */
      A_copy[i][j] = A[i][j + i];
    }
    b_copy[i] = b[i];
  }
  status = _positive_solver_rec(A_copy, x, b_copy, n);
BYE:
  free_matrix(A_copy, n);
  free_if_non_null(b_copy);
  return status;
}
/*
 * General solver for A x = b via the normal equations: forms AtA = A^T A
 * (positive semi-definite by construction) and Atb = A^T b, then runs the
 * positive-definite solver.  AtA and Atb are scratch, so the in-place
 * "fast" variant is safe here; the caller's A and b are untouched.
 */
int positive_solver(
    double ** A,
    double * x,
    double * b,
    int n
    )
{
  int status = 0;
  double ** AtA = NULL;
  double * Atb = NULL;
  status = alloc_symm_matrix(&AtA, n); cBYE(status);
  Atb = malloc(n * sizeof(double)); return_if_malloc_failed(Atb);
  transpose_and_multiply(A, AtA, n);
  transpose_and_multiply_matrix_vector(A, b, n, Atb);
  status = posdef_positive_solver_fast(AtA, x, Atb, n); cBYE(status);
BYE:
  free_matrix(AtA, n);
  free_if_non_null(Atb);
  return status;
}
|
ordered_dependences.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
#include <omp.h>
// OMPT doacross test: two threads run a 2x2 ordered(2) loop whose sink/source
// dependences produce the ompt_event_dependences records checked below.
// Do not alter the pragmas or printf strings -- the FileCheck patterns and
// %sort-threads post-processing depend on them.
int main() {
  int a[10][10];
  int i, j;
#pragma omp parallel num_threads(2)
#pragma omp for ordered(2)
  for (i = 0; i < 2; i++)
    for (j = 0; j < 2; j++) {
      a[i][j] = i + j + 1;
      printf("%d, %d\n", i, j);
      // wait for iterations (i-1, j) and (i, j-1) to publish their source
#pragma omp ordered depend(sink : i - 1, j) depend(sink : i, j - 1)
      if (i > 0 && j > 0)
        a[i][j] = a[i - 1][j] + a[i][j - 1] + 1;
      printf("%d, %d\n", i, j);
      // signal completion of iteration (i, j)
#pragma omp ordered depend(source)
    }
  return 0;
}
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER:[0-9]+]]: ompt_event_loop_begin:
// CHECK-SAME: parallel_id={{[0-9]+}}, parent_task_id=[[ITASK:[0-9]+]],
// CHECK: {{^}}[[MASTER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_source), (0,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2
// CHECK: {{^}}[[MASTER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_sink), (0,
// CHECK-SAME: ompt_dependence_type_sink)], ndeps=2
// CHECK: {{^}}[[MASTER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_source), (1,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2
// CHECK: {{^}}[[WORKER:[0-9]+]]: ompt_event_loop_begin:
// CHECK-SAME: parallel_id={{[0-9]+}}, parent_task_id=[[ITASK:[0-9]+]],
// CHECK: {{^}}[[WORKER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_sink), (0,
// CHECK-SAME: ompt_dependence_type_sink)], ndeps=2
// CHECK: {{^}}[[WORKER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(1, ompt_dependence_type_source), (0,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2
// either can be first for last iteration
// CHECK-DAG: [[ITASK]]{{.*}}deps=[(0{{.*}}sink), (1,{{.*}}sink)]
// CHECK-DAG: [[ITASK]]{{.*}}deps=[(1{{.*}}sink), (0,{{.*}}sink)]
// CHECK: {{^}}[[WORKER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(1, ompt_dependence_type_source), (1,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2
|
genzipf.c | //==================================================== file = genzipf.c =====
//= Program to generate Zipf (power law) distributed random variables =
//===========================================================================
//= Notes: 1) Writes to a user specified output file =
//= 2) Generates user specified number of values =
//= 3) Run times is same as an empirical distribution generator =
//= 4) Implements p(i) = C/i^alpha for i = 1 to N where C is the =
//= normalization constant (i.e., sum of p(i) = 1). =
//=-------------------------------------------------------------------------=
//= Example user input: =
//= =
//= ---------------------------------------- genzipf.c ----- =
//= - Program to generate Zipf random variables - =
//= -------------------------------------------------------- =
//= Output file name ===================================> output.dat =
//= Random number seed =================================> 1 =
//= Alpha vlaue ========================================> 1.0 =
//= N value ============================================> 1000 =
//= Number of values to generate =======================> 5 =
//= -------------------------------------------------------- =
//= - Generating samples to file - =
//= -------------------------------------------------------- =
//= -------------------------------------------------------- =
//= - Done! =
//= -------------------------------------------------------- =
//=-------------------------------------------------------------------------=
//= Example output file ("output.dat" for above): =
//= =
//= 1 =
//= 1 =
//= 161 =
//= 17 =
//= 30 =
//=-------------------------------------------------------------------------=
//= Build: bcc32 genzipf.c =
//=-------------------------------------------------------------------------=
//= Execute: genzipf =
//=-------------------------------------------------------------------------=
//= Author: Kenneth J. Christensen =
//= University of South Florida =
//= WWW: http://www.csee.usf.edu/~christen =
//= Email: christen@csee.usf.edu =
//=-------------------------------------------------------------------------=
//= History: KJC (11/16/03) - Genesis (from genexp.c) =
//===========================================================================
//----- Include files -------------------------------------------------------
#include <assert.h> // Needed for assert() macro
#include <stdio.h> // Needed for printf()
#include <stdlib.h> // Needed for exit() and ato*()
#include <math.h> // Needed for pow()
//----- Constants -----------------------------------------------------------
#define FALSE 0 // Boolean false
#define TRUE 1 // Boolean true
//----- Function prototypes -------------------------------------------------
int zipf(double alpha, int n); // Returns a Zipf random variable
double rand_val(int seed); // Jain's RNG
//===== Main program ========================================================
// Interactive driver: prompts for parameters and writes num_values Zipf
// samples to the chosen file.
// NOTE(review): `void main` is non-standard (the header mentions bcc32);
// confirm toolchain, should normally be `int main`.
void main(void)
{
  FILE *fp; // File pointer to output file
  char file_name[256]; // Output file name string
  char temp_string[256]; // Temporary string variable
  double alpha; // Alpha parameter
  // NOTE(review): n is declared double but zipf() takes an int n -- the
  // value is silently narrowed at the call below; confirm intent.
  double n; // N parameter
  int num_values; // Number of values
  int zipf_rv; // Zipf random variable
  int i; // Loop counter
  // Output banner
  printf("---------------------------------------- genzipf.c ----- \n");
  printf("- Program to generate Zipf random variables - \n");
  printf("-------------------------------------------------------- \n");
  // Prompt for output filename and then create/open the file
  printf("Output file name ===================================> ");
  scanf("%s", file_name);
  fp = fopen(file_name, "w");
  if (fp == NULL)
  {
    printf("ERROR in creating output file (%s) \n", file_name);
    exit(1);
  }
  // Prompt for random number seed and then use it
  printf("Random number seed (greater than 0) ================> ");
  scanf("%s", temp_string);
  rand_val((int) atoi(temp_string));
  // Prompt for alpha value
  printf("Alpha value ========================================> ");
  scanf("%s", temp_string);
  alpha = atof(temp_string);
  // Prompt for N value
  printf("N value ============================================> ");
  scanf("%s", temp_string);
  n = atoi(temp_string);
  // Prompt for number of values to generate
  printf("Number of values to generate =======================> ");
  scanf("%s", temp_string);
  num_values = atoi(temp_string);
  // Output "generating" message
  printf("-------------------------------------------------------- \n");
  printf("- Generating samples to file - \n");
  printf("-------------------------------------------------------- \n");
  // Generate and output zipf random variables
  for (i=0; i<num_values; i++)
  {
    // NOTE(review): this 2-argument call matches the stale prototype at the
    // top of the file, but the zipf() DEFINITION takes (alpha, n, seedp) --
    // the file cannot compile as-is; confirm and pass a seed pointer here.
    zipf_rv = zipf(alpha, n);
    fprintf(fp, "%d \n", zipf_rv);
  }
  // Output "done" message and close the output file
  printf("-------------------------------------------------------- \n");
  printf("- Done! \n");
  printf("-------------------------------------------------------- \n");
  fclose(fp);
}
//===========================================================================
//= Function to generate Zipf (power law) distributed random variables =
//= - Input: alpha and N =
//= - Output: Returns with Zipf distributed random variable =
//===========================================================================
// Draws one Zipf(alpha, n)-distributed integer in [1, n] by inverse-CDF
// sampling: p(i) = C / i^alpha, C = 1 / sum_{i=1..n} i^-alpha.
//
//   alpha  skew parameter (> 0)
//   n      support size; must match across calls (the normalization constant
//          and the cached table are computed once for the first n seen)
//   seedp  per-caller rand_r() state, enabling reentrant/parallel use
//
// NOTE: the one-time initialization of `c`/`first` is not guarded by the
// critical section below, so the very first calls should not race.
int zipf(double alpha, int n, unsigned int *seedp)
{
  static int first = 1;              // one-time init flag for c
  static double c = 0;               // normalization constant
  double z;                          // uniform random number (0 < z < 1)
  double sum_prob;                   // running CDF
  int zipf_value = 0;                // sampled value, in [1, n]
  int i;

  // Compute normalization constant on first call only.
  if (first)
  {
    for (i = 1; i <= n; i++)
      c = c + (1.0 / pow((double) i, alpha));
    c = 1.0 / c;
    first = 0;
  }

  // Pull a uniform random number with 0 < z < 1.
  do
  {
    z = rand_r(seedp) * (1.0 / RAND_MAX);
  }
  while ((z == 0) || (z == 1));

  // Lazily build the table of unnormalized probabilities 1/i^alpha, i=1..n,
  // so the inner sampling loop avoids a pow() per step.
  static std::vector<double> oopia;
#pragma omp critical
  if ((int) oopia.size() != n) {
    oopia.resize(n);
    for (int k = 0; k < n; k++)
      // Bug fixes: the original indexed pow() from 0 (oopia[0] was
      // 1/0^alpha = inf, breaking item 1) and was missing the ';'.
      oopia[k] = 1.0 / pow((double) (k + 1), alpha);
  }

  // Map z to the smallest i whose CDF reaches it.
  sum_prob = 0;
  for (i = 1; i <= n; i++)
  {
    sum_prob = sum_prob + c * oopia[i - 1];
    if (sum_prob >= z)
    {
      zipf_value = i;
      break;
    }
  }

  // Assert that zipf_value is between 1 and N.
  assert((zipf_value >= 1) && (zipf_value <= n));
  return zipf_value;
}
//=========================================================================
//= Multiplicative LCG for generating uniform(0.0, 1.0) random numbers =
//= - x_n = 7^5*x_(n-1)mod(2^31 - 1) =
//= - With x seeded to 1 the 10000th x value should be 1043618065 =
//= - From R. Jain, "The Art of Computer Systems Performance Analysis," =
//= John Wiley & Sons, 1991. (Page 443, Figure 26.2) =
//=========================================================================
// Jain's multiplicative LCG: x_{n+1} = 7^5 * x_n mod (2^31 - 1), evaluated
// with the q/r decomposition (q = m/a, r = m%a) so the product never
// overflows a long.  Call with seed > 0 to (re)seed (returns 0.0); call with
// seed == 0 to advance and get a uniform value in (0.0, 1.0).
// With seed 1, the 10000th value is 1043618065 / m (see header reference).
double rand_val(int seed)
{
  const long a = 16807;        // multiplier (7^5)
  const long m = 2147483647;   // modulus (2^31 - 1)
  const long q = 127773;       // m div a
  const long r = 2836;         // m mod a
  static long state;           // current x; persists across calls

  // Seeding call: note the code requires a strictly positive seed even
  // though the original banner said "non-zero".
  if (seed > 0)
  {
    state = seed;
    return (0.0);
  }

  // Schrage-style update without overflow.
  long hi = state / q;
  long lo = state % q;
  long next = (a * lo) - (r * hi);
  state = (next > 0) ? next : next + m;

  return ((double) state / m);
}
|
GB_binop__bshift_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__bshift_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__bshift_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_int64)
// C=scalar+B GB (_bind1st__bshift_int64)
// C=scalar+B' GB (_bind1st_tran__bshift_int64)
// C=A+scalar GB (_bind2nd__bshift_int64)
// C=A'+scalar GB (_bind2nd_tran__bshift_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_bitshift_int64 (aij, bij)
// Type/operator plumbing consumed by the shared GB_* templates included
// below.  This file is generated (see header); edits here will be lost.
#define GB_ATYPE \
    int64_t
#define GB_BTYPE \
    int8_t
#define GB_CTYPE \
    int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \
// bij = Bx [pB]
// NOTE(review): the stray trailing '\' above splices the following comment
// line into the macro; harmless (comments are stripped after splicing) but
// fragile -- a generator quirk, do not hand-edit.
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_bitshift_int64 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_INT64 || GxB_NO_BSHIFT_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bshift_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // Dense C = bitshift(A, B); all work is done by the shared template,
    // parameterized by the GB_* macros defined above.
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bshift_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        // C(i,j) = bitshift(C(i,j), B(i,j)) for entries present in B.
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bshift_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above returns); kept by the code generator
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bshift_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion supplies alpha/beta fill scalars for entries present in
    // only one of A or B; they are read only when is_eWiseUnion is true.
    int64_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bshift_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Sparse/hyper C: intersect the patterns of A and B per the task list.
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bshift_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
#else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
#endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bshift_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // M sparse/hyper, A and B bitmap/full: iterate over M's pattern.
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bshift_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Bitmap C: dense iteration over C's bitmap with optional mask M.
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bitshift (x, Bx [p]): apply the operator with the scalar x bound
// as the first argument.  x and Cx are int64_t; each entry of B supplies an
// int8 shift count.  Bb is B's bitmap (NULL when B is full).
GrB_Info GB (_bind1st__bshift_int64)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped (GB_void) inputs
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// GBB is always 1 for a full matrix; otherwise consult the bitmap
if (GBB (Bb, k))
{
int8_t shift = GBX (Bx, k, false) ;
Cx [k] = GB_bitshift_int64 (x, shift) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bitshift (Ax [p], y): apply the operator with the scalar y bound
// as the second argument.  Ax and Cx are int64_t; y is the int8 shift count.
// Ab is A's bitmap (NULL when A is full).
GrB_Info GB (_bind2nd__bshift_int64)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped (GB_void) inputs
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// GBB is always 1 for a full matrix; otherwise consult the bitmap
if (GBB (Ab, k))
{
int64_t value = GBX (Ax, k, false) ;
Cx [k] = GB_bitshift_int64 (value, y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP: cij = bitshift (x, aij).  No typecast occurs here despite the
// macro name; aij is the int8 shift count read from A.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int64 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply bitshift with the scalar x bound as
// the first argument.  The transpose loop is supplied by GB_unop_transpose.c
// and driven by GB_CAST_OP / GB_ATYPE defined just above.
GrB_Info GB (_bind1st_tran__bshift_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef  GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
// this kernel was compiled out; caller falls back to a generic method
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE to the operator's own type for the generated code that
// follows this function in the same compilation unit
#undef  GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP: cij = bitshift (aij, y).  No typecast occurs here despite the
// macro name; aij is the int64 value read from A and y is the int8 shift.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int64 (aij, y) ; \
}
// C = op (A', y): transpose A and apply bitshift with the scalar y bound as
// the second argument.  The transpose loop is supplied by
// GB_unop_transpose.c, using the GB_CAST_OP defined just above.
GrB_Info GB (_bind2nd_tran__bshift_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this kernel was compiled out; caller falls back to a generic method
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
register ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) != 0)
channels++;
}
return(channels == 0 ? (size_t) 1 : channels);
}
/*
  CompareImages(): compute the distortion between image and
  reconstruct_image with the requested metric, then build and return a
  "difference" visualization: unchanged pixels painted with the lowlight
  color, changed pixels with the highlight color, and masked pixels with
  the masklight color.  Returns NULL on failure; the caller owns the
  returned image.  The colors may be overridden via the
  "compare:highlight-color", "compare:lowlight-color" and
  "compare:masklight-color" image artifacts.
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
const MetricType metric,double *distortion,ExceptionInfo *exception)
{
CacheView
*highlight_view,
*image_view,
*reconstruct_view;
const char
*artifact;
double
fuzz;
Image
*clone_image,
*difference_image,
*highlight_image;
MagickBooleanType
status;
PixelInfo
highlight,
lowlight,
masklight;
RectangleInfo
geometry;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
/* NOTE(review): this repeats the trace log emitted a few lines above;
   the duplicate appears unintentional but is harmless. */
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Compute the requested distortion metric first; bail out on failure. */
status=GetImageDistortion(image,reconstruct_image,metric,distortion,
exception);
if (status == MagickFalse)
return((Image *) NULL);
/* The result covers the union of both image geometries. */
columns=MagickMax(image->columns,reconstruct_image->columns);
rows=MagickMax(image->rows,reconstruct_image->rows);
SetGeometry(image,&geometry);
geometry.width=columns;
geometry.height=rows;
/* Clone and extend the input to the union geometry (mask removed so the
   extent is not clipped by a read mask). */
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
difference_image=ExtentImage(clone_image,&geometry,exception);
clone_image=DestroyImage(clone_image);
if (difference_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
/* The highlight image marks each pixel as changed/unchanged/masked. */
highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (highlight_image == (Image *) NULL)
{
difference_image=DestroyImage(difference_image);
return((Image *) NULL);
}
status=SetImageStorageClass(highlight_image,DirectClass,exception);
if (status == MagickFalse)
{
difference_image=DestroyImage(difference_image);
highlight_image=DestroyImage(highlight_image);
return((Image *) NULL);
}
(void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
(void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
/* Default marker colors, each overridable via an image artifact. */
(void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
artifact=GetImageArtifact(image,"compare:highlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
(void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
artifact=GetImageArtifact(image,"compare:lowlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
(void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
artifact=GetImageArtifact(image,"compare:masklight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
/*
Generate difference image.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,highlight_image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p,
*magick_restrict q;
register Quantum
*magick_restrict r;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
(r == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
MagickStatusType
difference;
register ssize_t
i;
/* Pixels excluded by either read mask are painted with masklight. */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
SetPixelViaPixelInfo(highlight_image,&masklight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
continue;
}
difference=MagickFalse;
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
/* Alpha is compared directly; color channels are alpha-weighted. */
if (channel == AlphaPixelChannel)
distance=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
else
distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
/* A pixel differs once any channel exceeds the fuzz tolerance. */
if ((distance*distance) > fuzz)
{
difference=MagickTrue;
break;
}
}
if (difference == MagickFalse)
SetPixelViaPixelInfo(highlight_image,&lowlight,r);
else
SetPixelViaPixelInfo(highlight_image,&highlight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
}
sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
highlight_view=DestroyCacheView(highlight_view);
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* Compose the per-pixel markers over the extended clone, then drop alpha. */
(void) CompositeImage(difference_image,highlight_image,image->compose,
MagickTrue,0,0,exception);
(void) SetImageAlphaChannel(difference_image,OffAlphaChannel,exception);
highlight_image=DestroyImage(highlight_image);
if (status == MagickFalse)
difference_image=DestroyImage(difference_image);
return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetAbsoluteDistortion(): count, per channel, the pixels whose squared
  channel difference exceeds the fuzz tolerance.  distortion[i] receives the
  count for channel i; distortion[CompositePixelChannel] counts pixels that
  differ in any channel.  The caller pre-zeroes distortion[].
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
fuzz;
MagickBooleanType
status;
size_t
columns,
rows;
ssize_t
y;
/*
Compute the absolute difference in pixels between two images.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
j,
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* Accumulate per-row into a local array; merged under the critical
   section below to avoid racing on distortion[]. */
(void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
MagickBooleanType
difference;
register ssize_t
i;
/* NOTE(review): the sibling metrics test GetPixelReadMask on BOTH
   images here; this one tests GetPixelWriteMask on image only --
   confirm whether the asymmetry is intentional. */
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
difference=MagickFalse;
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
/* Alpha is compared directly; color channels are alpha-weighted. */
if (channel == AlphaPixelChannel)
distance=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
else
distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
if ((distance*distance) > fuzz)
{
channel_distortion[i]++;
difference=MagickTrue;
}
}
if (difference != MagickFalse)
channel_distortion[CompositePixelChannel]++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
  GetFuzzDistortion(): root-mean-squared channel difference, alpha-weighted
  and scaled to [0,1] by QuantumScale.  distortion[i] receives the per-channel
  mean square; the composite entry is averaged over the updated channels and
  square-rooted at the end.  'area' counts contributing pixels and is
  reciprocal-guarded, so a fully-masked overlap yields zero, not NaN.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
area;
MagickBooleanType
status;
register ssize_t
j;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,rows,1) reduction(+:area)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
/* NOTE(review): the q test casts to (Quantum *) rather than
   (const Quantum *) as elsewhere -- cosmetic inconsistency only. */
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* Accumulate per-row locally; merged under the critical section below. */
(void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* Skip pixels excluded by either image's read mask. */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
/* Alpha compared directly; color channels are alpha-weighted. */
if (channel == AlphaPixelChannel)
distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
channel,q));
else
distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
channel,q));
channel_distortion[i]+=distance*distance;
channel_distortion[CompositePixelChannel]+=distance*distance;
}
area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* Normalize: mean over pixels, then average over channels, then sqrt. */
area=PerceptibleReciprocal(area);
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]*=area;
distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
return(status);
}
/*
  GetMeanAbsoluteDistortion(): mean absolute channel difference (MAE),
  alpha-weighted and scaled to [0,1] by QuantumScale.  distortion[i] receives
  the per-channel mean; the composite entry is averaged over the updated
  channels.  'area' counts contributing pixels and is reciprocal-guarded.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
area;
MagickBooleanType
status;
register ssize_t
j;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,rows,1) reduction(+:area)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* Accumulate per-row locally; merged under the critical section below. */
(void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* Skip pixels excluded by either image's read mask. */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
/* Alpha compared directly; color channels are alpha-weighted. */
if (channel == AlphaPixelChannel)
distance=QuantumScale*fabs((double) p[i]-
GetPixelChannel(reconstruct_image,channel,q));
else
distance=QuantumScale*fabs(Sa*p[i]-Da*
GetPixelChannel(reconstruct_image,channel,q));
channel_distortion[i]+=distance;
channel_distortion[CompositePixelChannel]+=distance;
}
area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* Normalize: mean over pixels, then average over the updated channels. */
area=PerceptibleReciprocal(area);
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]*=area;
distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
return(status);
}
/*
  GetMeanErrorPerPixel(): accumulate the per-channel absolute error into
  distortion[] and record the mean error per pixel, the normalized mean
  error, and the normalized maximum error in image->error.  'area' counts
  the channel samples that contributed.  Unlike the other metrics, this one
  also mutates the input image's error statistics, hence the non-const
  'image' parameter.  Runs serially (no OpenMP) because maximum_error and
  mean_error are scalar accumulators.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
MagickBooleanType
status;
double
area,
maximum_error,
mean_error;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
area=0.0;
maximum_error=0.0;
mean_error=0.0;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* Skip pixels excluded by either image's read mask. */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
/* Alpha compared directly; color channels are alpha-weighted. */
if (channel == AlphaPixelChannel)
distance=fabs((double) p[i]-
GetPixelChannel(reconstruct_image,channel,q));
else
distance=fabs(Sa*p[i]-Da*
GetPixelChannel(reconstruct_image,channel,q));
distortion[i]+=distance;
distortion[CompositePixelChannel]+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
area++;
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* Guard against division by zero (empty overlap or fully-masked images)
   with the same PerceptibleReciprocal() idiom the sibling metrics use;
   the previous direct division by 'area' produced NaN/Inf when area was
   0.0. */
area=PerceptibleReciprocal(area);
image->error.mean_error_per_pixel=distortion[CompositePixelChannel]*area;
image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error*area;
image->error.normalized_maximum_error=QuantumScale*maximum_error;
return(status);
}
/*
  GetMeanSquaredDistortion(): mean squared channel difference (MSE),
  alpha-weighted and scaled to [0,1] by QuantumScale.  distortion[i]
  receives the per-channel mean square; the composite entry is averaged
  over the updated channels.  'area' counts contributing pixels and is
  reciprocal-guarded.  Identical to GetFuzzDistortion except no final sqrt.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
area;
MagickBooleanType
status;
register ssize_t
j;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,rows,1) reduction(+:area)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* Accumulate per-row locally; merged under the critical section below. */
(void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* Skip pixels excluded by either image's read mask. */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
/* Alpha compared directly; color channels are alpha-weighted. */
if (channel == AlphaPixelChannel)
distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
channel,q));
else
distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
channel,q));
channel_distortion[i]+=distance*distance;
channel_distortion[CompositePixelChannel]+=distance*distance;
}
area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* Normalize: mean over pixels, then average over the updated channels. */
area=PerceptibleReciprocal(area);
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]*=area;
distortion[CompositePixelChannel]/=GetImageChannels(image);
return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion(): per-channel normalized
  cross-correlation (NCC) between the two images.  Two serial passes over
  the pixels: the first counts the unmasked pixels to establish 'area',
  the second accumulates the covariance of each channel against the
  per-channel means from GetImageStatistics().  Each channel's sum is then
  divided by the product of the two standard deviations; the composite
  entry is the RMS of the per-channel correlations.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
const Image *image,const Image *reconstruct_image,double *distortion,
ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"
CacheView
*image_view,
*reconstruct_view;
ChannelStatistics
*image_statistics,
*reconstruct_statistics;
double
area;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
size_t
columns,
rows;
ssize_t
y;
/*
Normalize to account for variation due to lighting and exposure condition.
*/
image_statistics=GetImageStatistics(image,exception);
reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
if ((image_statistics == (ChannelStatistics *) NULL) ||
(reconstruct_statistics == (ChannelStatistics *) NULL))
{
/* Release whichever statistics buffer was successfully acquired. */
if (image_statistics != (ChannelStatistics *) NULL)
image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
image_statistics);
if (reconstruct_statistics != (ChannelStatistics *) NULL)
reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
reconstruct_statistics);
return(MagickFalse);
}
status=MagickTrue;
progress=0;
for (i=0; i <= MaxPixelChannels; i++)
distortion[i]=0.0;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
/* First pass: count the pixels not excluded by either read mask. */
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
}
/* Guarded reciprocal: a fully-masked overlap yields zero, not NaN. */
area=PerceptibleReciprocal(area);
/* Second pass: accumulate the mean-centered cross products. */
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
/* Treat images without an alpha trait as fully opaque. */
Sa=QuantumScale*(image->alpha_trait != UndefinedPixelTrait ?
GetPixelAlpha(image,p) : OpaqueAlpha);
Da=QuantumScale*(reconstruct_image->alpha_trait != UndefinedPixelTrait ?
GetPixelAlpha(reconstruct_image,q) : OpaqueAlpha);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
/* Alpha compared directly; color channels are alpha-weighted. */
if (channel == AlphaPixelChannel)
{
distortion[i]+=area*QuantumScale*(p[i]-
image_statistics[channel].mean)*(GetPixelChannel(
reconstruct_image,channel,q)-
reconstruct_statistics[channel].mean);
}
else
{
distortion[i]+=area*QuantumScale*(Sa*p[i]-
image_statistics[channel].mean)*(Da*GetPixelChannel(
reconstruct_image,channel,q)-
reconstruct_statistics[channel].mean);
}
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
/* Report per-row progress; a declined callback aborts the computation. */
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows);
if (proceed == MagickFalse)
{
status=MagickFalse;
break;
}
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/*
Divide by the standard deviation.
*/
distortion[CompositePixelChannel]=0.0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
gamma;
PixelChannel channel = GetPixelChannelChannel(image,i);
gamma=image_statistics[channel].standard_deviation*
reconstruct_statistics[channel].standard_deviation;
gamma=PerceptibleReciprocal(gamma);
distortion[i]=QuantumRange*gamma*distortion[i];
distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
}
/* Composite channel: RMS of the per-channel correlations. */
distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
GetImageChannels(image));
/*
Free resources.
*/
reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
reconstruct_statistics);
image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
image_statistics);
return(status);
}
/*
  GetPeakAbsoluteDistortion() returns the peak absolute error (PAE) between
  image and reconstruct_image: the largest per-channel absolute pixel
  difference, scaled into [0,1] by QuantumScale.  Per-channel maxima are
  written to distortion[0..MaxPixelChannels-1]; the overall maximum lands in
  distortion[CompositePixelChannel].  Returns MagickFalse if any pixel row
  cannot be read.
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
MagickBooleanType
status;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
/* Compare over the union of both geometries; virtual views supply pixels
   outside either image's bounds. */
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
j,
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* Per-row maxima; merged into the shared distortion[] under the critical
   section below. */
(void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* Skip pixels masked out of the comparison in either image. */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
/* Alpha is compared directly; color channels are weighted by the
   respective alphas (associated-alpha comparison). */
if (channel == AlphaPixelChannel)
distance=QuantumScale*fabs((double) p[i]-
GetPixelChannel(reconstruct_image,channel,q));
else
distance=QuantumScale*fabs(Sa*p[i]-Da*
GetPixelChannel(reconstruct_image,channel,q));
if (distance > channel_distortion[i])
channel_distortion[i]=distance;
if (distance > channel_distortion[CompositePixelChannel])
channel_distortion[CompositePixelChannel]=distance;
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
/* Merge this row's maxima into the caller's accumulator. */
for (j=0; j <= MaxPixelChannels; j++)
if (channel_distortion[j] > distortion[j])
distortion[j]=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
/*
  GetPeakSignalToNoiseRatio() converts each channel's mean-squared error
  into a PSNR value in decibels.  A (near) zero error means the channels
  are identical and is reported as infinite signal-to-noise.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (i=0; i <= MaxPixelChannels; i++)
  {
    double
      mse;

    mse=distortion[i];
    if (fabs(mse) < MagickEpsilon)
      {
        distortion[i]=INFINITY;
        continue;
      }
    distortion[i]=20.0*MagickLog10(1.0/sqrt(mse));
  }
  return(status);
}
/*
  GetPerceptualHashDistortion() compares the perceptual hashes (image-moment
  fingerprints) of the two images and accumulates the per-channel squared
  differences into distortion[].  Returns MagickFalse only if either hash
  cannot be computed.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
ChannelPerceptualHash
*channel_phash,
*reconstruct_phash;
const char
*artifact;
MagickBooleanType
normalize;
ssize_t
channel;
/*
Compute perceptual hash in the sRGB colorspace.
*/
channel_phash=GetImagePerceptualHash(image,exception);
if (channel_phash == (ChannelPerceptualHash *) NULL)
return(MagickFalse);
reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
{
/* Release the first hash before bailing out. */
channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
channel_phash);
return(MagickFalse);
}
/* "phash:normalize" artifact selects the normalized difference formula. */
artifact=GetImageArtifact(image,"phash:normalize");
normalize=(artifact == (const char *) NULL) ||
(IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4)
#endif
for (channel=0; channel < MaxPixelChannels; channel++)
{
double
difference;
register ssize_t
i;
difference=0.0;
/* Sum squared differences over every image moment and colorspace. */
for (i=0; i < MaximumNumberOfImageMoments; i++)
{
double
alpha,
beta;
register ssize_t
j;
for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
{
alpha=channel_phash[channel].phash[j][i];
beta=reconstruct_phash[channel].phash[j][i];
if (normalize == MagickFalse)
difference+=(beta-alpha)*(beta-alpha);
else
/* NOTE(review): this branch assigns rather than accumulates, so only
   the last moment/colorspace survives when normalizing -- confirm
   this is the intended semantics. */
difference=sqrt((beta-alpha)*(beta-alpha)/
channel_phash[0].number_channels);
}
}
/* Each thread owns a distinct channel slot, so only the shared composite
   accumulator needs the critical section. */
distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
distortion[CompositePixelChannel]+=difference;
}
/*
Free resources.
*/
reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
reconstruct_phash);
channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
return(MagickTrue);
}
/*
  GetRootMeanSquaredDistortion() reports the RMSE of each channel: the
  square root of the per-channel mean-squared error.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  i=0;
  while (i <= MaxPixelChannels)
  {
    distortion[i]=sqrt(distortion[i]);
    i++;
  }
  return(status);
}
/*
  GetStructuralSimilarityDistortion() computes the structural similarity
  index (SSIM) between image and reconstruct_image using a Gaussian
  weighting window.  Radius, sigma and the K1/K2 stabilizing constants can
  be overridden with the "compare:ssim-radius", "compare:ssim-sigma",
  "compare:ssim-k1" and "compare:ssim-k2" image artifacts.  Per-channel
  mean SSIM is written to distortion[]; the channel-averaged mean lands in
  distortion[CompositePixelChannel].

  Fix: the per-pixel accumulator reset previously cleared
  x_pixel_sigma_squared twice (the second time with
  sizeof(y_pixel_sigma_squared) -- a copy-paste slip); each of the five
  accumulators is now cleared exactly once with its own sizeof.
*/
static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius  5.0
#define SSIMSigma  1.5
#define SSIMBlocksize  8
#define SSIMK1  0.01
#define SSIMK2  0.03
#define SSIML  1.0

  CacheView
    *image_view,
    *reconstruct_view;

  char
    geometry[MagickPathExtent];

  const char
    *artifact;

  double
    c1,
    c2,
    radius,
    sigma;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute structural similarity index @
    https://en.wikipedia.org/wiki/Structural_similarity.
  */
  radius=SSIMRadius;
  artifact=GetImageArtifact(image,"compare:ssim-radius");
  if (artifact != (const char *) NULL)
    radius=StringToDouble(artifact,(char **) NULL);
  sigma=SSIMSigma;
  artifact=GetImageArtifact(image,"compare:ssim-sigma");
  if (artifact != (const char *) NULL)
    sigma=StringToDouble(artifact,(char **) NULL);
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Stabilizing constants c1=(K1*L)^2 and c2=(K2*L)^2 from the SSIM paper.
  */
  c1=pow(SSIMK1*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k1");
  if (artifact != (const char *) NULL)
    c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  c2=pow(SSIMK2*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k2");
  if (artifact != (const char *) NULL)
    c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,reconstruct_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch the row plus a kernel-sized apron on all sides so the window
      can be centered on every column.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y-
      ((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/
      2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        x_pixel_mu[MaxPixelChannels+1],
        x_pixel_sigma_squared[MaxPixelChannels+1],
        xy_sigma[MaxPixelChannels+1],
        y_pixel_mu[MaxPixelChannels+1],
        y_pixel_sigma_squared[MaxPixelChannels+1];

      register const Quantum
        *magick_restrict reference,
        *magick_restrict target;

      register double
        *k;

      ssize_t
        v;

      /*
        Clear each windowed accumulator exactly once (see fix note above).
      */
      (void) ResetMagickMemory(x_pixel_mu,0,sizeof(x_pixel_mu));
      (void) ResetMagickMemory(x_pixel_sigma_squared,0,
        sizeof(x_pixel_sigma_squared));
      (void) ResetMagickMemory(xy_sigma,0,sizeof(xy_sigma));
      (void) ResetMagickMemory(y_pixel_mu,0,sizeof(y_pixel_mu));
      (void) ResetMagickMemory(y_pixel_sigma_squared,0,
        sizeof(y_pixel_sigma_squared));
      /*
        Accumulate Gaussian-weighted means, variances and covariance over
        the kernel window centered on (x,y).
      */
      k=kernel_info->values;
      reference=p;
      target=q;
      for (v=0; v < (ssize_t) kernel_info->height; v++)
      {
        register ssize_t
          u;

        for (u=0; u < (ssize_t) kernel_info->width; u++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              x_pixel,
              y_pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait reconstruct_traits = GetPixelChannelTraits(
              reconstruct_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (reconstruct_traits == UndefinedPixelTrait) ||
                ((reconstruct_traits & UpdatePixelTrait) == 0))
              continue;
            x_pixel=QuantumScale*reference[i];
            x_pixel_mu[i]+=(*k)*x_pixel;
            x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel;
            y_pixel=QuantumScale*
              GetPixelChannel(reconstruct_image,channel,target);
            y_pixel_mu[i]+=(*k)*y_pixel;
            y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel;
            xy_sigma[i]+=(*k)*x_pixel*y_pixel;
          }
          k++;
          reference+=GetPixelChannels(image);
          target+=GetPixelChannels(reconstruct_image);
        }
        /*
          Advance to the next apron row: the u loop already consumed
          kernel_info->width pixels, columns more completes the row.
        */
        reference+=GetPixelChannels(image)*columns;
        target+=GetPixelChannels(reconstruct_image)*columns;
      }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          ssim,
          x_pixel_mu_squared,
          x_pixel_sigmas_squared,
          xy_mu,
          xy_sigmas,
          y_pixel_mu_squared,
          y_pixel_sigmas_squared;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          Standard SSIM formula:
          ((2*mu_x*mu_y+c1)*(2*sigma_xy+c2)) /
          ((mu_x^2+mu_y^2+c1)*(sigma_x^2+sigma_y^2+c2)).
        */
        x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i];
        y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i];
        xy_mu=x_pixel_mu[i]*y_pixel_mu[i];
        xy_sigmas=xy_sigma[i]-xy_mu;
        x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared;
        y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared;
        ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/
          ((x_pixel_mu_squared+y_pixel_mu_squared+c1)*
           (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2));
        channel_distortion[i]+=ssim;
        channel_distortion[CompositePixelChannel]+=ssim;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion)
#endif
    for (i=0; i <= MaxPixelChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  image_view=DestroyCacheView(image_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  /*
    Normalize the accumulated SSIM to a per-pixel mean.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    distortion[i]/=((double) columns*rows);
  }
  distortion[CompositePixelChannel]/=((double) columns*rows);
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(status);
}
/*
  GetStructuralDisimilarityDistortion() reports DSSIM, derived directly
  from the structural similarity index: dssim = (1-ssim)/2.
*/
static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetStructuralSimilarityDistortion(image,reconstruct_image,distortion,
    exception);
  for (i=MaxPixelChannels; i >= 0; i--)
    distortion[i]=(1.0-(distortion[i]))/2.0;
  return(status);
}
/*
  GetImageDistortion() compares image against reconstruct_image with the
  requested metric and stores the composite-channel result in *distortion.
  The per-channel results are computed into a scratch array and discarded;
  the composite value is also recorded in the image's "distortion" property.
  An unrecognized metric falls through to normalized cross correlation.
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
const Image *reconstruct_image,const MetricType metric,double *distortion,
ExceptionInfo *exception)
{
double
*channel_distortion;
MagickBooleanType
status;
size_t
length;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
Get image distortion.
*/
/* One slot per pixel channel plus the composite slot, zero-initialized so
   metrics can accumulate into it. */
length=MaxPixelChannels+1;
channel_distortion=(double *) AcquireQuantumMemory(length,
sizeof(*channel_distortion));
if (channel_distortion == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(channel_distortion,0,length*
sizeof(*channel_distortion));
/* Dispatch to the metric implementation; every case assigns status, and
   the default (placed mid-switch by convention here) handles unknown
   metrics via normalized cross correlation. */
switch (metric)
{
case AbsoluteErrorMetric:
{
status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
exception);
break;
}
case FuzzErrorMetric:
{
status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
exception);
break;
}
case MeanAbsoluteErrorMetric:
{
status=GetMeanAbsoluteDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case MeanErrorPerPixelErrorMetric:
{
status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
exception);
break;
}
case MeanSquaredErrorMetric:
{
status=GetMeanSquaredDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case NormalizedCrossCorrelationErrorMetric:
default:
{
status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PeakAbsoluteErrorMetric:
{
status=GetPeakAbsoluteDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PeakSignalToNoiseRatioErrorMetric:
{
status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PerceptualHashErrorMetric:
{
status=GetPerceptualHashDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case RootMeanSquaredErrorMetric:
{
status=GetRootMeanSquaredDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case StructuralSimilarityErrorMetric:
{
status=GetStructuralSimilarityDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case StructuralDissimilarityErrorMetric:
{
status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
}
/* Report only the composite value; record it as an image property too. */
*distortion=channel_distortion[CompositePixelChannel];
channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
(void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
*distortion);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDistortions() compares the pixel channels of image against
  reconstruct_image with the requested metric and returns a newly allocated
  array of per-channel distortions (indexed by pixel channel, with the
  composite value at CompositePixelChannel).  The caller owns the returned
  array and must free it with RelinquishMagickMemory(); NULL is returned on
  failure.  An unrecognized metric falls through to normalized cross
  correlation.

  Fix: the PerceptualHashErrorMetric case previously dispatched to
  GetRootMeanSquaredDistortion(); it now calls GetPerceptualHashDistortion(),
  consistent with GetImageDistortion().
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      /*
        Dispatch to the perceptual-hash metric (previously this case
        mistakenly called GetRootMeanSquaredDistortion).
      */
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compares the pixels of two images and returns MagickFalse
% immediately if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  IsImagesEqual() scans both images pixel by pixel and returns MagickTrue
  only if every compared channel differs by less than MagickEpsilon.  The
  scan short-circuits: the inner break propagates outward via the i < ...,
  x < ... and y < ... checks, so the first mismatch (or unreadable row)
  ends the comparison immediately with MagickFalse.
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
const Image *reconstruct_image,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
/* Compare over the union of both geometries. */
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) columns; x++)
{
register ssize_t
i;
/* Skip pixels masked out of the comparison.  NOTE(review): this uses
   the write mask where the distortion metrics use the read mask --
   confirm which is intended here. */
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
channel,q));
if (distance >= MagickEpsilon)
break;
}
/* i stopped early => a channel mismatched; propagate the break. */
if (i < (ssize_t) GetPixelChannels(image))
break;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
if (x < (ssize_t) columns)
break;
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* A completed y loop means no mismatch was found. */
return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetImageColorMetric() measures the per-channel difference between image
  and reconstruct_image and records the results in image->error:
  mean_error_per_pixel, normalized_mean_error and normalized_maximum_error.
  Returns MagickTrue only when every compared channel matches to within
  MagickEpsilon (i.e. the mean error is exactly zero).

  Fix: guard the final divisions against area == 0 (possible when every
  pixel is write-masked or no channels are compared), which previously
  produced NaN error statistics.
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  /*
    Compare over the union of both geometries; virtual views supply pixels
    outside either image's bounds.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /*
        Skip pixels masked out of the comparison.
      */
      if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Guard against an empty comparison (area stays 0.0 when everything is
    masked): all accumulated errors are 0.0 then, so dividing by 1.0
    yields the correct zero statistics instead of NaN.
  */
  if (area == 0.0)
    area=1.0;
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetSimilarityMetric() returns the distortion between the reference image
  and the equally sized window of image anchored at (x_offset,y_offset),
  or 0.0 if the window cannot be cropped or the metric fails.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *crop_image;

  MagickBooleanType
    status;

  RectangleInfo
    crop_geometry;

  /*
    Crop a reference-sized window out of image at the requested offset.
  */
  SetGeometry(reference,&crop_geometry);
  crop_geometry.x=x_offset;
  crop_geometry.y=y_offset;
  crop_image=CropImage(image,&crop_geometry,exception);
  if (crop_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(crop_image,reference,metric,&distortion,exception);
  crop_image=DestroyImage(crop_image);
  return(status != MagickFalse ? distortion : 0.0);
}
/*
  SimilarityImage() slides the reference image over every offset of image,
  scores each position with GetSimilarityMetric(), records the best match
  in *offset / *similarity_metric, and returns a map image in which bright
  pixels mark close matches.  The search short-circuits once a score at or
  below similarity_threshold is found.  Returns NULL on failure.
*/
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
const MetricType metric,const double similarity_threshold,
RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"
CacheView
*similarity_view;
Image
*similarity_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(offset != (RectangleInfo *) NULL);
SetGeometry(reference,offset);
*similarity_metric=MagickMaximumValue;
/* The similarity map has one pixel per candidate offset. */
similarity_image=CloneImage(image,image->columns-reference->columns+1,
image->rows-reference->rows+1,MagickTrue,exception);
if (similarity_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageStorageClass(similarity_image,DirectClass,exception);
if (status == MagickFalse)
{
similarity_image=DestroyImage(similarity_image);
return((Image *) NULL);
}
(void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
exception);
/*
Measure similarity of reference image against image.
*/
status=MagickTrue;
progress=0;
similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
shared(progress,status,similarity_metric) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
{
double
similarity;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
/* Re-read the shared best score so rows started later can bail early. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp flush(similarity_metric)
#endif
if (*similarity_metric <= similarity_threshold)
continue;
q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
{
register ssize_t
i;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp flush(similarity_metric)
#endif
if (*similarity_metric <= similarity_threshold)
break;
similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
/* NOTE(review): the critical section below binds only to the next if
   statement; the subsequent read-compare-update of *similarity_metric
   appears to run outside it -- confirm this is the intended locking
   scope. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SimilarityImage)
#endif
if ((metric == NormalizedCrossCorrelationErrorMetric) ||
(metric == UndefinedErrorMetric))
similarity=1.0-similarity;
if (similarity < *similarity_metric)
{
offset->x=x;
offset->y=y;
*similarity_metric=similarity;
}
/* Rescale perceptual-hash scores into [0,1] for the map pixel. */
if (metric == PerceptualHashErrorMetric)
similarity=MagickMin(0.01*similarity,1.0);
if (GetPixelWriteMask(similarity_image,q) <= (QuantumRange/2))
{
SetPixelBackgoundColor(similarity_image,q);
q+=GetPixelChannels(similarity_image);
continue;
}
/* Bright map pixel == low distortion at this offset. */
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(similarity_traits == UndefinedPixelTrait) ||
((similarity_traits & UpdatePixelTrait) == 0))
continue;
SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
QuantumRange*similarity),q);
}
q+=GetPixelChannels(similarity_image);
}
if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SimilarityImage)
#endif
proceed=SetImageProgress(image,SimilarityImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
similarity_view=DestroyCacheView(similarity_view);
(void) SetImageAlphaChannel(similarity_image,OffAlphaChannel,exception);
if (status == MagickFalse)
similarity_image=DestroyImage(similarity_image);
return(similarity_image);
}
|
flush.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(void) {
  int a = 0;
  int b = 0;

  /* Threads 0 and 1 each publish one flag.  The flush (and the implicit
     barrier/flush at the end of the parallel region) makes both writes
     visible to the master thread before the values are printed. */
#pragma omp parallel
  {
    const int tid = omp_get_thread_num();

    if (tid == 0)
      a = 1;
    if (tid == 1)
      b = 1;
#pragma omp flush(a, b)
  }
  printf("%d,%d\n", a, b);
  return 0;
}
/***************************************************************/
|
struct.c | // RUN: %libomptarget-compile-generic -fopenmp-extensions
// RUN: %libomptarget-run-generic | %fcheck-generic -strict-whitespace
// Wrong results on amdgpu
// XFAIL: amdgcn-amd-amdhsa
// XFAIL: amdgcn-amd-amdhsa-oldDriver
#include <omp.h>
#include <stdio.h>
// Print whether each of three lvalues is currently mapped into the default
// device's data environment (1 = present, 0 = absent).
#define CHECK_PRESENCE(Var1, Var2, Var3) \
  printf(" presence of %s, %s, %s: %d, %d, %d\n", \
         #Var1, #Var2, #Var3, \
         omp_target_is_present(&(Var1), omp_get_default_device()), \
         omp_target_is_present(&(Var2), omp_get_default_device()), \
         omp_target_is_present(&(Var3), omp_get_default_device()))
// Print the current host values of two int lvalues.
#define CHECK_VALUES(Var1, Var2) \
  printf(" values of %s, %s: %d, %d\n", \
         #Var1, #Var2, (Var1), (Var2))
int main() {
  // Host struct whose whole-struct and per-member mappings are exercised.
  // The `// CHECK` comments below are FileCheck directives matched against
  // this program's stdout -- do not edit them casually.
  struct S { int i; int j; } s;
  // CHECK: presence of s, s.i, s.j: 0, 0, 0
  CHECK_PRESENCE(s, s.i, s.j);
  // =======================================================================
  // Check that ompx_hold keeps entire struct present.
  // -----------------------------------------------------------------------
  // CHECK-LABEL: check:{{.*}}
  printf("check: ompx_hold only on first member\n");
  s.i = 20;
  s.j = 30;
  #pragma omp target data map(tofrom: s) map(ompx_hold,tofrom: s.i) \
      map(tofrom: s.j)
  {
    // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
    CHECK_PRESENCE(s, s.i, s.j);
    #pragma omp target map(tofrom: s)
    {
      s.i = 21;
      s.j = 31;
    }
    // The delete is expected to be a no-op while the hold is active.
    #pragma omp target exit data map(delete: s, s.i)
    // ompx_hold on s.i applies to all of s.
    // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
    // CHECK-NEXT: values of s.i, s.j: 20, 30
    CHECK_PRESENCE(s, s.i, s.j);
    CHECK_VALUES(s.i, s.j);
  }
  // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0
  // CHECK-NEXT: values of s.i, s.j: 21, 31
  CHECK_PRESENCE(s, s.i, s.j);
  CHECK_VALUES(s.i, s.j);
  // -----------------------------------------------------------------------
  // CHECK-LABEL: check:{{.*}}
  printf("check: ompx_hold only on last member\n");
  s.i = 20;
  s.j = 30;
  #pragma omp target data map(tofrom: s) map(tofrom: s.i) \
      map(ompx_hold,tofrom: s.j)
  {
    // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
    CHECK_PRESENCE(s, s.i, s.j);
    #pragma omp target map(tofrom: s)
    {
      s.i = 21;
      s.j = 31;
    }
    #pragma omp target exit data map(delete: s, s.i)
    // ompx_hold on s.j applies to all of s.
    // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
    // CHECK-NEXT: values of s.i, s.j: 20, 30
    CHECK_PRESENCE(s, s.i, s.j);
    CHECK_VALUES(s.i, s.j);
  }
  // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0
  // CHECK-NEXT: values of s.i, s.j: 21, 31
  CHECK_PRESENCE(s, s.i, s.j);
  CHECK_VALUES(s.i, s.j);
  // -----------------------------------------------------------------------
  // CHECK-LABEL: check:{{.*}}
  printf("check: ompx_hold only on struct\n");
  s.i = 20;
  s.j = 30;
  #pragma omp target data map(ompx_hold,tofrom: s) map(tofrom: s.i) \
      map(tofrom: s.j)
  {
    // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
    CHECK_PRESENCE(s, s.i, s.j);
    #pragma omp target map(tofrom: s)
    {
      s.i = 21;
      s.j = 31;
    }
    #pragma omp target exit data map(delete: s, s.i)
    // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
    // CHECK-NEXT: values of s.i, s.j: 20, 30
    CHECK_PRESENCE(s, s.i, s.j);
    CHECK_VALUES(s.i, s.j);
  }
  // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0
  // CHECK-NEXT: values of s.i, s.j: 21, 31
  CHECK_PRESENCE(s, s.i, s.j);
  CHECK_VALUES(s.i, s.j);
  // =======================================================================
  // Check that transfer to/from host checks reference count correctly.
  // -----------------------------------------------------------------------
  // CHECK-LABEL: check:{{.*}}
  printf("check: parent DynRefCount=1 is not sufficient for transfer\n");
  s.i = 20;
  s.j = 30;
  #pragma omp target data map(ompx_hold, tofrom: s)
  #pragma omp target data map(ompx_hold, tofrom: s)
  {
    // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
    CHECK_PRESENCE(s, s.i, s.j);
    #pragma omp target map(from: s.i, s.j)
    {
      s.i = 21;
      s.j = 31;
    } // No transfer here even though parent's DynRefCount=1.
    // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
    // CHECK-NEXT: values of s.i, s.j: 20, 30
    CHECK_PRESENCE(s, s.i, s.j);
    CHECK_VALUES(s.i, s.j);
    #pragma omp target map(to: s.i, s.j)
    { // No transfer here even though parent's DynRefCount=1.
      // CHECK-NEXT: values of s.i, s.j: 21, 31
      CHECK_VALUES(s.i, s.j);
    }
  }
  // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0
  // CHECK-NEXT: values of s.i, s.j: 21, 31
  CHECK_PRESENCE(s, s.i, s.j);
  CHECK_VALUES(s.i, s.j);
  // -----------------------------------------------------------------------
  // CHECK-LABEL: check:{{.*}}
  printf("check: parent HoldRefCount=1 is not sufficient for transfer\n");
  s.i = 20;
  s.j = 30;
  #pragma omp target data map(tofrom: s)
  #pragma omp target data map(tofrom: s)
  {
    // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
    CHECK_PRESENCE(s, s.i, s.j);
    #pragma omp target map(ompx_hold, from: s.i, s.j)
    {
      s.i = 21;
      s.j = 31;
    } // No transfer here even though parent's HoldRefCount=1.
    // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
    // CHECK-NEXT: values of s.i, s.j: 20, 30
    CHECK_PRESENCE(s, s.i, s.j);
    CHECK_VALUES(s.i, s.j);
    #pragma omp target map(ompx_hold, to: s.i, s.j)
    { // No transfer here even though parent's HoldRefCount=1.
      // CHECK-NEXT: values of s.i, s.j: 21, 31
      CHECK_VALUES(s.i, s.j);
    }
  }
  // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0
  // CHECK-NEXT: values of s.i, s.j: 21, 31
  CHECK_PRESENCE(s, s.i, s.j);
  CHECK_VALUES(s.i, s.j);
  // -----------------------------------------------------------------------
  // CHECK-LABEL: check:{{.*}}
  //
  // At the beginning of a region, if the parent's TotalRefCount=1, then the
  // transfer should happen.
  //
  // At the end of a region, it also must be true that the reference count being
  // decremented is the reference count that is 1.
  printf("check: parent TotalRefCount=1 is not sufficient for transfer\n");
  s.i = 20;
  s.j = 30;
  #pragma omp target data map(ompx_hold, tofrom: s)
  {
    // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
    CHECK_PRESENCE(s, s.i, s.j);
    #pragma omp target map(ompx_hold, tofrom: s.i, s.j)
    {
      s.i = 21;
      s.j = 31;
    }
    #pragma omp target exit data map(from: s.i, s.j)
    // No transfer here even though parent's TotalRefCount=1.
    // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
    // CHECK-NEXT: values of s.i, s.j: 20, 30
    CHECK_PRESENCE(s, s.i, s.j);
    CHECK_VALUES(s.i, s.j);
  }
  // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0
  // CHECK-NEXT: values of s.i, s.j: 21, 31
  CHECK_PRESENCE(s, s.i, s.j);
  CHECK_VALUES(s.i, s.j);
  return 0;
}
|
helloThreads.c | // Compile with
// gcc -fopenmp -o helloT helloThreads.c
#include <omp.h>
#include <stdio.h>
int main() {
  /* Every thread in the team announces its id and the team size. */
  #pragma omp parallel
  {
    int thread_id = omp_get_thread_num();
    int team_size = omp_get_num_threads();
    printf("Hello from thread %d, nthreads %d\n", thread_id, team_size);
  }
}
// gcc -fopenmp -o helloThreads helloThreads.c
|
GB_unop__sinh_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__sinh_fp64_fp64
// op(A') function: GB_unop_tran__sinh_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = sinh (aij)
// type of the A (input) matrix entries
#define GB_ATYPE \
    double

// type of the C (output) matrix entries
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

// reference an entry of the output array
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = sinh (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = sinh (z) ; \
}

// true if operator is the identity op with no typecasting
// (sinh is not the identity, so the memcpy fast path below is compiled out)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SINH || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = sinh (Ax [p]) for every entry present in A.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE,
// which tells the caller to fall back to the generic apply.
GrB_Info GB_unop_apply__sinh_fp64_fp64
(
    double *Cx,                     // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,                    // number of entries (or bitmap size)
    int nthreads                    // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op, no typecast: a parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = sinh (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = sinh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = sinh (A'): transpose A and apply the unary operator in one pass.
// The actual loop lives in the shared template GB_unop_transpose.c, which
// expands here in terms of the GB_* macros defined above.
GrB_Info GB_unop_tran__sinh_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,     // per-workspace scratch arrays
    const int64_t *GB_RESTRICT A_slice,   // how A is split across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
segment.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class are used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ''fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ''classified'' and is assigned a unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% the local minima of the generalized within group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
/*
  Define declarations.
*/
#define MaxDimension 3  /* number of color components considered (R, G, B) */
#define DeltaTau 0.5f   /* step between successive scale-space smoothings */
#if defined(FastClassify)
/* With weighting exponent m=2, 1/(m-1)=1, so raising to that power is a
   no-op and SegmentPower can simply pass the ratio through. */
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
/*
  NOTE(review): this expansion reads a `weighting_exponent` variable from the
  caller's scope and carries a trailing semicolon, so a call site such as
  `sum+=SegmentPower(ratio);` expands to a statement plus an empty statement.
  It works where used below, but confirm before reusing elsewhere.
*/
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau 5.2f  /* presumably the starting (largest) scale-space tau -- confirm against OptimalTau */
/*
  Typedef declarations.
*/
/*
  One interval of a single color component's histogram: [left,right] bounds
  in 0..255 bin units, a running mean accumulator (center), and the scan
  position (index) DefineRegion() uses to resume iteration over extrema.
*/
typedef struct _ExtentPacket
{
  double
    center;       /* accumulated, later averaged, component value */

  ssize_t
    index,        /* current scan position into the extrema array */
    left,         /* first bin of the region */
    right;        /* last bin of the region */
} ExtentPacket;

/*
  A candidate color class: the cross product of one red, one green and one
  blue extent, plus the number of pixels that fell inside that RGB box.
  Clusters form a singly-linked list headed elsewhere.
*/
typedef struct _Cluster
{
  struct _Cluster
    *next;        /* next cluster in the list, or NULL */

  ExtentPacket
    red,
    green,
    blue;

  ssize_t
    count,        /* number of pixels assigned to this cluster */
    id;           /* colormap index assigned after pruning */
} Cluster;

/*
  Node of the scale-space interval tree built from zero crossings.
  NOTE(review): the stability fields are maintained by code outside this
  view (e.g. OptimalTau) -- semantics inferred from names only.
*/
typedef struct _IntervalTree
{
  double
    tau;          /* smoothing scale at which this interval exists */

  ssize_t
    left,
    right;        /* histogram bin range covered by the interval */

  double
    mean_stability,
    stability;

  struct _IntervalTree
    *sibling,
    *child;
} IntervalTree;

/*
  A histogram smoothed at one scale (tau) together with the sign pattern of
  the zero crossings of its second derivative.
*/
typedef struct _ZeroCrossing
{
  double
    tau,
    histogram[256];

  short
    crossings[256];
} ZeroCrossing;
/*
  Constant declarations.
*/
static const int
  Blue = 2,        /* index of the blue component in histogram/extrema arrays */
  Green = 1,       /* index of the green component */
  Red = 0,         /* index of the red component */
  SafeMargin = 3,  /* slack, in 0..255 bin units, added around cluster extents */
  TreeLength = 600; /* capacity of the interval-tree node pool -- used by code outside this view; confirm */

/*
  Method prototypes.
*/
static double
  OptimalTau(const ssize_t *,const double,const double,const double,
    const double,short *);

static ssize_t
  DefineRegion(const short *,ExtentPacket *);

static void
  InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
  ScaleSpace(const ssize_t *,const double,double *),
  ZeroCrossHistogram(double *,const double,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const double cluster_threshold,
% const double weighting_exponent,
% const MagickBooleanType verbose,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
  const double cluster_threshold,
  const double weighting_exponent,const MagickBooleanType verbose,
  ExceptionInfo *exception)
{
#define SegmentImageTag "Segment/Image"

  CacheView
    *image_view;

  Cluster
    *cluster,
    *head,            /* head of the singly-linked cluster list */
    *last_cluster,    /* trails the loop so deletions can relink the list */
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickOffsetType
    progress;

  double
    *free_squares;

  MagickStatusType
    status;

  register ssize_t
    i;

  register double
    *squares;         /* biased lookup table: squares[d] == d*d for d in -255..255 */

  size_t
    number_clusters;

  ssize_t
    count,
    y;

  /*
    Form clusters: one candidate cluster for every combination of a red,
    green, and blue histogram region reported by DefineRegion().
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        /* NOTE(review): on this failure path any clusters already linked
           into the list appear to leak -- confirm ThrowBinaryException
           semantics. */
        if (cluster == (Cluster *) NULL)
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified -- create one covering the defaults.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster: assign every pixel to the first
    cluster whose (margin-padded) RGB box contains it, accumulating the
    per-component sums used later to compute the cluster centers.
  */
  status=MagickTrue;
  count=0;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,2*
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Remove clusters that do not meet minimum cluster threshold.
    NOTE(review): `count` is reset here and incremented per *retained
    cluster*, so the threshold below compares a cluster's pixel count
    against a percentage of the running cluster count, not of the total
    pixel count accumulated above -- looks suspicious; verify intent
    against upstream before changing.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: finalize its mean color and assign its id.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  number_clusters=(size_t) count;
  if (verbose != MagickFalse)
    {
      /*
        Print cluster statistics.
      */
      (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
      (void) FormatLocaleFile(stdout,"===================\n\n");
      (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
        cluster_threshold);
      (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
        weighting_exponent);
      (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
        (double) number_clusters);
      /*
        Print the total number of points per cluster.
      */
      (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
      (void) FormatLocaleFile(stdout,"=============================\n\n");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
          cluster->id,(double) cluster->count);
      /*
        Print the cluster extents.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,
          "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
          cluster->red.left,(double) cluster->red.right,(double)
          cluster->green.left,(double) cluster->green.right,(double)
          cluster->blue.left,(double) cluster->blue.right);
      }
      /*
        Print the cluster center values.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"=====================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
          cluster->red.center,(double) cluster->green.center,(double)
          cluster->blue.center);
      }
      (void) FormatLocaleFile(stdout,"\n");
    }
  /* NOTE(review): the cluster list appears to leak on this early return --
     confirm ThrowBinaryException semantics. */
  if (number_clusters > 256)
    ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
  /*
    Speed up distance calculations: precompute d*d for d in -255..255.
  */
  squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
  if (squares == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Bias the pointer so the table can be indexed with negative differences. */
  squares+=255;
  for (i=(-255); i <= 255; i++)
    squares[i]=(double) i*(double) i;
  /*
    Allocate image colormap: one entry per surviving cluster, set to the
    cluster's mean color.
  */
  if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  i=0;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
  {
    image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
      (cluster->red.center+0.5));
    image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
      (cluster->green.center+0.5));
    image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
      (cluster->blue.center+0.5));
    i++;
  }
  /*
    Do coarse grain classes: threshold each pixel against the cluster
    boxes; pixels that match no box fall through to fuzzy c-means below.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Cluster
      *cluster;

    register const PixelInfo
      *restrict p;

    register ssize_t
      x;

    register Quantum
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,0,q);
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Classify this pixel.
            */
            SetPixelIndex(image,(Quantum) cluster->id,q);
            break;
          }
      }
      /* A NULL loop variable here means no cluster box matched the pixel. */
      if (cluster == (Cluster *) NULL)
        {
          double
            distance_squared,
            local_minima,
            numerator,
            ratio,
            sum;

          register ssize_t
            j,
            k;

          /*
            Compute fuzzy membership: assign the pixel to the colormap
            entry with the maximum membership value (1/sum).
          */
          local_minima=0.0;
          for (j=0; j < (ssize_t) image->colors; j++)
          {
            sum=0.0;
            p=image->colormap+j;
            distance_squared=squares[(ssize_t) ScaleQuantumToChar(
              GetPixelRed(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t)
              ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t)
              ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->blue))];
            numerator=distance_squared;
            for (k=0; k < (ssize_t) image->colors; k++)
            {
              p=image->colormap+k;
              distance_squared=squares[(ssize_t) ScaleQuantumToChar(
                GetPixelRed(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[
                (ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[
                (ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->blue))];
              ratio=numerator/distance_squared;
              sum+=SegmentPower(ratio);
            }
            if ((sum != 0.0) && ((1.0/sum) > local_minima))
              {
                /*
                  Classify this pixel.
                */
                local_minima=1.0/sum;
                SetPixelIndex(image,(Quantum) j,q);
              }
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,
          2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status&=SyncImage(image,exception);
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /* Undo the bias before freeing the squares table. */
  squares-=255;
  free_squares=squares;
  free_squares=(double *) RelinquishMagickMemory(free_squares);
  /* NOTE(review): returns MagickTrue even when `status` recorded a failure
     above -- confirm whether callers rely on this. */
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lie between two crossings.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static inline ssize_t MagickAbsoluteValue(const ssize_t x)
{
  /*
    Return the absolute value of a signed size.
  */
  return(x >= 0 ? x : -x);
}
static inline ssize_t MagickMax(const ssize_t x,const ssize_t y)
{
  /*
    Return the larger of two signed sizes.
  */
  return(x > y ? x : y);
}
static inline ssize_t MagickMin(const ssize_t x,const ssize_t y)
{
  /*
    Return the smaller of two signed sizes.
  */
  return(x < y ? x : y);
}
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,   /* bin chosen to receive the crossing; -1 until decided */
    count,
    left,
    right;

  /*
    Consolidate zero crossings: working from the coarsest scale down,
    move each crossing at scale i to a bin that keeps an even number of
    finer-scale (i+1) crossings between consecutive crossings.

    NOTE(review): this indexes zero_crossing[i+1] up to i=number_crossings-1,
    so the caller must supply an array of at least number_crossings+1
    elements -- confirm against OptimalTau's allocation.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /* Move the crossing to the chosen bin (or drop it if none fits). */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o extents: This pointer to an ExtentPacket represent the extends
% of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Reset the extent to cover the full histogram range by default.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Advance to the next positive extremum; it marks the left side (maxima).
  */
  while ((extents->index <= 255) && (extrema[extents->index] <= 0))
    extents->index++;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Advance to the next negative extremum (minima); the region ends on the
    bin just before it.
  */
  while ((extents->index <= 255) && (extrema[extents->index] >= 0))
    extents->index++;
  extents->right=extents->index-1;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const double *histogram,
% double *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of doubles is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
static void DerivativeHistogram(const double *histogram,
  double *derivative)
{
  /*
    Estimate the derivative of a 256-bin histogram.

    Interior bins use central differencing; the two endpoints use one-sided
    three-point (second-order polynomial) formulas so no out-of-range bin
    is ever read.
  */
  const ssize_t
    last = 255;

  ssize_t
    bin;

  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[last]=(0.5*histogram[last-2]-2.0*histogram[last-1]+
    1.5*histogram[last]);
  for (bin=1; bin < last; bin++)
    derivative[bin]=(histogram[bin+1]-histogram[bin-1])/2.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;
  ExtentPacket
    blue,
    green,
    red;
  MagickBooleanType
    proceed;
  double
    threshold;
  const Quantum
    *p;
  ssize_t
    i,
    x;
  short
    *extrema[MaxDimension];
  ssize_t
    count,
    *histogram[MaxDimension],
    y;
  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetPixelInfo(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /*
      Fix: size the extrema buffer by its own element type (short), as the
      identical loop in SegmentImage() does; sizeof(**histogram) harmlessly
      over-allocated it.
    */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Unwind the allocations made so far before reporting the failure.
        */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram and find the optimal tau (smoothing scale) for
    each band; the extrema maps mark histogram peaks (>0) and valleys (<0).
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one per (red, green, blue) peak-region combination.
    DefineRegion() advances each ExtentPacket's index field as it scans,
    so resetting green.index/blue.index restarts the inner sweeps.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /*
              NOTE(review): this path leaks the histogram/extrema buffers
              and any clusters already on the list -- confirm and add a
              shared unwind if this ever matters in practice.
            */
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster: a pixel joins the first cluster
    whose extents (padded by SafeMargin) contain all three of its scaled
    channel values; its channel values accumulate into the cluster centers.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.  From here
    on, count is reused as the running number of retained clusters (and
    serves as each kept cluster's id).  NOTE(review): the threshold term
    count*cluster_threshold/100.0 therefore scales with clusters kept so
    far, not with the total pixel count -- confirm this is intended.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: convert the accumulated channel sums into
          mean center coordinates.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    object is the least-populated retained cluster, background the most
    populated; the threshold is the midpoint of their centers per channel.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  const Quantum
    *pixels;

  ssize_t
    bin,
    column,
    row;

  /*
    Zero all 256 bins of the red, green, and blue histograms, then tally
    one count per pixel per channel, with channel values scaled to 0..255.
    A row whose pixels cannot be fetched aborts the tally early.
  */
  for (bin=0; bin <= 255; bin++)
  {
    histogram[Red][bin]=0;
    histogram[Green][bin]=0;
    histogram[Blue][bin]=0;
  }
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    pixels=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (pixels == (const Quantum *) NULL)
      break;
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,
        pixels))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,
        pixels))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,
        pixels))]++;
      pixels+=GetPixelChannels(image);
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
% The format of the InitializeIntervalTree method is:
%
% InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes,
% IntervalTree *node)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Append every childless (leaf) node of the interval tree to the flat
    list, bumping *number_nodes for each; siblings are visited before
    children.
  */
  if (node != (IntervalTree *) NULL)
    {
      if (node->child == (IntervalTree *) NULL)
        {
          list[*number_nodes]=node;
          (*number_nodes)++;
        }
      InitializeList(list,number_nodes,node->sibling);
      InitializeList(list,number_nodes,node->child);
    }
}
static void MeanStability(IntervalTree *node)
{
  /*
    Store in every node the arithmetic mean of its direct children's
    stability values (mean_stability); leaves keep 0.0.  The whole tree is
    traversed via the sibling and child links.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  if (node->child != (IntervalTree *) NULL)
    {
      IntervalTree
        *scan;

      double
        total;

      ssize_t
        children;

      total=0.0;
      children=0;
      for (scan=node->child; scan != (IntervalTree *) NULL; scan=scan->sibling)
      {
        total+=scan->stability;
        children++;
      }
      node->mean_stability=total/(double) children;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}
static void Stability(IntervalTree *node)
{
  /*
    A node's stability is the tau gap between it and its first child;
    childless nodes get 0.0.  Recurse over the whole tree.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->stability=(node->child == (IntervalTree *) NULL) ? 0.0 :
    node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;
  ssize_t
    i,
    j,
    k,
    left,
    number_nodes;
  /*
    Allocate the flat work list used to enumerate leaf nodes.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.  Fix: check the allocation before
    dereferencing root (the list allocation above was already checked) and
    release the work list on failure.
  */
  root=(IntervalTree *) AcquireMagickMemory(sizeof(*root));
  if (root == (IntervalTree *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return((IntervalTree *) NULL);
    }
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  /*
    Refine the tree one zero-crossing set at a time (i == -1 selects
    zero_crossing[0]), splitting each current leaf interval at every
    crossing that falls strictly inside it.
  */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            /*
              NOTE(review): the node allocations below are not checked for
              NULL before use; a failed allocation dereferences a null
              pointer.  A proper unwind requires freeing the partially
              built tree -- confirm before hardening.
            */
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /*
        If any split occurred, close the final sub-interval up to the
        parent's right edge.
      */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% double OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Harvest "active" nodes: a node whose stability reaches its children's
    mean stability is recorded and its subtree is pruned (only the sibling
    chain is pursued); otherwise the search descends into the children too.
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability < node->mean_stability)
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
      return;
    }
  list[(*number_nodes)++]=node;
  ActiveNodes(list,number_nodes,node->sibling);
}
static void FreeNodes(IntervalTree *tree)
{
  /*
    Post-order release of an interval tree: free both subtrees, then the
    node itself.
  */
  if (tree != (IntervalTree *) NULL)
    {
      FreeNodes(tree->sibling);
      FreeNodes(tree->child);
      (void) RelinquishMagickMemory(tree);
    }
}
static double OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;
  MagickBooleanType
    peak;
  double
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;
  ssize_t
    i,
    x;
  size_t
    count,
    number_crossings;
  ssize_t
    index,
    j,
    k,
    number_nodes;
  ZeroCrossing
    *zero_crossing;
  /*
    Allocate interval tree work list.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list: one entry per tau plus one for the
    unsmoothed histogram.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      /*
        Fix: do not leak the work list on this failure path.
      */
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);
  /*
    Initialize zero crossing list: smooth the histogram at each scale tau
    and record where its second derivative changes sign.
  */
  derivative=(double *) AcquireQuantumMemory(256,sizeof(*derivative));
  second_derivative=(double *) AcquireQuantumMemory(256,
    sizeof(*second_derivative));
  if ((derivative == (double *) NULL) ||
      (second_derivative == (double *) NULL))
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateDerivatives");
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original (unsmoothed) histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(double) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(double *) RelinquishMagickMemory(derivative);
  second_derivative=(double *)
    RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    {
      /*
        Fix: release the work buffers when the tree cannot be built.
      */
      zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  /*
    Find active nodes: stability is greater (or equal) to the mean stability of
    its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema: each active node paints its interval with the
    (1-based) position of the interval's peak (positive) or valley
    (negative) in the corresponding smoothed histogram.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list.
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak.
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.  Fix: guard against zero active nodes,
    which previously produced a NaN via 0.0/0.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  if (number_nodes != 0)
    average_tau/=(double) number_nodes;
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const double tau,
% double *scale_histogram)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  double
    *kernel,
    normalize,
    shape,
    total;

  ssize_t
    u,
    x;

  /*
    Smooth the 256-bin histogram with a Gaussian of standard deviation tau:
    build a kernel table indexed by |x-u|, truncated where its weight drops
    below MagickEpsilon, then convolve.
  */
  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  normalize=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  shape=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  for (x=0; x <= 255; x++)
    kernel[x]=0.0;
  for (x=0; x <= 255; x++)
  {
    kernel[x]=exp((double) shape*x*x);
    if (kernel[x] < MagickEpsilon)
      break;
  }
  for (x=0; x <= 255; x++)
  {
    total=0.0;
    for (u=0; u <= 255; u++)
      total+=(double) histogram[u]*kernel[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=normalize*total;
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SegmentImage() segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    saved_colorspace;

  MagickBooleanType
    status;

  short
    *extrema[MaxDimension];

  ssize_t
    dimension,
    *histogram[MaxDimension];

  /*
    Allocate a 256-bin histogram and an extrema map per color dimension.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (dimension=0; dimension < MaxDimension; dimension++)
  {
    histogram[dimension]=(ssize_t *) AcquireQuantumMemory(256,
      sizeof(**histogram));
    extrema[dimension]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[dimension] == (ssize_t *) NULL) ||
        (extrema[dimension] == (short *) NULL))
      {
        /*
          Unwind the allocations made so far before raising the error.
        */
        for (dimension--; dimension >= 0; dimension--)
        {
          extrema[dimension]=(short *) RelinquishMagickMemory(
            extrema[dimension]);
          histogram[dimension]=(ssize_t *) RelinquishMagickMemory(
            histogram[dimension]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Build the per-band histograms in the requested colorspace and find the
    optimal tau (smoothing scale) for each band.
  */
  saved_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-means technique, then restore the image's
    original colorspace.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  (void) TransformImageColorspace(image,saved_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (dimension=0; dimension < MaxDimension; dimension++)
  {
    extrema[dimension]=(short *) RelinquishMagickMemory(extrema[dimension]);
    histogram[dimension]=(ssize_t *) RelinquishMagickMemory(
      histogram[dimension]);
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroCrossHistogram() finds the zero crossings in a histogram and marks
% directions as: 1 is negative to positive; 0 is zero crossing; and -1
% is positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(double *second_derivative,
% const double smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of doubles representing the
% second derivative of the histogram of a particular color component.
%
% o crossings: This array of integers is initialized with
% -1, 0, or 1 representing the slope of the first derivative of the
% of a particular color component.
%
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  ssize_t
    i,
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings.  parity records the sign of the most recent
    non-zero sample (1 positive, -1 negative), so a crossing is flagged at
    the first sample whose sign differs from the preceding non-zero run.
    Fix: the two parity updates were swapped (negative set parity=1 and
    positive set parity=-1), which flagged every repeated same-sign sample
    as a crossing and never flagged the actual sign transitions.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);  /* positive-to-negative crossing */
        parity=(-1);
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;  /* negative-to-positive crossing */
          parity=1;
        }
  }
}
|
prepress.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR EEEEE PPPP RRRR EEEEE SSSSS SSSSS %
% P P R R E P P R R E SS SS %
% PPPP RRRR EEE PPPP RRRR EEE SSS SSS %
% P R R E P R R E SS SS %
% P R R EEEEE P R R EEEEE SSSSS SSSSS %
% %
% %
% MagickCore Prepress Methods %
% %
% Software Design %
% Cristy %
% October 2001 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/prepress.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T o t a l I n k D e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageTotalInkDensity() returns the total ink density for a CMYK image.
% Total Ink Density (TID) is determined by adding the CMYK values in the
% darkest shadow area in an image.
%
% The format of the GetImageTotalInkDensity method is:
%
% double GetImageTotalInkDensity(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport double GetImageTotalInkDensity(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  double
    total_ink_density;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Total ink density is only meaningful for color-separated (CMYK) images.
  */
  if (image->colorspace != CMYKColorspace)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ColorSeparatedImageRequired","`%s'",image->filename);
      return(0.0);
    }
  status=MagickTrue;
  total_ink_density=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      density;
    register const Quantum
      *p;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        /* Row fetch failed: record it and skip the row (cannot break out
           of an OpenMP loop). */
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        In CMYKColorspace the red/green/blue/black accessors return the
        C, M, Y, and K channels; their sum is this pixel's ink coverage.
      */
      density=(double) GetPixelRed(image,p)+GetPixelGreen(image,p)+
        GetPixelBlue(image,p)+GetPixelBlack(image,p);
      /*
        Double-checked maximum: the unlocked pre-test avoids entering the
        critical section for most pixels; the maximum is re-tested under
        the lock.  NOTE(review): the unlocked read of total_ink_density is
        a benign data race tolerated as an optimization -- confirm this
        matches project convention.
      */
      if (density > total_ink_density)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GetImageTotalInkDensity)
#endif
        {
          if (density > total_ink_density)
            total_ink_density=density;
        }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /* Any failed row invalidates the result. */
  if (status == MagickFalse)
    total_ink_density=0.0;
  return(total_ink_density);
}
|
GB_unop__identity_int16_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_fc32)
// op(A') function: GB (_unop_tran__identity_int16_fc32)
// C type: int16_t
// A type: GxB_FC32_t
// cast: int16_t cij = GB_cast_to_int16_t ((double) crealf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = GB_cast_to_int16_t ((double) crealf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = GB_cast_to_int16_t ((double) crealf (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = op (cast (Ax [p])) for all entries: the op is identity and
// the cast keeps the real part of the single-complex value and converts it
// to int16_t via GB_cast_to_int16_t.
GrB_Info GB (_unop_apply__identity_int16_fc32)
(
    int16_t *Cx,        // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense case: all entries 0..anz-1 are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            int16_t z = GB_cast_to_int16_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            int16_t z = GB_cast_to_int16_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the identity operator.
// The transpose loop itself is the shared template GB_unop_transpose.c,
// parameterized by the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__identity_int16_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
program5.1.c | #include <stdio.h>
#include <omp.h>
#include <stdlib.h>
void hello(void);
int main(int argc, char* argv[]) {
   /*
    * The thread count comes from the command line.  Fix: without the argc
    * check, strtol(argv[1], ...) dereferences a null pointer when the
    * program is run with no arguments; a non-positive count is likewise
    * rejected before it reaches the num_threads clause.
    */
   if (argc < 2) {
      fprintf(stderr, "usage: %s <thread_count>\n", argv[0]);
      return 1;
   }
   int thread_count = (int) strtol(argv[1], NULL, 10);
   if (thread_count <= 0) {
      fprintf(stderr, "thread_count must be a positive integer\n");
      return 1;
   }
   /* Fork a team of thread_count threads; each runs hello() once. */
#pragma omp parallel num_threads(thread_count)
   hello();
   return 0;
}
/* Print a greeting identifying this thread's rank and the size of the
   executing parallel team. */
void hello(void) {
   int my_rank = omp_get_thread_num();
   int thread_count = omp_get_num_threads();
   printf("Hello from thread %d of %d.\n", my_rank, thread_count);
} |
integrate_omp.c | #include <stdio.h>
#include <omp.h>
static long num_steps = 1000000000;
double step;
#define NUM_THREADS 32
/*
 * Approximate pi by the midpoint rule applied to integral of
 * 4/(1+x^2) over [0,1], using an SPMD pattern: each thread sums every
 * nThreadsInternal-th rectangle into its own slot of sum[], and the
 * partial sums are combined serially afterwards.
 */
int main() {
int i, nThreads;
double pi, sum[NUM_THREADS];
step = 1.0/(double) num_steps;
omp_set_num_threads(NUM_THREADS);
#pragma omp parallel
{
/* i, ID, nThreadsInternal and x are per-thread; they shadow/avoid
   the shared variables declared outside the region */
int i, ID, nThreadsInternal;
double x;
ID = omp_get_thread_num();
nThreadsInternal = omp_get_num_threads();
/* the runtime may grant fewer than NUM_THREADS threads; thread 0
   records the actual team size for the reduction loop below.
   The write is safe to read after the region's implicit barrier. */
if(ID == 0) nThreads = nThreadsInternal;
/* cyclic (round-robin) distribution of the num_steps rectangles */
for( i=ID, sum[ID]=0.0; i<num_steps; i=i+nThreadsInternal) {
x = (i+0.5)*step;
sum[ID] += 4.0/(1.0 + x*x);
}
/* NOTE(review): adjacent sum[] slots share cache lines, so this
   accumulation pattern likely suffers false sharing — confirm
   before using as a performance example. */
}
/* serial reduction of the per-thread partial sums */
for (i=0, pi=0.0; i<nThreads; i++)
pi += sum[i]*step;
printf("%f\n", pi);
}
|
common.c | /****************************************************************************
* *
* OpenMP MicroBenchmark Suite - Version 3.1 *
* *
* produced by *
* *
* Mark Bull, Fiona Reid and Nix Mc Donnell *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk *
* *
* *
* This version copyright (c) The University of Edinburgh, 2015. *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "common.h"
#define CONF95 1.96
int nthreads = -1; // Number of OpenMP threads
int delaylength = -1; // The number of iterations to delay for
int outerreps = -1; // Outer repetitions
double delaytime = -1.0; // Length of time to delay for in microseconds
double targettesttime = 0.0; // The length of time in microseconds that the test
// should run for.
unsigned long innerreps; // Inner repetitions
double *times; // Array of doubles storing the benchmark times in microseconds
double referencetime; // The average reference time in microseconds to perform
// outerreps runs
double referencesd; // The standard deviation in the reference time in
// microseconds for outerreps runs.
double testtime; // The average test time in microseconds for
// outerreps runs
double testsd; // The standard deviation in the test time in
// microseconds for outerreps runs.
/* Print command-line help for the benchmark driver. */
void usage(char *argv[]) {
    printf("Usage: %s.x \n", argv[0]);
    printf("\t--outer-repetitions <outer-repetitions> (default %d)\n",
           DEFAULT_OUTER_REPS);
    printf("\t--test-time <target-test-time> (default %0.2f microseconds)\n",
           DEFAULT_TEST_TARGET_TIME);
    printf("\t--delay-time <delay-time> (default %0.4f microseconds)\n",
           DEFAULT_DELAY_TIME);
    printf("\t--delay-length <delay-length> "
           "(default auto-generated based on processor speed)\n");
}
/*
 * Parse command-line options into the global benchmark parameters:
 *   --delay-time <float>        -> delaytime (microseconds)
 *   --outer-repetitions <int>   -> outerreps
 *   --test-time <float>         -> targettesttime (microseconds)
 *   -h                          -> print usage and exit successfully
 * Unknown options, missing option values, or unparsable values print
 * usage and exit with failure.
 *
 * Fixes: the strcmp result is compared against the integer 0 (it was
 * compared against the double literal 0.0), and each option now checks
 * that its value argument exists before reading argv[++arg] — the old
 * code passed argv[argc] (NULL) to atof/atoi when a flag was last.
 */
void parse_args(int argc, char *argv[]) {
    // Parse the parameters
    int arg;
    for (arg = 1; arg < argc; arg++) {
        if (strcmp(argv[arg], "--delay-time") == 0) {
            if (arg + 1 >= argc) {
                printf("Missing value for --delay-time\n");
                usage(argv);
                exit(EXIT_FAILURE);
            }
            delaytime = atof(argv[++arg]);
            if (delaytime == 0.0) {
                printf("Invalid float:--delay-time: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "--outer-repetitions") == 0) {
            if (arg + 1 >= argc) {
                printf("Missing value for --outer-repetitions\n");
                usage(argv);
                exit(EXIT_FAILURE);
            }
            outerreps = atoi(argv[++arg]);
            if (outerreps == 0) {
                printf("Invalid integer:--outer-repetitions: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "--test-time") == 0) {
            if (arg + 1 >= argc) {
                printf("Missing value for --test-time\n");
                usage(argv);
                exit(EXIT_FAILURE);
            }
            targettesttime = atof(argv[++arg]);
            if (targettesttime == 0) {
                printf("Invalid integer:--test-time: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "-h") == 0) {
            usage(argv);
            exit(EXIT_SUCCESS);
        } else {
            printf("Invalid parameters: %s\n", argv[arg]);
            usage(argv);
            exit(EXIT_FAILURE);
        }
    }
}
/*
 * Calibrate the delay() loop: convert a target delay time (in
 * microseconds) into an iteration count. The global delaylength is
 * grown geometrically (x1.1 + 1) and `reps` calls of delay() are timed
 * until the average call lasts at least the requested time. Returns
 * the iteration count found (also left in the global delaylength).
 */
int getdelaylengthfromtime(double delaytime) {
int i, reps;
double lapsedtime, starttime; // seconds
reps = 1000;
lapsedtime = 0.0;
delaytime = delaytime/1.0E6; // convert from microseconds to seconds
// Note: delaytime is local to this function and thus the conversion
// does not propagate to the main code.
// Here we want to use the delaytime in microseconds to find the
// delaylength in iterations. We start with delaylength=0 and
// increase until we get a large enough delaytime, return delaylength
// in iterations.
delaylength = 0;
delay(delaylength);
while (lapsedtime < delaytime) {
delaylength = delaylength * 1.1 + 1;
starttime = getclock();
for (i = 0; i < reps; i++) {
delay(delaylength);
}
lapsedtime = (getclock() - starttime) / (double) reps;
}
return delaylength;
}
/*
 * Determine how many inner repetitions are needed so that one call of
 * test() lasts at least targettesttime microseconds. Doubles the
 * global innerreps until the timed call is long enough, and returns
 * the value reached. Aborts if innerreps grows absurdly large, which
 * indicates the compiler optimised the timed loop away.
 */
unsigned long getinnerreps(void (*test)(void)) {
innerreps = 10L; // some initial value
double time = 0.0;
while (time < targettesttime) {
double start = getclock();
test();
time = (getclock() - start) * 1.0e6; // microseconds per call of test()
innerreps *=2;
// Test to stop code if compiler is optimising reference time expressions away
if (innerreps > (targettesttime*1.0e15)) {
printf("Compiler has optimised reference loop away, STOP! \n");
printf("Try recompiling with lower optimisation level \n");
exit(1);
}
}
return innerreps;
}
/* Print the banner announcing which construct is about to be timed,
 * including the global inner repetition count. */
void printheader(char *name) {
    printf("\n--------------------------------------------------------\n");
    printf("Computing %s time using %lu reps\n", name, innerreps);
}
/*
 * Summarise the global times[] array: compute mean, min, max, sample
 * standard deviation, and the number of outliers (samples more than
 * 3 SD from the mean); print one summary line; return the mean and SD
 * through mtp and sdp.
 *
 * Indices 1..outerreps are used — times[0], the first (warm-up)
 * measurement recorded by reference()/benchmark(), is deliberately
 * excluded from the statistics.
 */
void stats(double *mtp, double *sdp) {
double meantime, totaltime, sumsq, mintime, maxtime, sd, cutoff;
int i, nr;
mintime = 1.0e10;
maxtime = 0.;
totaltime = 0.;
for (i = 1; i <= outerreps; i++) {
mintime = (mintime < times[i]) ? mintime : times[i];
maxtime = (maxtime > times[i]) ? maxtime : times[i];
totaltime += times[i];
}
meantime = totaltime / outerreps;
sumsq = 0;
for (i = 1; i <= outerreps; i++) {
sumsq += (times[i] - meantime) * (times[i] - meantime);
}
// sample standard deviation (divide by n-1)
sd = sqrt(sumsq / (outerreps - 1));
// count samples further than 3 SD from the mean
cutoff = 3.0 * sd;
nr = 0;
for (i = 1; i <= outerreps; i++) {
if (fabs(times[i] - meantime) > cutoff)
nr++;
}
printf("\n");
printf("Sample_size Average Min Max S.D. Outliers\n");
printf(" %d %f %f %f %f %d\n",
outerreps, meantime, mintime, maxtime, sd, nr);
printf("\n");
*mtp = meantime;
*sdp = sd;
}
/*
 * Print the measured time for a test and its overhead relative to the
 * reference time, each with a 95% confidence interval (CONF95 * SD).
 *
 * Fix: the refsd parameter is now used in the overhead line. The old
 * code read the global referencesd and silently ignored the argument;
 * in-file callers pass referencesd here, so output is unchanged, but
 * the function now honours whatever the caller supplies.
 */
void printfooter(char *name, double testtime, double testsd,
double referencetime, double refsd) {
printf("%s time = %f microseconds +/- %f\n",
name, testtime, CONF95*testsd);
printf("%s overhead = %f microseconds +/- %f\n",
name, testtime-referencetime, CONF95*(testsd+refsd));
}
/* Report the reference time with its 95% confidence interval. */
void printreferencefooter(char *name, double referencetime, double referencesd) {
    double ci95 = CONF95 * referencesd;
    printf("%s time = %f microseconds +/- %f\n", name, referencetime, ci95);
}
/*
 * Initialise the benchmark framework: discover the OpenMP team size,
 * parse command-line overrides, fill in defaults for anything left
 * unset, calibrate the delay loop, allocate the times[] array
 * (outerreps+1 slots: index 0 is the warm-up sample ignored by
 * stats()), and print the run configuration.
 */
void init(int argc, char **argv)
{
// spin up a team once just to learn how many threads the runtime gives us
#pragma omp parallel
{
#pragma omp master
{
nthreads = omp_get_num_threads();
}
}
parse_args(argc, argv);
// -1 / 0.0 are the "not set on the command line" sentinels
if (outerreps == -1) {
outerreps = DEFAULT_OUTER_REPS;
}
if (targettesttime == 0.0) {
targettesttime = DEFAULT_TEST_TARGET_TIME;
}
if (delaytime == -1.0) {
delaytime = DEFAULT_DELAY_TIME;
}
delaylength = getdelaylengthfromtime(delaytime); // Always need to compute delaylength in iterations
times = malloc((outerreps+1) * sizeof(double));
printf("Running OpenMP benchmark version 3.0\n"
"\t%d thread(s)\n"
"\t%d outer repetitions\n"
"\t%0.2f test time (microseconds)\n"
"\t%d delay length (iterations) \n"
"\t%f delay time (microseconds)\n",
nthreads,
outerreps, targettesttime,
delaylength, delaytime);
}
/* Release the times[] array allocated by init(). */
void finalise(void) {
free(times);
}
/* Announce the start of a reference measurement (header only). */
void initreference(char *name) {
printheader(name);
}
/*
 * Run the reference measurement: size innerreps so refer() lasts long
 * enough to time, then record outerreps+1 timings (per-innerrep, in
 * microseconds) into times[]. times[0] is the warm-up sample that
 * stats() later discards. Finishes by computing and printing the
 * reference statistics.
 */
void reference(char *name, void (*refer)(void)) {
int k;
double start;
// Calculate the required number of innerreps
innerreps = getinnerreps(refer);
initreference(name);
for (k = 0; k <= outerreps; k++) {
start = getclock();
refer();
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
}
finalisereference(name);
}
/* Compute the reference statistics into the globals referencetime /
 * referencesd and print them. */
void finalisereference(char *name) {
stats(&referencetime, &referencesd);
printreferencefooter(name, referencetime, referencesd);
}
/* Announce the start of a test measurement (header only).
 * NOTE: the name is a historical misspelling of "inittest"; it is kept
 * because benchmark() calls it under this name. */
void intitest(char *name) {
printheader(name);
}
/* Compute the test statistics into the globals testtime / testsd and
 * print the time and overhead relative to the reference. */
void finalisetest(char *name) {
stats(&testtime, &testsd);
printfooter(name, testtime, testsd, referencetime, referencesd);
}
/*
 * Run one microbenchmark: size innerreps so test() lasts long enough
 * to time, then record outerreps+1 timings (per-innerrep, in
 * microseconds) into times[]; times[0] is the warm-up sample that
 * stats() later discards. Finishes by printing the test statistics.
 */
void benchmark(char *name, void (*test)(void))
{
int k;
double start;
// Calculate the required number of innerreps
innerreps = getinnerreps(test);
intitest(name);
for (k=0; k<=outerreps; k++) {
start = getclock();
test();
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
}
finalisetest(name);
}
// For the Cray compiler on HECToR we need to turn off optimisation
// for the delay and array_delay functions. Other compilers should
// not be affected.
//#pragma _CRI noopt
/* Busy-wait for roughly `delaylength` loop iterations. Marked optnone
 * so the loop is not eliminated; the final (never-taken) branch keeps
 * the accumulator observable. */
void __attribute__((optnone)) delay(int delaylength) {
   int iter;
   float acc = 0.;
   for (iter = 0; iter < delaylength; iter++) {
      acc += iter;
   }
   /* acc is a sum of non-negative terms, so this never prints; it only
    * prevents the compiler from discarding the loop. */
   if (acc < 0) {
      printf("%f \n", acc);
   }
}
/* Busy-wait variant that accumulates into caller-provided storage:
 * sets a[0] = 1.0 and then adds each loop index to it. Marked optnone
 * so the loop survives optimisation. */
void __attribute__((optnone)) array_delay(int delaylength, double a[1]) {
   int iter;
   a[0] = 1.0;
   for (iter = 0; iter < delaylength; iter++) {
      a[0] += iter;
   }
   /* Never true (sum of non-negative terms); defeats dead-code elimination. */
   if (a[0] < 0) {
      printf("%f \n", a[0]);
   }
}
// Re-enable optimisation for remainder of source.
//#pragma _CRI opt
/* Wall-clock time in seconds, measured from an arbitrary but
 * consistent origin (thin wrapper over omp_get_wtime). */
double getclock() {
   return omp_get_wtime();
}
/* Always returns 0 ("false"); exists so benchmark loops can branch on
 * an opaque call the optimiser cannot fold away. */
int returnfalse() {
   return (1 == 0);
}
|
hypre_merge_sort.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"
#include "../seq_mv/HYPRE_seq_mv.h"
//#define DBG_MERGE_SORT
#ifdef DBG_MERGE_SORT
#include <algorithm>
#include <unordered_map>
#endif
#define SWAP(T, a, b) do { T tmp = a; a = b; b = tmp; } while (0)
/*--------------------------------------------------------------------------
* hypre_union2
*
* Union of two sorted (in ascending order) array arr1 and arr2 into arr3
*
* Assumptions:
* 1) no duplicates in arr1 and arr2
* 2) arr3 should have enough space on entry
* 3) map1 and map2 map arr1 and arr2 to arr3
*--------------------------------------------------------------------------*/
void hypre_union2( HYPRE_Int n1, HYPRE_BigInt *arr1,
HYPRE_Int n2, HYPRE_BigInt *arr2,
HYPRE_Int *n3, HYPRE_BigInt *arr3,
HYPRE_Int *map1, HYPRE_Int *map2 )
{
// i walks arr1, j walks arr2, k is the next free slot in arr3
HYPRE_Int i = 0, j = 0, k = 0;
while (i < n1 && j < n2)
{
if (arr1[i] < arr2[j])
{
if (map1) { map1[i] = k; }
arr3[k++] = arr1[i++];
}
else if (arr1[i] > arr2[j])
{
if (map2) { map2[j] = k; }
arr3[k++] = arr2[j++];
}
else /* == */
{
// value present in both inputs: emit it once, and point both
// maps at the same output slot
if (map1) { map1[i] = k; }
if (map2) { map2[j] = k; }
arr3[k++] = arr1[i++];
j++;
}
}
// drain whichever input still has elements left
while (i < n1)
{
if (map1) { map1[i] = k; }
arr3[k++] = arr1[i++];
}
while (j < n2)
{
if (map2) { map2[j] = k; }
arr3[k++] = arr2[j++];
}
// number of distinct values written to arr3
*n3 = k;
}
/*--------------------------------------------------------------------------
* hypre_merge
*--------------------------------------------------------------------------*/
static void hypre_merge( HYPRE_Int *first1, HYPRE_Int *last1,
                         HYPRE_Int *first2, HYPRE_Int *last2,
                         HYPRE_Int *out )
{
   /* Standard stable two-way merge of the sorted ranges
    * [first1, last1) and [first2, last2) into out; on ties the element
    * from the first range is emitted first. */
   while (first1 != last1 && first2 != last2)
   {
      if (*first2 < *first1)
      {
         *out++ = *first2++;
      }
      else
      {
         *out++ = *first1++;
      }
   }
   /* Copy whichever range still has elements remaining. */
   while (first1 != last1)
   {
      *out++ = *first1++;
   }
   while (first2 != last2)
   {
      *out++ = *first2++;
   }
}
/*--------------------------------------------------------------------------
* hypre_big_merge
*--------------------------------------------------------------------------*/
static void hypre_big_merge( HYPRE_BigInt *first1, HYPRE_BigInt *last1,
                             HYPRE_BigInt *first2, HYPRE_BigInt *last2,
                             HYPRE_BigInt *out )
{
   /* HYPRE_BigInt twin of hypre_merge: stable two-way merge of the
    * sorted ranges [first1, last1) and [first2, last2) into out; on
    * ties the element from the first range is emitted first. */
   while (first1 != last1 && first2 != last2)
   {
      if (*first2 < *first1)
      {
         *out++ = *first2++;
      }
      else
      {
         *out++ = *first1++;
      }
   }
   /* Copy whichever range still has elements remaining. */
   while (first1 != last1)
   {
      *out++ = *first1++;
   }
   while (first2 != last2)
   {
      *out++ = *first2++;
   }
}
/*--------------------------------------------------------------------------
* kth_element_
*--------------------------------------------------------------------------*/
/*
 * Binary-search worker for kth_element(): search i in [left, right]
 * for a split where taking a1[0:i) and a2[0:j+1) (with j = k-i-1)
 * yields exactly the k smallest elements. The two acceptance tests
 * check that the values at each side of the proposed split are
 * consistently ordered across both arrays. Caller guarantees the
 * easy/boundary cases have been handled, so j stays in range here.
 */
static void kth_element_( HYPRE_Int *out1, HYPRE_Int *out2,
HYPRE_Int *a1, HYPRE_Int *a2,
HYPRE_Int left, HYPRE_Int right,
HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
while (1)
{
HYPRE_Int i = (left + right)/2; // right < k -> i < k
HYPRE_Int j = k - i - 1; // i + (j+1) == k always holds
#ifdef DBG_MERGE_SORT
hypre_assert(left <= right && right <= k);
hypre_assert(i < k); // i == k implies left == right == k that can never happen
hypre_assert(j >= 0 && j < n2);
#endif
if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1]))
{
// split accepted: take i elements from a1, j+1 from a2
*out1 = i; *out2 = j + 1;
return;
}
else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1]))
{
// split accepted with the boundary element coming from a1
*out1 = i + 1; *out2 = j;
return;
}
else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1])
{
// search in left half of a1
right = i - 1;
}
else
{
// search in right half of a1
left = i + 1;
}
}
}
/**
* Partition the input so that
* a1[0:*out1) and a2[0:*out2) contain the smallest k elements
*/
/*--------------------------------------------------------------------------
* kth_element
*
* Partition the input so that
* a1[0:*out1) and a2[0:*out2) contain the smallest k elements
*--------------------------------------------------------------------------*/
/*
 * Partition the sorted inputs so that a1[0:*out1) and a2[0:*out2)
 * together contain the k smallest elements (*out1 + *out2 == k).
 * Trivial and one-sided cases are resolved directly; otherwise a
 * binary search (kth_element_) is run on the shorter array.
 */
static void kth_element( HYPRE_Int *out1, HYPRE_Int *out2,
HYPRE_Int *a1, HYPRE_Int *a2,
HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
// either of the inputs is empty
if (n1 == 0)
{
*out1 = 0; *out2 = k;
return;
}
if (n2 == 0)
{
*out1 = k; *out2 = 0;
return;
}
if (k >= n1 + n2)
{
// asking for everything
*out1 = n1; *out2 = n2;
return;
}
// one is greater than the other
if (k < n1 && a1[k] <= a2[0])
{
// the k smallest all come from a1
*out1 = k; *out2 = 0;
return;
}
if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1])
{
// all of a1 plus the first k-n1 of a2
*out1 = n1; *out2 = k - n1;
return;
}
if (k < n2 && a2[k] <= a1[0])
{
// the k smallest all come from a2
*out1 = 0; *out2 = k;
return;
}
if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1])
{
// all of a2 plus the first k-n2 of a1
*out1 = k - n2; *out2 = n2;
return;
}
// now k > 0
// faster to do binary search on the shorter sequence
if (n1 > n2)
{
SWAP(HYPRE_Int, n1, n2);
SWAP(HYPRE_Int *, a1, a2);
SWAP(HYPRE_Int *, out1, out2);
}
if (k < (n1 + n2)/2)
{
kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k);
}
else
{
// when k is big, faster to find (n1 + n2 - k)th biggest element
HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0);
HYPRE_Int new_k = k - offset1 - offset2;
HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1);
HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1);
kth_element_(out1, out2, a1 + offset1, a2 + offset2, 0, new_n1 - 1, new_n1, new_n2, new_k);
*out1 += offset1;
*out2 += offset2;
}
#ifdef DBG_MERGE_SORT
hypre_assert(*out1 + *out2 == k);
#endif
}
/*--------------------------------------------------------------------------
* big_kth_element_
*--------------------------------------------------------------------------*/
/*
 * HYPRE_BigInt twin of kth_element_(): binary search for a split i in
 * [left, right] such that a1[0:i) and a2[0:j+1) (j = k-i-1) are the
 * k smallest elements. Caller has already disposed of the boundary
 * cases, so j stays in range here.
 */
static void big_kth_element_( HYPRE_Int *out1, HYPRE_Int *out2,
HYPRE_BigInt *a1, HYPRE_BigInt *a2,
HYPRE_Int left, HYPRE_Int right,
HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
while (1)
{
HYPRE_Int i = (left + right)/2; // right < k -> i < k
HYPRE_Int j = k - i - 1; // i + (j+1) == k always holds
#ifdef DBG_MERGE_SORT
hypre_assert(left <= right && right <= k);
hypre_assert(i < k); // i == k implies left == right == k that can never happen
hypre_assert(j >= 0 && j < n2);
#endif
if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1]))
{
// split accepted: take i elements from a1, j+1 from a2
*out1 = i; *out2 = j + 1;
return;
}
else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1]))
{
// split accepted with the boundary element coming from a1
*out1 = i + 1; *out2 = j;
return;
}
else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1])
{
// search in left half of a1
right = i - 1;
}
else
{
// search in right half of a1
left = i + 1;
}
}
}
/*--------------------------------------------------------------------------
* big_kth_element
*
* Partition the input so that
* a1[0:*out1) and a2[0:*out2) contain the smallest k elements
*--------------------------------------------------------------------------*/
/*
 * HYPRE_BigInt twin of kth_element(): partition the sorted inputs so
 * that a1[0:*out1) and a2[0:*out2) together contain the k smallest
 * elements. Trivial and one-sided cases are resolved directly;
 * otherwise big_kth_element_ binary-searches the shorter array.
 */
static void big_kth_element( HYPRE_Int *out1, HYPRE_Int *out2,
HYPRE_BigInt *a1, HYPRE_BigInt *a2,
HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
// either of the inputs is empty
if (n1 == 0)
{
*out1 = 0; *out2 = k;
return;
}
if (n2 == 0)
{
*out1 = k; *out2 = 0;
return;
}
if (k >= n1 + n2)
{
// asking for everything
*out1 = n1; *out2 = n2;
return;
}
// one is greater than the other
if (k < n1 && a1[k] <= a2[0])
{
// the k smallest all come from a1
*out1 = k; *out2 = 0;
return;
}
if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1])
{
// all of a1 plus the first k-n1 of a2
*out1 = n1; *out2 = k - n1;
return;
}
if (k < n2 && a2[k] <= a1[0])
{
// the k smallest all come from a2
*out1 = 0; *out2 = k;
return;
}
if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1])
{
// all of a2 plus the first k-n2 of a1
*out1 = k - n2; *out2 = n2;
return;
}
// now k > 0
// faster to do binary search on the shorter sequence
if (n1 > n2)
{
SWAP(HYPRE_Int, n1, n2);
SWAP(HYPRE_BigInt *, a1, a2);
SWAP(HYPRE_Int *, out1, out2);
}
if (k < (n1 + n2)/2)
{
big_kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k);
}
else
{
// when k is big, faster to find (n1 + n2 - k)th biggest element
HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0);
HYPRE_Int new_k = k - offset1 - offset2;
HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1);
HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1);
big_kth_element_(out1, out2, a1 + (HYPRE_BigInt)offset1, a2 + (HYPRE_BigInt)offset2, 0, new_n1 - 1, new_n1, new_n2, new_k);
*out1 += offset1;
*out2 += offset2;
}
#ifdef DBG_MERGE_SORT
hypre_assert(*out1 + *out2 == k);
#endif
}
/*--------------------------------------------------------------------------
 * hypre_parallel_merge
 *
 * @param num_threads number of threads that participate in this merge
 * @param my_thread_num thread id (zero-based) among the threads that
 * participate in this merge
 *--------------------------------------------------------------------------*/
/*
 * Cooperative merge: each of the num_threads callers merges its own
 * slice (output ranks [begin_rank, end_rank)) of the combined sorted
 * output, so together the threads produce the full merge of
 * [first1, last1) and [first2, last2) into out.
 */
static void hypre_parallel_merge( HYPRE_Int *first1, HYPRE_Int *last1,
HYPRE_Int *first2, HYPRE_Int *last2,
HYPRE_Int *out, HYPRE_Int num_threads,
HYPRE_Int my_thread_num )
{
HYPRE_Int n1 = last1 - first1;
HYPRE_Int n2 = last2 - first2;
HYPRE_Int n = n1 + n2;
// this thread's contiguous share of the merged output
HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads;
HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n);
HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n);
#ifdef DBG_MERGE_SORT
hypre_assert(std::is_sorted(first1, last1));
hypre_assert(std::is_sorted(first2, last2));
#endif
// translate output ranks into split points in each input range
HYPRE_Int begin1, begin2, end1, end2;
kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank);
kth_element(&end1, &end2, first1, first2, n1, n2, end_rank);
// adjust the split points across runs of duplicated values so that
// each thread's sub-ranges stay consistent (begin <= end in both)
while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2])
{
#ifdef DBG_MERGE_SORT
printf("%s:%d\n", __FILE__, __LINE__);
#endif
begin1--; begin2++;
}
while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2])
{
#ifdef DBG_MERGE_SORT
printf("%s:%d\n", __FILE__, __LINE__);
#endif
end1--; end2++;
}
#ifdef DBG_MERGE_SORT
hypre_assert(begin1 <= end1);
hypre_assert(begin2 <= end2);
#endif
// sequentially merge this thread's two sub-ranges into its output slice
hypre_merge(
first1 + begin1, first1 + end1,
first2 + begin2, first2 + end2,
out + begin1 + begin2);
#ifdef DBG_MERGE_SORT
hypre_assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2));
#endif
}
/*--------------------------------------------------------------------------
* hypre_big_parallel_merge
*
* @param num_threads number of threads that participate in this merge
* @param my_thread_num thread id (zero-based) among the threads that
* participate in this merge
*--------------------------------------------------------------------------*/
/*
 * HYPRE_BigInt twin of hypre_parallel_merge: each of the num_threads
 * callers merges its own slice (output ranks [begin_rank, end_rank))
 * of the combined sorted output.
 */
static void hypre_big_parallel_merge(
HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2,
HYPRE_BigInt *out,
HYPRE_Int num_threads, HYPRE_Int my_thread_num)
{
HYPRE_Int n1 = (HYPRE_Int)(last1 - first1);
HYPRE_Int n2 = (HYPRE_Int)(last2 - first2);
HYPRE_Int n = n1 + n2;
// this thread's contiguous share of the merged output
HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads;
HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n);
HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n);
#ifdef DBG_MERGE_SORT
hypre_assert(std::is_sorted(first1, last1));
hypre_assert(std::is_sorted(first2, last2));
#endif
// translate output ranks into split points in each input range
HYPRE_Int begin1, begin2, end1, end2;
big_kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank);
big_kth_element(&end1, &end2, first1, first2, n1, n2, end_rank);
// adjust the split points across runs of duplicated values so that
// each thread's sub-ranges stay consistent (begin <= end in both)
while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2])
{
#ifdef DBG_MERGE_SORT
printf("%s:%d\n", __FILE__, __LINE__);
#endif
begin1--; begin2++;
}
while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2])
{
#ifdef DBG_MERGE_SORT
printf("%s:%d\n", __FILE__, __LINE__);
#endif
end1--; end2++;
}
#ifdef DBG_MERGE_SORT
hypre_assert(begin1 <= end1);
hypre_assert(begin2 <= end2);
#endif
// sequentially merge this thread's two sub-ranges into its output slice
hypre_big_merge(
first1 + (HYPRE_BigInt)begin1, first1 + (HYPRE_BigInt)end1,
first2 + (HYPRE_BigInt)begin2, first2 + (HYPRE_BigInt)end2,
out + (HYPRE_BigInt)(begin1 + begin2));
#ifdef DBG_MERGE_SORT
hypre_assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2));
#endif
}
/*--------------------------------------------------------------------------
* hypre_merge_sort
*--------------------------------------------------------------------------*/
/*
 * Thread-parallel merge sort of in[0..len). Sorting ping-pongs between
 * `in` and the caller-provided scratch buffer `temp`; *out is set to
 * whichever of the two buffers ends up holding the fully sorted
 * sequence (no new storage is allocated here).
 */
void hypre_merge_sort( HYPRE_Int *in, HYPRE_Int *temp, HYPRE_Int len, HYPRE_Int **out )
{
if (0 == len) return;
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif
#ifdef DBG_MERGE_SORT
HYPRE_Int *dbg_buf = new HYPRE_Int[len];
std::copy(in, in + len, dbg_buf);
std::sort(dbg_buf, dbg_buf + len);
#endif
// HYPRE_Int thread_private_len[hypre_NumThreads()];
// HYPRE_Int out_len = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads = hypre_NumActiveThreads();
HYPRE_Int my_thread_num = hypre_GetThreadNum();
// thread-private sort: each thread sequentially sorts its own
// contiguous slice of `in`
HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads;
HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len);
HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len);
hypre_qsort0(in, i_begin, i_end - 1);
// merge sorted sequences: ceil(log2(num_threads)) rounds; in each
// round pairs of sorted groups are merged cooperatively, swapping
// the roles of in_buf and out_buf afterwards
HYPRE_Int in_group_size;
HYPRE_Int *in_buf = in;
HYPRE_Int *out_buf = temp;
for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
// merge 2 in-groups into 1 out-group
HYPRE_Int out_group_size = in_group_size*2;
HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size;
// HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1);
HYPRE_Int id_in_group = my_thread_num%out_group_size;
HYPRE_Int num_threads_in_group =
hypre_min(group_leader + out_group_size, num_threads) - group_leader;
HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len);
HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len);
hypre_parallel_merge(
in_buf + in_group1_begin, in_buf + in_group1_end,
in_buf + in_group2_begin, in_buf + in_group2_end,
out_buf + in_group1_begin,
num_threads_in_group,
id_in_group);
// swap buffer roles for the next round
HYPRE_Int *temp = in_buf;
in_buf = out_buf;
out_buf = temp;
}
// every thread computes the same in_buf, so this racy-looking
// write stores the same pointer from all threads
*out = in_buf;
} /* omp parallel */
#ifdef DBG_MERGE_SORT
hypre_assert(std::equal(*out, *out + len, dbg_buf));
delete[] dbg_buf;
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
/*--------------------------------------------------------------------------
* hypre_sort_and_create_inverse_map
*
* Sort array "in" with length len and put result in array "out"
* "in" will be deallocated unless in == *out
* inverse_map is an inverse hash table s.t.
* inverse_map[i] = j iff (*out)[j] = i
*--------------------------------------------------------------------------*/
/*
 * Sort `in` (length len) into *out and build a concurrent hash map
 * with inverse_map[value] = position, i.e.
 * hypre_UnorderedIntMapGet(inverse_map, (*out)[j]) == j.
 * `in` is freed unless the sort result ended up in `in` itself
 * (hypre_merge_sort returns either `in` or the scratch buffer).
 */
void hypre_sort_and_create_inverse_map(HYPRE_Int *in, HYPRE_Int len, HYPRE_Int **out,
hypre_UnorderedIntMap *inverse_map)
{
if (len == 0)
{
return;
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif
// scratch buffer for the ping-pong merge sort
HYPRE_Int *temp = hypre_TAlloc(HYPRE_Int, len, HYPRE_MEMORY_HOST);
hypre_merge_sort(in, temp, len, out);
// 2*len buckets; concurrency level scales with the thread count
hypre_UnorderedIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads());
HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < len; i++)
{
// PutIfAbsent returns the previous value; EMPTY means no duplicate keys
HYPRE_Int old = hypre_UnorderedIntMapPutIfAbsent(inverse_map, (*out)[i], i);
hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY);
#ifdef DBG_MERGE_SORT
if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i)
{
fprintf(stderr, "%d %d\n", i, (*out)[i]);
hypre_assert(false);
}
#endif
}
#ifdef DBG_MERGE_SORT
std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len);
for (HYPRE_Int i = 0; i < len; ++i) {
inverse_map2[(*out)[i]] = i;
if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i)
{
fprintf(stderr, "%d %d\n", i, (*out)[i]);
hypre_assert(false);
}
}
hypre_assert(hypre_UnorderedIntMapSize(inverse_map) == len);
#endif
// exactly one of {in, temp} holds the result; free the other one
if (*out == in)
{
hypre_TFree(temp, HYPRE_MEMORY_HOST);
}
else
{
hypre_TFree(in, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
/*--------------------------------------------------------------------------
* hypre_big_merge_sort
*--------------------------------------------------------------------------*/
/*
 * HYPRE_BigInt twin of hypre_merge_sort: thread-parallel merge sort of
 * in[0..len), ping-ponging between `in` and the scratch buffer `temp`;
 * *out is set to whichever buffer holds the fully sorted sequence.
 */
void hypre_big_merge_sort(HYPRE_BigInt *in, HYPRE_BigInt *temp, HYPRE_Int len,
HYPRE_BigInt **out)
{
if (0 == len) return;
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif
#ifdef DBG_MERGE_SORT
HYPRE_Int *dbg_buf = new HYPRE_Int[len];
std::copy(in, in + len, dbg_buf);
std::sort(dbg_buf, dbg_buf + len);
#endif
// HYPRE_Int thread_private_len[hypre_NumThreads()];
// HYPRE_Int out_len = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads = hypre_NumActiveThreads();
HYPRE_Int my_thread_num = hypre_GetThreadNum();
// thread-private sort: each thread sequentially sorts its own
// contiguous slice of `in`
HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads;
HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len);
HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len);
hypre_BigQsort0(in, i_begin, i_end - 1);
// merge sorted sequences: ceil(log2(num_threads)) rounds; in each
// round pairs of sorted groups are merged cooperatively, swapping
// the roles of in_buf and out_buf afterwards
HYPRE_Int in_group_size;
HYPRE_BigInt *in_buf = in;
HYPRE_BigInt *out_buf = temp;
for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
// merge 2 in-groups into 1 out-group
HYPRE_Int out_group_size = in_group_size*2;
HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size;
// HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1);
HYPRE_Int id_in_group = my_thread_num%out_group_size;
HYPRE_Int num_threads_in_group =
hypre_min(group_leader + out_group_size, num_threads) - group_leader;
HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len);
HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len);
hypre_big_parallel_merge(
in_buf + (HYPRE_BigInt)in_group1_begin, in_buf + (HYPRE_BigInt)in_group1_end,
in_buf + (HYPRE_BigInt)in_group2_begin, in_buf + (HYPRE_BigInt)in_group2_end,
out_buf + (HYPRE_BigInt)in_group1_begin,
num_threads_in_group,
id_in_group);
// swap buffer roles for the next round
HYPRE_BigInt *temp = in_buf;
in_buf = out_buf;
out_buf = temp;
}
// every thread computes the same in_buf, so all threads store the
// same pointer here
*out = in_buf;
} /* omp parallel */
#ifdef DBG_MERGE_SORT
hypre_assert(std::equal(*out, *out + len, dbg_buf));
delete[] dbg_buf;
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
/*--------------------------------------------------------------------------
* hypre_big_sort_and_create_inverse_map
*--------------------------------------------------------------------------*/
/*
 * HYPRE_BigInt twin of hypre_sort_and_create_inverse_map: sort `in`
 * (length len) into *out and build a concurrent hash map with
 * inverse_map[value] = position. `in` is freed unless the sort result
 * ended up in `in` itself.
 */
void hypre_big_sort_and_create_inverse_map(HYPRE_BigInt *in, HYPRE_Int len, HYPRE_BigInt **out,
hypre_UnorderedBigIntMap *inverse_map)
{
if (len == 0)
{
return;
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif
// scratch buffer for the ping-pong merge sort
HYPRE_BigInt *temp = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST);
hypre_big_merge_sort(in, temp, len, out);
// 2*len buckets; concurrency level scales with the thread count
hypre_UnorderedBigIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads());
HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < len; i++)
{
// PutIfAbsent returns the previous value; EMPTY means no duplicate keys
HYPRE_Int old = hypre_UnorderedBigIntMapPutIfAbsent(inverse_map, (*out)[i], i);
hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY);
#ifdef DBG_MERGE_SORT
if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i)
{
fprintf(stderr, "%d %d\n", i, (*out)[i]);
hypre_assert(false);
}
#endif
}
#ifdef DBG_MERGE_SORT
std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len);
for (HYPRE_Int i = 0; i < len; ++i) {
inverse_map2[(*out)[i]] = i;
if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i)
{
fprintf(stderr, "%d %d\n", i, (*out)[i]);
hypre_assert(false);
}
}
hypre_assert(hypre_UnorderedBigIntMapSize(inverse_map) == len);
#endif
// exactly one of {in, temp} holds the result; free the other one
if (*out == in)
{
hypre_TFree(temp, HYPRE_MEMORY_HOST);
}
else
{
hypre_TFree(in, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
/* vim: set tabstop=8 softtabstop=3 sw=3 expandtab: */
|
ADR_assembler_C_omp.c | /* This file is part of redbKIT.
* Copyright (c) 2016, Ecole Polytechnique Federale de Lausanne (EPFL)
* Author: Federico Negri <federico.negri@epfl.ch>
*/
#include "mex.h"
#include <stdio.h>
#include <math.h>
#include "blas.h"
#include <string.h>
#define INVJAC(i,j,k) invjac[i+(j+k*dim)*noe]
#define GRADREFPHI(i,j,k) gradrefphi[i+(j+k*NumQuadPoints)*nln]
#ifdef _OPENMP
#include <omp.h>
#else
#warning "OpenMP not enabled. Compile with mex ADR_assembler_C_omp.c CFLAGS="\$CFLAGS -fopenmp" LDFLAGS="\$LDFLAGS -fopenmp""
#endif
/*
 * MEX entry point: element-by-element assembly of a scalar
 * advection-diffusion-reaction (ADR) finite-element problem, producing
 * COO (row/col/value) triplets to be combined with sparse() in MATLAB.
 *
 * Inputs (prhs), as read below:
 *   0: dim        - spatial dimension (scalar)
 *   1: operator   - string: "diffusion"|"transport"|"reaction"|"source"|"all"
 *   2: TC_d       - (i,j) pair selecting one diffusion-tensor entry;
 *                   the sentinel (10,10) selects the identity tensor
 *   3: TC_t       - index selecting one transport component; 10 = all
 *   4: elements   - connectivity matrix, one column per element
 *   5: nln        - number of local basis functions per element (scalar)
 *   6: mu         - diffusion coefficient at quadrature nodes (noe x nq)
 *   7: conv_field - convection field at quadrature nodes
 *   8: si         - reaction coefficient at quadrature nodes
 *   9: f          - source term at quadrature nodes
 *  10: w          - quadrature weights (1 x nq)
 *  11: invjac     - inverse Jacobians of the element maps
 *  12: detjac     - Jacobian determinants, one per element
 *  13: phi        - reference basis functions at quadrature nodes (nln x nq)
 *  14: gradrefphi - reference basis gradients at quadrature nodes
 *
 * Outputs (plhs): operator-matrix triplets (rows, cols, values), the matching
 * mass-matrix values (same row/col pattern), and rhs-vector (rows, values).
 */
void mexFunction(int nlhs,mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
/* Check for proper number of arguments. */
if(nrhs!=15) {
mexErrMsgTxt("15 inputs are required.");
} else if(nlhs>6) {
mexErrMsgTxt("Too many output arguments.");
}
/* Problem sizes: dim from prhs[0], element count from the connectivity
 * matrix (one element per column), nln from prhs[5]. */
double* dim_ptr = mxGetPr(prhs[0]);
int dim = (int)(dim_ptr[0]);
int noe = mxGetN(prhs[4]);
double* nln_ptr = mxGetPr(prhs[5]);
int nln = (int)(nln_ptr[0]);
int numRowsElements = mxGetM(prhs[4]);
int nln2 = nln*nln;
/* Allocate triplet outputs: nln^2 entries per element for the matrices,
 * nln entries per element for the right-hand side. */
plhs[0] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
plhs[1] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
plhs[2] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
plhs[3] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
plhs[4] = mxCreateDoubleMatrix(nln*noe,1, mxREAL);
plhs[5] = mxCreateDoubleMatrix(nln*noe,1, mxREAL);
double* myArows = mxGetPr(plhs[0]);
double* myAcols = mxGetPr(plhs[1]);
double* myAcoef = mxGetPr(plhs[2]);
double* myMcoef = mxGetPr(plhs[3]);
double* myRrows = mxGetPr(plhs[4]);
double* myRcoef = mxGetPr(plhs[5]);
/* Copy the operator-selection string from prhs[1] into a C string. */
char *OP_string = mxArrayToString(prhs[1]);
/* OP[k] enables one term each: diffusion, transport, reaction, source. */
int OP[4] = {0, 0, 0, 0};
if (strcmp(OP_string, "diffusion")==0)
{
OP[0] = 1;
}
if (strcmp(OP_string, "transport")==0)
{
OP[1] = 1;
}
if (strcmp(OP_string, "reaction")==0)
{
OP[2] = 1;
}
if (strcmp(OP_string, "source")==0)
{
OP[3] = 1;
}
if (strcmp(OP_string, "all")==0)
{
OP[0] = 1;
OP[1] = 1;
OP[2] = 1;
OP[3] = 1;
}
mxFree(OP_string);
/* Selector structures (VLAs): C_d masks one entry of the diffusion tensor
 * (or the identity for the "10,10" sentinel), C_t masks the active
 * transport components. Note: indices in TC_d/TC_t are 1-based MATLAB
 * values, converted to 0-based below. */
double C_t[dim];
double C_d[dim][dim];
double* TC_d = mxGetPr(prhs[2]);
double* TC_t = mxGetPr(prhs[3]);
int k,l;
for (k = 0; k < dim; k = k + 1 )
{
for (l = 0; l < dim; l = l + 1 )
{
C_d[k][l] = 0;
}
C_t[k] = 0;
}
if ((int)(TC_d[0])==10 && (int)(TC_d[1])==10)
{
for (l = 0; l < dim; l = l + 1 )
{
C_d[l][l] = 1;
}
}
else
{
C_d[(int)(TC_d[0]-1)][(int)(TC_d[1]-1)] = 1;
}
if ((int)(TC_t[0])==10)
{
for (l = 0; l < dim; l = l + 1 )
{
C_t[l] = 1;
}
}
else
{
C_t[(int)(TC_t[0]-1)] = 1;
}
/* Local mass matrix (computed only once) with quadrature nodes */
double LocalMass[nln][nln];
int q;
int NumQuadPoints = mxGetN(prhs[10]);
double* mu = mxGetPr(prhs[6]);
double* conv_field = mxGetPr(prhs[7]);
double* si = mxGetPr(prhs[8]);
double* f = mxGetPr(prhs[9]);
double* w = mxGetPr(prhs[10]);
double* invjac = mxGetPr(prhs[11]);
double* detjac = mxGetPr(prhs[12]);
double* phi = mxGetPr(prhs[13]);
double* gradrefphi = mxGetPr(prhs[14]);
/* Reference-element mass matrix: LocalMass[k][l] = sum_q phi_k phi_l w_q.
 * It is element-independent; per element it is scaled by detjac below. */
for (k = 0; k < nln; k = k + 1 )
{
for (l = 0; l < nln; l = l + 1 )
{
double tmp = 0;
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
tmp = tmp + phi[k+q*nln] * phi[l+q*nln] * w[q];
}
LocalMass[k][l] = tmp;
}
}
/* Per-element physical gradients of the basis functions (VLA scratch;
 * made private in the parallel loop so each thread has its own copy). */
double gradphi[dim][nln][NumQuadPoints];
double* elements = mxGetPr(prhs[4]);
/* Assembly: loop over the elements */
int ie;
#pragma omp parallel for shared(invjac,mu,conv_field,si,f,detjac,elements, myRrows, myRcoef,myAcols, myArows, myAcoef, myMcoef) private(gradphi,ie,k,l,q) firstprivate(phi,gradrefphi, w, numRowsElements, nln2, nln, OP, C_t, C_d, LocalMass)
for (ie = 0; ie < noe; ie = ie + 1 )
{
int d1, d2;
/* Push reference gradients to physical space: grad = invJ^T * gradref. */
for (k = 0; k < nln; k = k + 1 )
{
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
gradphi[d1][k][q] = 0;
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
gradphi[d1][k][q] = gradphi[d1][k][q] + INVJAC(ie,d1,d2)*GRADREFPHI(k,q,d2);
}
}
}
}
int iii = 0;
int ii = 0;
int a, b;
/* a = test (row) index, b = trial (column) index */
for (a = 0; a < nln; a = a + 1 )
{
for (b = 0; b < nln; b = b + 1 )
{
double aloc = 0;
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
/* Diffusion term: grad(phi_b)^T C_d mu grad(phi_a). */
double diffusion = 0;
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
diffusion = diffusion + C_d[d1][d2] * mu[ie+q*noe] * gradphi[d1][b][q] * gradphi[d2][a][q];
}
}
/* Transport term: (b . grad(phi_b)) phi_a with masked components. */
double transport = 0;
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
transport = transport + C_t[d1] * conv_field[ie+(q+d1*NumQuadPoints)*noe] * gradphi[d1][b][q] * phi[a+q*nln];
}
double reaction = si[ie+q*noe] * phi[b+q*nln] * phi[a+q*nln];
aloc = aloc + (OP[0] * diffusion + OP[1] * transport + OP[2] * reaction) * w[q];
}
/* Row/col indices come straight from the connectivity (global dofs);
 * values are scaled by the element's Jacobian determinant. */
myArows[ie*nln2+iii] = elements[a+ie*numRowsElements];
myAcols[ie*nln2+iii] = elements[b+ie*numRowsElements];
myAcoef[ie*nln2+iii] = aloc*detjac[ie];
myMcoef[ie*nln2+iii] = LocalMass[a][b]*detjac[ie];
iii = iii + 1;
}
/* Source contribution to the right-hand side for test function a. */
double floc = 0;
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
floc = floc + ( OP[3] * phi[a+q*nln] * f[ie+q*noe] ) * w[q];
}
myRrows[ie*nln+ii] = elements[a+ie*numRowsElements];
myRcoef[ie*nln+ii] = floc*detjac[ie];
ii = ii + 1;
}
}
}
|
search.h | // -*- C++ -*-
// Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/search.h
* @brief Parallel implementation base for std::search() and
* std::search_n().
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_SEARCH_H
#define _GLIBCXX_PARALLEL_SEARCH_H 1
#include <bits/stl_algobase.h>
#include <parallel/parallel.h>
#include <parallel/equally_split.h>
namespace __gnu_parallel
{
/**
 * @brief Precalculate __advances for Knuth-Morris-Pratt algorithm.
 * @param __elements Begin iterator of sequence to search for.
 * @param __length Length of sequence to search for.
 * @param __off Output table of size __length + 1: __off[j] holds the
 *              length of the longest proper border of the length-j
 *              prefix, with __off[0] == -1 as a sentinel.
 */
template<typename _RAIter, typename _DifferenceTp>
void
__calc_borders(_RAIter __elements, _DifferenceTp __length,
               _DifferenceTp* __off)
{
  typedef _DifferenceTp _DifferenceType;

  // Sentinel entry; a one-element prefix has border length 0.
  __off[0] = -1;
  if (__length > 1)
    __off[1] = 0;

  // __border tracks the border length of the previously handled prefix.
  _DifferenceType __border = 0;
  _DifferenceType __i = 2;
  while (__i <= __length)
    {
      // Shrink the candidate border until it can be extended by the next
      // pattern element (only operator== is required, hence the negated
      // comparison).
      while (__border >= 0
             && !(__elements[__border] == __elements[__i - 1]))
        __border = __off[__border];
      ++__border;
      __off[__i] = __border;
      ++__i;
    }
}
// Generic parallel find algorithm (requires random access iterator).
/** @brief Parallel std::search.
* @param __begin1 Begin iterator of first sequence.
* @param __end1 End iterator of first sequence.
* @param __begin2 Begin iterator of second sequence.
* @param __end2 End iterator of second sequence.
* @param __pred Find predicate.
* @return Place of finding in first sequence (end iterator if absent). */
template<typename __RAIter1,
typename __RAIter2,
typename _Pred>
__RAIter1
__search_template(__RAIter1 __begin1, __RAIter1 __end1,
__RAIter2 __begin2, __RAIter2 __end2,
_Pred __pred)
{
typedef std::iterator_traits<__RAIter1> _TraitsType;
typedef typename _TraitsType::difference_type _DifferenceType;
_GLIBCXX_CALL((__end1 - __begin1) + (__end2 - __begin2));
_DifferenceType __pattern_length = __end2 - __begin2;
// Pattern too short.
if(__pattern_length <= 0)
return __end1;
// Last point to start search.
_DifferenceType __input_length = (__end1 - __begin1) - __pattern_length;
// Where is first occurrence of pattern? defaults to end.
_DifferenceType __result = (__end1 - __begin1);
// Shared split points; allocated by the single thread inside the
// parallel region, freed after it.
_DifferenceType *__splitters;
// Pattern too long.
if (__input_length < 0)
return __end1;
// Lock serializing updates of __result (minimum over all candidates).
omp_lock_t __result_lock;
omp_init_lock(&__result_lock);
_ThreadIndex __num_threads = std::max<_DifferenceType>
(1, std::min<_DifferenceType>(__input_length,
__get_max_threads()));
// KMP failure table; VLA is a GNU extension, sized by the pattern.
_DifferenceType __advances[__pattern_length];
__calc_borders(__begin2, __pattern_length, __advances);
# pragma omp parallel num_threads(__num_threads)
{
// The implicit barrier of the single construct publishes __splitters
// to all threads before they read it.
# pragma omp single
{
__num_threads = omp_get_num_threads();
__splitters = new _DifferenceType[__num_threads + 1];
equally_split(__input_length, __num_threads, __splitters);
}
_ThreadIndex __iam = omp_get_thread_num();
_DifferenceType __start = __splitters[__iam],
__stop = __splitters[__iam + 1];
_DifferenceType __pos_in_pattern = 0;
bool __found_pattern = false;
while (__start <= __stop && !__found_pattern)
{
// Get new value of result.
#pragma omp flush(__result)
// No chance for this thread to find first occurrence: an earlier
// match already exists (unlocked read is a benign race here; it
// only affects how early this thread gives up).
if (__result < __start)
break;
// Standard KMP matching step at position __start.
while (__pred(__begin1[__start + __pos_in_pattern],
__begin2[__pos_in_pattern]))
{
++__pos_in_pattern;
if (__pos_in_pattern == __pattern_length)
{
// Found new candidate for result.
omp_set_lock(&__result_lock);
__result = std::min(__result, __start);
omp_unset_lock(&__result_lock);
__found_pattern = true;
break;
}
}
// Make safe jump using the precomputed failure table.
__start += (__pos_in_pattern - __advances[__pos_in_pattern]);
__pos_in_pattern = (__advances[__pos_in_pattern] < 0
? 0 : __advances[__pos_in_pattern]);
}
} //parallel
omp_destroy_lock(&__result_lock);
delete[] __splitters;
// Return iterator on found element.
return (__begin1 + __result);
}
} // end namespace
#endif /* _GLIBCXX_PARALLEL_SEARCH_H */
|
GB_binop__isle_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint32)
// A*D function (colscale): GB (_AxD__isle_uint32)
// D*A function (rowscale): GB (_DxB__isle_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint32)
// C=scalar+B GB (_bind1st__isle_uint32)
// C=scalar+B' GB (_bind1st_tran__isle_uint32)
// C=A+scalar GB (_bind2nd__isle_uint32)
// C=A'+scalar GB (_bind2nd_tran__isle_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_UINT32 || GxB_NO_ISLE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Compiled out (#if 0): the ISLE operator is not in the list of accumulator
// ops below, so the generator emits no dense ewise3-accum kernel here.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Entire body is supplied by the template, driven by the GB_* macros above
// (GB_BINOP expands to cij = (aij <= bij) for the ISLE_UINT32 operator).
void GB (_Cdense_ewise3_noaccum__isle_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Returns GrB_NO_VALUE when this operator is disabled at compile time
// (GB_DISABLE); the generic kernel is used instead in that case.
GrB_Info GB (_Cdense_accumB__isle_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isle_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable duplicate return below -- harmless generated
// boilerplate; the braced block above always returns first.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isle_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is written by the template; the GB_BINOP macro supplies (x <= y).
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isle_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is written by the template; the GB_BINOP macro supplies (x <= y).
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isle_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only read by the template when is_eWiseUnion is true,
// so they are left uninitialized otherwise.
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isle_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isle_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for ISLE (flipped form handled elsewhere), so only
// the #else branch below is compiled for this operator.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isle_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isle_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isle_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries absent from the bitmap (GBB is all-true when Bb is NULL)
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isle_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap (GBB is all-true when Ab is NULL)
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__isle_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function (generated
// boilerplate; it re-defines the same type here)
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__isle_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
target_teams_distribute_parallel_for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute parallel for simd'}}
#pragma omp target teams distribute parallel for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute parallel for simd'}}
#pragma omp target teams distribute parallel for simd foo
void test_no_clause() {
// Checks that the bare combined directive accepts a for loop and rejects a non-loop statement.
int i;
#pragma omp target teams distribute parallel for simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp target teams distribute parallel for simd' must be a for loop}}
#pragma omp target teams distribute parallel for simd
++i;
}
void test_branch_protected_scope() {
// Checks branch restrictions: goto/return may not leave the structured block; jumps within it are fine.
int i = 0;
L1:
++i;
int x[24];
#pragma omp target teams distribute parallel for simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
// Checks that unknown clause tokens only produce a warning and are ignored.
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
#pragma omp target teams distribute parallel for simd foo bar
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
// Checks handling of stray punctuation (';', ',') after the directive and its clauses.
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
#pragma omp target teams distribute parallel for simd;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
#pragma omp target teams distribute parallel for simd private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
#pragma omp target teams distribute parallel for simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_collapse() {
// Checks 'collapse' clause parsing, value validation, and loop-nest depth enforcement.
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute parallel for simd collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute parallel for simd collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
#pragma omp target teams distribute parallel for simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target teams distribute parallel for simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target teams distribute parallel for simd collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute parallel for simd collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute parallel for simd collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute parallel for simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-error@+4 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp target teams distribute parallel for simd collapse(2) firstprivate(i)
for (i = 0; i < 16; ++i)
for (int j = 0; j < 16; ++j)
#pragma omp parallel for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_private() {
// Checks 'private' clause parsing errors and accepted variable-list forms.
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute parallel for simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute parallel for simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute parallel for simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute parallel for simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
// Checks 'lastprivate' clause parsing errors and accepted variable-list forms.
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute parallel for simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute parallel for simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute parallel for simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute parallel for simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
// Checks 'firstprivate' parsing, lastprivate/firstprivate conflicts, and simdlen/safelen interaction.
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute parallel for simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
// expected-error@+1 {{lastprivate variable cannot be firstprivate}} expected-note@+1 {{defined as lastprivate}}
#pragma omp target teams distribute parallel for simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{lastprivate variable cannot be firstprivate}} expected-note@+1 2 {{defined as lastprivate}}
#pragma omp target teams distribute parallel for simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 3 {{lastprivate variable cannot be firstprivate}} expected-note@+1 3 {{defined as lastprivate}}
#pragma omp target teams distribute parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target teams distribute parallel for simd simdlen(64) safelen(8)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
  // The canonical-loop-form rules require an integer (or pointer)
  // induction variable; 'float' and 'double' loop variables are rejected.
  // Markers use line-relative offsets (@+N); keep them directly above
  // the pragma they describe.
  float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute parallel for simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute parallel for simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
|
Fig_13.1_taskBug.c | // sample compile command: "gcc -fopenmp -c Fig_13.1_taskBug.c" to generate *.o object file
// Deliberately buggy teaching example (the file is named Fig_13.1_taskBug):
// nested task creation inside unnamed critical regions.
// NOTE(review): task creation is a task scheduling point, so the thread
// executing task 2 may, while holding "Critical Region 2" at the point it
// creates task 3, suspend the current task and begin another task; if that
// other task tries to enter the same unnamed critical region (Critical
// region 1 uses the same global lock), the thread can deadlock on a lock
// it already holds.  Presumably this is the bug the figure illustrates --
// confirm against the accompanying text before relying on this reading.
void work()
{
#pragma omp task //task 1
  {
#pragma omp task //task 2
    {
#pragma omp critical // Critical region 1
      {/* do work here */}
    }
#pragma omp critical // Critical Region 2
    {
#pragma omp task // task 3
      {/* do work here */}
    }
  }
}
|
localizer.h | /*
* File: localizer.h
* Author: hexi
*
* Created on 2015年12月27日, 上午11:11
*/
#pragma once
#include <type_traits>
#include <limits>
#include "dmlc/data.h"
#include "dmlc/omp.h"
#include "data/row_block.h"
#include "base/parallel_sort.h"
namespace dmlc {
namespace svdfeature {
/// \brief reverse the bytes of x to make it more uniformly spanning the space
/// \brief Scramble the bits of x so nearly-sequential values spread more
/// uniformly over the 64-bit key space.  Swaps the two 32-bit halves, then
/// progressively smaller groups (16-bit, bytes, nibbles, bit pairs).
/// Note: despite the name this is not a full bit reversal -- the final
/// 1-bit swap is absent, matching the historical mapping.
inline uint64_t ReverseBytes64(uint64_t x) {
  x = (x << 32) | (x >> 32);
  // low-half masks for each swap granularity: 16, 8, 4, 2 bits
  const uint64_t kMask[4] = {0x0000FFFF0000FFFFULL, 0x00FF00FF00FF00FFULL,
                             0x0F0F0F0F0F0F0F0FULL, 0x3333333333333333ULL};
  for (int shift = 16, m = 0; shift >= 2; shift >>= 1, ++m) {
    x = ((x & kMask[m]) << shift) | ((x & ~kMask[m]) >> shift);
  }
  return x;
}
/// \brief reverse the bytes of x to make it more uniformly spanning the space
/// \brief 32-bit variant of ReverseBytes64: swap the two 16-bit halves,
/// then bytes, nibbles, and bit pairs.  Not a full bit reversal (no final
/// 1-bit swap); the mapping is kept identical to the original.
inline uint32_t ReverseBytes32(uint32_t x) {
  x = (x << 16) | (x >> 16);
  // low-half masks for each swap granularity: 8, 4, 2 bits
  const uint32_t kMask[3] = {0x00FF00FFU, 0x0F0F0F0FU, 0x33333333U};
  for (int shift = 8, m = 0; shift >= 2; shift >>= 1, ++m) {
    x = ((x & kMask[m]) << shift) | ((x & ~kMask[m]) >> shift);
  }
  return x;
}
/**
* @brief Mapping a RowBlock with general indices into continuous indices
* starting from 0
* @tparam I the index type
*/
template<typename I>
class Localizer {
 public:
  /// @param nthreads number of threads used by the parallel phases
  Localizer(int nthreads = 2) : nt_(nthreads) { }
  ~Localizer() { }
  /**
   * @brief Localize a Rowblock: map its general indices into a compact
   *        range starting at 0 and write the result into \a localized.
   * @param blk the input rowblock
   * @param localized output rowblock with remapped indices
   * @param uniq_idx if non-NULL, receives the sorted unique (scrambled) keys
   * @param idx_frq if non-NULL, receives the occurrence count of each key
   */
  template<typename C = unsigned>
  void Localize(const RowBlock<I>& blk,
                data::RowBlockContainer<unsigned> *localized,
                std::vector<I>* uniq_idx = NULL,
                std::vector<C>* idx_frq = NULL) {
    // Keep the unique-index storage on the stack when the caller does not
    // ask for it back.  The previous raw new/delete pair leaked the vector
    // if CountUniqIndex/RemapIndex exited abnormally (e.g. a CHECK throw).
    std::vector<I> scratch;
    std::vector<I>* uidx = (uniq_idx != NULL) ? uniq_idx : &scratch;
    CountUniqIndex<C>(blk, uidx, idx_frq);
    RemapIndex(blk, *uidx, localized);
    Clear();
  }
  /**
   * @brief count unique items
   *
   * temporal results are stored in pair_ to accelerate RemapIndex().
   *
   * @param blk the input rowblock
   * @param uniq_idx returns the sorted unique (scrambled) keys
   * @param idx_frq if not NULL then returns the according occurrence counts
   */
  template<typename C>
  void CountUniqIndex(const RowBlock<I>& blk,
                      std::vector<I>* uniq_idx,
                      std::vector<C>* idx_frq);
  /**
   * @brief Remaps the index.
   *
   * @param idx_dict the index dictionary. Any index that does not exist in
   * this dictionary is dropped.
   *
   * @param localized a rowblock with index mapped: idx_dict[i] -> i.
   */
  void RemapIndex(const RowBlock<I>& blk,
                  const std::vector<I>& idx_dict,
                  data::RowBlockContainer<unsigned> *localized);
  /**
   * @brief Clears the temporal results
   */
  void Clear() { pair_.clear(); }

 private:
  int nt_;  // thread count for the parallel scramble/sort phases
  // (key, original position) record; packed to 4-byte alignment to keep
  // the pair_ array compact for the parallel sort
#pragma pack(push)
#pragma pack(4)
  struct Pair {
    I k; unsigned i;
  };
#pragma pack(pop)
  std::vector<Pair> pair_;  // scratch shared between Count and Remap
};
template<typename I>
template<typename C>
void Localizer<I>::CountUniqIndex(
    const RowBlock<I>& blk, std::vector<I> *uniq_idx, std::vector<C>* idx_frq) {
  // Nothing to do for an empty block.
  if (blk.size == 0) return;
  size_t idx_size = blk.offset[blk.size];
  // Pair::i is 'unsigned'; every entry position must fit.
  CHECK_LT(idx_size, static_cast<size_t>(std::numeric_limits<unsigned>::max()))
      << "you need to change Pair.i from unsigned to uint64";
  pair_.resize(idx_size);
  // Scramble each key so nearly-sequential raw indices spread over the key
  // space; dispatch on sizeof(I) for 32- vs 64-bit index types.
  if (sizeof(I) == 4) {
#pragma omp parallel for num_threads(nt_)
    for (size_t i = 0; i < idx_size; ++i) {
      pair_[i].k = ReverseBytes32(blk.index[i]);
      pair_[i].i = i;
    }
  } else if (sizeof(I) == 8) {
#pragma omp parallel for num_threads(nt_)
    for (size_t i = 0; i < idx_size; ++i) {
      pair_[i].k = ReverseBytes64(blk.index[i]);
      pair_[i].i = i;
    }
  } else {
    CHECK(NULL) << "unsupported key type";
  }
  // sort by (scrambled) key
  ParallelSort(&pair_, nt_,
               [](const Pair& a, const Pair& b) {return a.k < b.k; });
  // Emit the sorted unique keys and, optionally, their frequencies.
  CHECK_NOTNULL(uniq_idx);
  uniq_idx->clear();
  if (idx_frq) idx_frq->clear();
  bool int_cnt = std::is_integral<C>::value;
  // Saturation cap for integral count types.  Bug fix: the cast is only
  // evaluated when C is integral -- converting e.g.
  // std::numeric_limits<float>::max() to unsigned is undefined behavior.
  unsigned cnt_max = int_cnt
      ? static_cast<unsigned>(std::numeric_limits<C>::max())
      : std::numeric_limits<unsigned>::max();
  I curr = pair_[0].k;
  unsigned cnt = 0;
  // Run-length scan over the sorted keys.
  for (size_t i = 0; i < pair_.size(); ++i) {
    const Pair& v = pair_[i];
    if (v.k != curr) {
      uniq_idx->push_back(curr);
      curr = v.k;
      if (idx_frq) {
        if (int_cnt) {
          idx_frq->push_back(std::min(cnt, cnt_max));  // clamp for narrow C
        } else {
          idx_frq->push_back(static_cast<C>(cnt));
        }
      }
      cnt = 0;
    }
    ++ cnt;
  }
  // Flush the final run.
  uniq_idx->push_back(curr);
  if (idx_frq) {
    if (int_cnt) {
      idx_frq->push_back(std::min(cnt, cnt_max));
    } else {
      idx_frq->push_back(static_cast<C>(cnt));
    }
  }
}
template<typename I>
void Localizer<I>::RemapIndex(
    const RowBlock<I>& blk, const std::vector<I>& idx_dict,
    data::RowBlockContainer<unsigned> *localized) {
  // Nothing to remap for an empty block or an empty dictionary.
  if (blk.size == 0 || idx_dict.empty()) return;
  // Compact indices are stored as 'unsigned'; the dictionary must fit.
  CHECK_LT(idx_dict.size(),
           static_cast<size_t>(std::numeric_limits<unsigned>::max()));
  // pair_ must still hold the entries CountUniqIndex() produced for this
  // same block (Localize() runs the two back to back before Clear()).
  CHECK_EQ(blk.offset[blk.size], pair_.size());
  // Build the index mapping by walking the sorted dictionary and the
  // key-sorted pair_ list in lock step (both are ordered by the scrambled
  // key, as produced by CountUniqIndex).  Entry at original position
  // cur_pair->i gets dictionary-position + 1; 0 is the "not in the
  // dictionary" sentinel (remapped_idx is zero-initialized below).
  unsigned matched = 0;
  std::vector<unsigned> remapped_idx(pair_.size(), 0);
  auto cur_dict = idx_dict.cbegin();
  auto cur_pair = pair_.cbegin();
  while (cur_dict != idx_dict.cend() && cur_pair != pair_.cend()) {
    if (*cur_dict < cur_pair->k) {
      ++ cur_dict;
    } else {
      if (*cur_dict == cur_pair->k) {
        remapped_idx[cur_pair->i]
            = static_cast<unsigned>((cur_dict-idx_dict.cbegin()) + 1);
        ++ matched;
      }
      ++ cur_pair;
    }
  }
  //CHECK_EQ(pair_.size(), matched);
  // construct the new rowblock, dropping entries that did not match
  data::RowBlockContainer<unsigned>* o = localized;
  CHECK_NOTNULL(o);
  o->offset.resize(blk.size+1); o->offset[0] = 0;
  o->index.resize(matched);
  if (blk.value) o->value.resize(matched);
  size_t k = 0;
  for (size_t i = 0; i < blk.size; ++i) {
    size_t n = 0;  // surviving entries in row i
    for (size_t j = blk.offset[i]; j < blk.offset[i+1]; ++j) {
      if (remapped_idx[j] == 0) continue;  // not in idx_dict: drop
      ++ n;
      if (blk.value) o->value[k] = blk.value[j];
      o->index[k++] = remapped_idx[j] - 1;  // undo the +1 sentinel shift
    }
    o->offset[i+1] = o->offset[i] + n;
  }
  CHECK_EQ(k, matched);
  // Labels and weights are copied through unchanged, one per row.
  if (blk.label) {
    o->label.resize(blk.size);
    memcpy(o->label.data(), blk.label, blk.size*sizeof(real_t));
  }
  if (blk.weight) {
    o->weight.resize(blk.size);
    memcpy(o->weight.data(), blk.weight, blk.size*sizeof(real_t));
  }
  // Largest compact index is dictionary-size - 1 (indices start at 0).
  o->max_index = idx_dict.size() - 1;
}
}//namespace svdfeature
} // namespace dmlc
|
HYPRE_IJMatrix.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* HYPRE_IJMatrix interface
*
*****************************************************************************/
#include "./_hypre_IJ_mv.h"
#include "../HYPRE.h"
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixCreate
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixCreate( MPI_Comm        comm,
                      HYPRE_BigInt    ilower,
                      HYPRE_BigInt    iupper,
                      HYPRE_BigInt    jlower,
                      HYPRE_BigInt    jupper,
                      HYPRE_IJMatrix *matrix )
{
   /* Create an (unassembled) IJ matrix whose local rows are
      [ilower, iupper] and local columns [jlower, jupper] (inclusive).
      An empty local range is expressed as lower == upper + 1.
      Collective over 'comm': the ranges of all ranks are exchanged to
      derive the global extents (and, without assumed partitioning, the
      full partitioning tables). */
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   HYPRE_BigInt *info;
   HYPRE_Int num_procs;
   HYPRE_Int myid;

   hypre_IJMatrix *ijmatrix;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_BigInt  row0, col0, rowN, colN;
#else
   HYPRE_BigInt *recv_buf;
   HYPRE_Int i, i4;
   HYPRE_Int square;
#endif

   ijmatrix = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);

   hypre_IJMatrixComm(ijmatrix) = comm;
   hypre_IJMatrixObject(ijmatrix) = NULL;
   hypre_IJMatrixTranslator(ijmatrix) = NULL;
   hypre_IJMatrixAssumedPart(ijmatrix) = NULL;
   hypre_IJMatrixObjectType(ijmatrix) = HYPRE_UNITIALIZED;
   hypre_IJMatrixAssembleFlag(ijmatrix) = 0;
   hypre_IJMatrixPrintLevel(ijmatrix) = 0;
   hypre_IJMatrixOMPFlag(ijmatrix) = 0;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &myid);

   /* Validate the local ranges; "lower > upper + 1" or a negative lower
      bound is malformed (upper == lower - 1 denotes an empty range). */
   if (ilower > iupper+1 || ilower < 0)
   {
      hypre_error_in_arg(2);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (iupper < -1)
   {
      hypre_error_in_arg(3);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (jlower > jupper+1 || jlower < 0)
   {
      hypre_error_in_arg(4);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (jupper < -1)
   {
      hypre_error_in_arg(5);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Assumed-partition mode: each rank stores only its own 2-entry
      [first, last+1) range; the global extents come from two broadcasts. */
   info = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

   row_partitioning = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   col_partitioning = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

   row_partitioning[0] = ilower;
   row_partitioning[1] = iupper+1;
   col_partitioning[0] = jlower;
   col_partitioning[1] = jupper+1;

   /* now we need the global number of rows and columns as well
      as the global first row and column index */

   /* proc 0 has the first row and col */
   if (myid==0)
   {
      info[0] = ilower;
      info[1] = jlower;
   }
   hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, 0, comm);
   row0 = info[0];
   col0 = info[1];

   /* proc (num_procs-1) has the last row and col */
   if (myid == (num_procs-1))
   {
      info[0] = iupper;
      info[1] = jupper;
   }
   hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, num_procs-1, comm);
   rowN = info[0];
   colN = info[1];

   hypre_IJMatrixGlobalFirstRow(ijmatrix) = row0;
   hypre_IJMatrixGlobalFirstCol(ijmatrix) = col0;
   hypre_IJMatrixGlobalNumRows(ijmatrix) = rowN - row0 + 1;
   hypre_IJMatrixGlobalNumCols(ijmatrix) = colN - col0 + 1;

   hypre_TFree(info, HYPRE_MEMORY_HOST);
#else
   /* Global-partition mode: gather every rank's (ilower,iupper,jlower,
      jupper) and build full (num_procs+1)-entry partitioning tables. */
   info = hypre_CTAlloc(HYPRE_BigInt, 4, HYPRE_MEMORY_HOST);
   recv_buf = hypre_CTAlloc(HYPRE_BigInt, 4*num_procs, HYPRE_MEMORY_HOST);
   row_partitioning = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);

   info[0] = ilower;
   info[1] = iupper;
   info[2] = jlower;
   info[3] = jupper;

   /* Generate row- and column-partitioning through information exchange
      across all processors, check whether the matrix is square, and
      if the partitionings match. i.e. no overlaps or gaps,
      if there are overlaps or gaps in the row partitioning or column
      partitioning , ierr will be set to -9 or -10, respectively */

   hypre_MPI_Allgather(info,4,HYPRE_MPI_BIG_INT,recv_buf,4,HYPRE_MPI_BIG_INT,comm);

   row_partitioning[0] = recv_buf[0];
   square = 1;
   for (i=0; i < num_procs-1; i++)
   {
      i4 = 4*i;
      /* rank i's last row must be exactly one below rank i+1's first row */
      if ( recv_buf[i4+1] != (recv_buf[i4+4]-1) )
      {
         hypre_error(HYPRE_ERROR_GENERIC);
         hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
         hypre_TFree(info, HYPRE_MEMORY_HOST);
         hypre_TFree(recv_buf, HYPRE_MEMORY_HOST);
         hypre_TFree(row_partitioning, HYPRE_MEMORY_HOST);
         return hypre_error_flag;
      }
      else
      {
         row_partitioning[i+1] = recv_buf[i4+4];
      }

      /* square iff every rank's row range equals its column range */
      if ((square && (recv_buf[i4] != recv_buf[i4+2])) ||
          (recv_buf[i4+1] != recv_buf[i4+3]) )
      {
         square = 0;
      }
   }

   i4 = (num_procs-1)*4;
   row_partitioning[num_procs] = recv_buf[i4+1]+1;

   if ((recv_buf[i4] != recv_buf[i4+2]) || (recv_buf[i4+1] != recv_buf[i4+3]))
   {
      square = 0;
   }

   if (square)
   {
      /* square matrix: the column partitioning aliases the row
         partitioning (Destroy checks for this aliasing before freeing) */
      col_partitioning = row_partitioning;
   }
   else
   {
      col_partitioning = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
      col_partitioning[0] = recv_buf[2];
      for (i=0; i < num_procs-1; i++)
      {
         i4 = 4*i;
         /* same gap/overlap check for the column partitioning */
         if (recv_buf[i4+3] != recv_buf[i4+6]-1)
         {
            hypre_error(HYPRE_ERROR_GENERIC);
            hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
            hypre_TFree(info, HYPRE_MEMORY_HOST);
            hypre_TFree(recv_buf, HYPRE_MEMORY_HOST);
            hypre_TFree(row_partitioning, HYPRE_MEMORY_HOST);
            hypre_TFree(col_partitioning, HYPRE_MEMORY_HOST);
            return hypre_error_flag;
         }
         else
         {
            col_partitioning[i+1] = recv_buf[i4+6];
         }
      }
      col_partitioning[num_procs] = recv_buf[num_procs*4-1]+1;
   }

   hypre_IJMatrixGlobalFirstRow(ijmatrix) = row_partitioning[0];
   hypre_IJMatrixGlobalFirstCol(ijmatrix) = col_partitioning[0];
   hypre_IJMatrixGlobalNumRows(ijmatrix) = row_partitioning[num_procs] -
      row_partitioning[0];
   hypre_IJMatrixGlobalNumCols(ijmatrix) = col_partitioning[num_procs] -
      col_partitioning[0];

   hypre_TFree(info, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_buf, HYPRE_MEMORY_HOST);
#endif

   hypre_IJMatrixRowPartitioning(ijmatrix) = row_partitioning;
   hypre_IJMatrixColPartitioning(ijmatrix) = col_partitioning;

   *matrix = (HYPRE_IJMatrix) ijmatrix;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixDestroy( HYPRE_IJMatrix matrix )
{
   /* Free the IJ matrix wrapper, its partitioning arrays, the assumed
      partition (if any), and the underlying ParCSR object. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* row and column partitionings may alias (square matrices share one
      array, see HYPRE_IJMatrixCreate), so free the column partitioning
      only when it is a distinct allocation */
   if (hypre_IJMatrixRowPartitioning(ijmatrix) ==
       hypre_IJMatrixColPartitioning(ijmatrix))
   {
      hypre_TFree(hypre_IJMatrixRowPartitioning(ijmatrix), HYPRE_MEMORY_HOST);
   }
   else
   {
      hypre_TFree(hypre_IJMatrixRowPartitioning(ijmatrix), HYPRE_MEMORY_HOST);
      hypre_TFree(hypre_IJMatrixColPartitioning(ijmatrix), HYPRE_MEMORY_HOST);
   }

   /* bug fix (readability/correctness-by-luck): the old code read
      "if hypre_IJMatrixAssumedPart(ijmatrix)" with no parentheses and
      only compiled because the accessor macro expands to a parenthesized
      expression; write the condition explicitly */
   if (hypre_IJMatrixAssumedPart(ijmatrix))
   {
      hypre_AssumedPartitionDestroy((hypre_IJAssumedPart*)hypre_IJMatrixAssumedPart(ijmatrix));
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixDestroyParCSR( ijmatrix );
   }
   else if ( hypre_IJMatrixObjectType(ijmatrix) != -1 )
   {
      /* unknown object type: report and bail (matching the original
         behavior, which did not free the wrapper in this branch) */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixInitialize( HYPRE_IJMatrix matrix )
{
   /* Prepare the matrix storage so coefficients can be set. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      /* only the ParCSR storage format is supported */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixInitializeParCSR( ijmatrix );

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixSetPrintLevel( HYPRE_IJMatrix matrix,
                             HYPRE_Int print_level )
{
   /* Set the diagnostic print level of the matrix. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* bug fix: the previous code assigned the constant 1 and silently
      ignored the print_level argument */
   hypre_IJMatrixPrintLevel(ijmatrix) = print_level;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* This is a helper routine to compute a prefix sum of integer values.
*
* The current implementation is okay for modest numbers of threads.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PrefixSumInt(HYPRE_Int nvals,
                   HYPRE_Int *vals,
                   HYPRE_Int *sums)
{
   /* Exclusive prefix sum: sums[j] = vals[0] + ... + vals[j-1], with
      sums[0] = 0.  Small inputs are scanned serially; larger ones are
      split into nthreads blocks that are scanned in parallel and then
      stitched together. */
   HYPRE_Int j, nthreads, bsize;

   /* robustness: nothing to do (and sums[0] must not be touched) */
   if (nvals <= 0)
   {
      return hypre_error_flag;
   }

   nthreads = hypre_NumThreads();
   bsize = (nvals + nthreads - 1) / nthreads; /* This distributes the remainder */

   if (nvals < nthreads || bsize == 1)
   {
      /* serial exclusive scan.
         bug fix: plain assignment -- the old "+=" folded whatever values
         were already stored in sums[] into the result */
      sums[0] = 0;
      for (j = 1; j < nvals; j++)
      {
         sums[j] = sums[j-1] + vals[j-1];
      }
   }
   else
   {
      /* Phase 1: preliminary partial sums (in parallel) within each block.
         bug fix: seed sums[j] -- the first entry of *this* block -- not
         sums[0]; the old code left sums[j] uninitialized for every block
         after the first, so the per-block scans read garbage */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < nvals; j += bsize)
      {
         HYPRE_Int i, n = hypre_min((j+bsize), nvals);
         sums[j] = 0;
         for (i = j+1; i < n; i++)
         {
            sums[i] = sums[i-1] + vals[i-1];
         }
      }

      /* Phase 2: final sums (in serial) for the first entry of every block */
      for (j = bsize; j < nvals; j += bsize)
      {
         sums[j] = sums[j-bsize] + sums[j-1] + vals[j-1];
      }

      /* Phase 3: add each block's final start value to its remaining
         entries (in parallel) */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = bsize; j < nvals; j += bsize)
      {
         HYPRE_Int i, n = hypre_min((j+bsize), nvals);
         for (i = j+1; i < n; i++)
         {
            sums[i] += sums[j];
         }
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixSetValues( HYPRE_IJMatrix       matrix,
                         HYPRE_Int            nrows,
                         HYPRE_Int           *ncols,
                         const HYPRE_BigInt  *rows,
                         const HYPRE_BigInt  *cols,
                         const HYPRE_Complex *values )
{
   /* Set (overwrite) coefficients: for each of the nrows rows, ncols[i]
      column/value pairs are consumed from the flat cols/values arrays. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
   HYPRE_Int *row_indexes;

   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* consistency fix: HYPRE_IJMatrixAddToValues rejects a negative row
      count; do the same here -- a negative nrows would reach the
      hypre_CTAlloc/prefix-sum below with a bogus size */
   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   if (!ncols)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (!cols)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }

   if (!values)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Compute row_indexes (offsets into cols/values) and call the
      Values2 routine (TODO: add OpenMP) */
   row_indexes = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
   hypre_PrefixSumInt(nrows, ncols, row_indexes);
   HYPRE_IJMatrixSetValues2(matrix, nrows, ncols, rows, row_indexes, cols, values);
   hypre_TFree(row_indexes, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixSetConstantValues( HYPRE_IJMatrix matrix, HYPRE_Complex value)
{
   /* Overwrite every stored entry of the matrix with 'value'. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      /* only the ParCSR storage format is supported */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return( hypre_IJMatrixSetConstantValuesParCSR( ijmatrix, value) );
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixAddToValues( HYPRE_IJMatrix       matrix,
                           HYPRE_Int            nrows,
                           HYPRE_Int           *ncols,
                           const HYPRE_BigInt  *rows,
                           const HYPRE_BigInt  *cols,
                           const HYPRE_Complex *values )
{
   /* Accumulate coefficients: each value is added to the existing entry. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
   HYPRE_Int *row_indexes;
   HYPRE_Int bad_arg = 0;

   /* adding zero rows is a no-op */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   /* validate the arguments, reporting the first offending one */
   if      (!ijmatrix)  { bad_arg = 1; }
   else if (nrows < 0)  { bad_arg = 2; }
   else if (!ncols)     { bad_arg = 3; }
   else if (!rows)      { bad_arg = 4; }
   else if (!cols)      { bad_arg = 5; }
   else if (!values)    { bad_arg = 6; }
   else if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR) { bad_arg = 1; }

   if (bad_arg)
   {
      hypre_error_in_arg(bad_arg);
      return hypre_error_flag;
   }

   /* Compute row_indexes (offsets into cols/values) and call Values2 */
   row_indexes = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
   hypre_PrefixSumInt(nrows, ncols, row_indexes);
   HYPRE_IJMatrixAddToValues2(matrix, nrows, ncols, rows, row_indexes, cols, values);
   hypre_TFree(row_indexes, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixSetValues2( HYPRE_IJMatrix       matrix,
                          HYPRE_Int            nrows,
                          HYPRE_Int           *ncols,
                          const HYPRE_BigInt  *rows,
                          const HYPRE_Int     *row_indexes,
                          const HYPRE_BigInt  *cols,
                          const HYPRE_Complex *values )
{
   /* Like SetValues, but the caller supplies row_indexes: the offset of
      each row's first entry within the flat cols/values arrays. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
   HYPRE_Int bad_arg = 0;

   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   /* validate the arguments, reporting the first offending one */
   if      (!ijmatrix)    { bad_arg = 1; }
   else if (nrows < 0)    { bad_arg = 2; }
   else if (!ncols)       { bad_arg = 3; }
   else if (!rows)        { bad_arg = 4; }
   else if (!row_indexes) { bad_arg = 5; }
   else if (!cols)        { bad_arg = 6; }
   else if (!values)      { bad_arg = 7; }
   else if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR) { bad_arg = 1; }

   if (bad_arg)
   {
      hypre_error_in_arg(bad_arg);
      return hypre_error_flag;
   }

   /* dispatch to the OpenMP-threaded or serial ParCSR implementation */
   if (hypre_IJMatrixOMPFlag(ijmatrix))
   {
      hypre_IJMatrixSetValuesOMPParCSR(ijmatrix, nrows, ncols, rows, row_indexes, cols, values);
   }
   else
   {
      hypre_IJMatrixSetValuesParCSR(ijmatrix, nrows, ncols, rows, row_indexes, cols, values);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixAddToValues2( HYPRE_IJMatrix       matrix,
                            HYPRE_Int            nrows,
                            HYPRE_Int           *ncols,
                            const HYPRE_BigInt  *rows,
                            const HYPRE_Int     *row_indexes,
                            const HYPRE_BigInt  *cols,
                            const HYPRE_Complex *values )
{
   /* Like AddToValues, but the caller supplies row_indexes: the offset of
      each row's first entry within the flat cols/values arrays. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
   HYPRE_Int bad_arg = 0;

   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   /* validate the arguments, reporting the first offending one */
   if      (!ijmatrix)    { bad_arg = 1; }
   else if (nrows < 0)    { bad_arg = 2; }
   else if (!ncols)       { bad_arg = 3; }
   else if (!rows)        { bad_arg = 4; }
   else if (!row_indexes) { bad_arg = 5; }
   else if (!cols)        { bad_arg = 6; }
   else if (!values)      { bad_arg = 7; }
   else if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR) { bad_arg = 1; }

   if (bad_arg)
   {
      hypre_error_in_arg(bad_arg);
      return hypre_error_flag;
   }

   /* dispatch to the OpenMP-threaded or serial ParCSR implementation */
   if (hypre_IJMatrixOMPFlag(ijmatrix))
   {
      hypre_IJMatrixAddToValuesOMPParCSR(ijmatrix, nrows, ncols, rows, row_indexes, cols, values);
   }
   else
   {
      hypre_IJMatrixAddToValuesParCSR(ijmatrix, nrows, ncols, rows, row_indexes, cols, values);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixAssemble( HYPRE_IJMatrix matrix )
{
   /* Finalize the matrix (communicate off-process contributions); must be
      called after all Set/AddToValues and before the matrix is used. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      /* only the ParCSR storage format is supported */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return( hypre_IJMatrixAssembleParCSR( ijmatrix ) );
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixGetRowCounts( HYPRE_IJMatrix matrix,
                            HYPRE_Int      nrows,
                            HYPRE_BigInt  *rows,
                            HYPRE_Int     *ncols )
{
   /* For each listed row, write its stored-entry count into ncols. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
   HYPRE_Int bad_arg = 0;

   /* querying zero rows is a no-op */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   /* validate the arguments, reporting the first offending one */
   if      (!ijmatrix) { bad_arg = 1; }
   else if (nrows < 0) { bad_arg = 2; }
   else if (!rows)     { bad_arg = 3; }
   else if (!ncols)    { bad_arg = 4; }
   else if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR) { bad_arg = 1; }

   if (bad_arg)
   {
      hypre_error_in_arg(bad_arg);
      return hypre_error_flag;
   }

   hypre_IJMatrixGetRowCountsParCSR( ijmatrix, nrows, rows, ncols );

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixGetValues( HYPRE_IJMatrix matrix,
                         HYPRE_Int      nrows,
                         HYPRE_Int     *ncols,
                         HYPRE_BigInt  *rows,
                         HYPRE_BigInt  *cols,
                         HYPRE_Complex *values )
{
   /* Gather stored coefficients for the requested rows/columns. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
   HYPRE_Int bad_arg = 0;

   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   /* validate the arguments, reporting the first offending one */
   if      (!ijmatrix) { bad_arg = 1; }
   else if (!ncols)    { bad_arg = 3; }
   else if (!rows)     { bad_arg = 4; }
   else if (!cols)     { bad_arg = 5; }
   else if (!values)   { bad_arg = 6; }
   else if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR) { bad_arg = 1; }

   if (bad_arg)
   {
      hypre_error_in_arg(bad_arg);
      return hypre_error_flag;
   }

   hypre_IJMatrixGetValuesParCSR( ijmatrix, nrows, ncols,
                                  rows, cols, values );

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixSetObjectType( HYPRE_IJMatrix matrix,
                             HYPRE_Int      type )
{
   /* Record the requested underlying storage type (e.g. HYPRE_PARCSR). */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (ijmatrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixObjectType(ijmatrix) = type;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixGetObjectType( HYPRE_IJMatrix matrix,
                             HYPRE_Int     *type )
{
   /* Report the underlying storage type of the matrix. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (ijmatrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *type = hypre_IJMatrixObjectType(ijmatrix);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixGetLocalRange( HYPRE_IJMatrix matrix,
                             HYPRE_BigInt  *ilower,
                             HYPRE_BigInt  *iupper,
                             HYPRE_BigInt  *jlower,
                             HYPRE_BigInt  *jupper )
{
   /* Return this process's owned row range [ilower, iupper] and column
      range [jlower, jupper], both inclusive.  With assumed partitioning
      (HYPRE_NO_GLOBAL_PARTITION) each rank stores only its own 2-entry
      [first, last+1) range; otherwise the full per-rank table is indexed
      by the caller's rank. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
   MPI_Comm comm;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   HYPRE_Int my_id;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   comm = hypre_IJMatrixComm(ijmatrix);
   row_partitioning = hypre_IJMatrixRowPartitioning(ijmatrix);
   col_partitioning = hypre_IJMatrixColPartitioning(ijmatrix);

   hypre_MPI_Comm_rank(comm, &my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* 2-entry local partitioning: [0] is first, [1] is last+1 */
   *ilower = row_partitioning[0];
   *iupper = row_partitioning[1]-1;
   *jlower = col_partitioning[0];
   *jupper = col_partitioning[1]-1;
#else
   /* global table: rank r owns [part[r], part[r+1]-1] */
   *ilower = row_partitioning[my_id];
   *iupper = row_partitioning[my_id+1]-1;
   *jlower = col_partitioning[my_id];
   *jupper = col_partitioning[my_id+1]-1;
#endif

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
Returns a pointer to an underlying ijmatrix type used to implement IJMatrix.
Assumes that the implementation has an underlying matrix, so it would not
work with a direct implementation of IJMatrix.
@return integer error code
@param IJMatrix [IN]
The ijmatrix to be pointed to.
*/
HYPRE_Int
HYPRE_IJMatrixGetObject( HYPRE_IJMatrix matrix,
                         void **object )
{
   /* Hand back the underlying storage object (e.g. a ParCSR matrix);
      only meaningful for implementations that keep one. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (ijmatrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *object = hypre_IJMatrixObject( ijmatrix );

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixSetRowSizes( HYPRE_IJMatrix matrix,
                           const HYPRE_Int *sizes )
{
   /* Provide per-row nonzero estimates used to size the storage. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      /* only the ParCSR storage format is supported */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return( hypre_IJMatrixSetRowSizesParCSR( ijmatrix , sizes ) );
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixSetDiagOffdSizes( HYPRE_IJMatrix matrix,
                                const HYPRE_Int *diag_sizes,
                                const HYPRE_Int *offdiag_sizes )
{
   /* Provide separate per-row nonzero estimates for the diagonal
      (on-process columns) and off-diagonal (off-process columns) parts. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      /* only the ParCSR storage format is supported */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixSetDiagOffdSizesParCSR( ijmatrix, diag_sizes, offdiag_sizes );

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixSetMaxOffProcElmts( HYPRE_IJMatrix matrix,
                                  HYPRE_Int max_off_proc_elmts)
{
   /* Size the buffer used to accumulate entries destined for other
      processes before assembly. */
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      /* only the ParCSR storage format is supported */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return( hypre_IJMatrixSetMaxOffProcElmtsParCSR(ijmatrix,
                                                  max_off_proc_elmts) );
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixRead( const char     *filename,
                    MPI_Comm        comm,
                    HYPRE_Int       type,
                    HYPRE_IJMatrix *matrix_ptr )
{
   /* Read one matrix per rank from "<filename>.<myid>": a header line
      "ilower iupper jlower jupper" followed by "row col value" triples.
      Rows outside the local range are routed through AddToValues so the
      contributions are shipped to their owner at assembly time. */
   HYPRE_IJMatrix  matrix;
   HYPRE_BigInt    ilower, iupper, jlower, jupper;
   HYPRE_BigInt    I, J;
   HYPRE_Int       ncols;
   HYPRE_Complex   value;
   HYPRE_Int       myid, ret;
   char            new_filename[255];
   FILE           *file;

   hypre_MPI_Comm_rank(comm, &myid);

   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "r")) == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_fscanf(file, "%b %b %b %b", &ilower, &iupper, &jlower, &jupper);

   HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &matrix);

   HYPRE_IJMatrixSetObjectType(matrix, type);
   HYPRE_IJMatrixInitialize(matrix);

   /* It is important to ensure that whitespace follows the index value to help
    * catch mistakes in the input file. See comments in IJVectorRead(). */
   ncols = 1;
   while ( (ret = hypre_fscanf(file, "%b %b%*[ \t]%le", &I, &J, &value)) != EOF )
   {
      if (ret != 3)
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error in IJ matrix input file.");
         /* bug fix: the early return used to leak the FILE handle.
            NOTE(review): the partially built 'matrix' is still leaked on
            this path -- presumably acceptable for a fatal read error, but
            consider destroying it as well. */
         fclose(file);
         return hypre_error_flag;
      }

      if (I < ilower || I > iupper)
      {
         HYPRE_IJMatrixAddToValues(matrix, 1, &ncols, &I, &J, &value);
      }
      else
      {
         HYPRE_IJMatrixSetValues(matrix, 1, &ncols, &I, &J, &value);
      }
   }

   HYPRE_IJMatrixAssemble(matrix);

   fclose(file);

   *matrix_ptr = matrix;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixPrint( HYPRE_IJMatrix  matrix,
                     const char     *filename )
{
   /* Write this rank's part of the matrix to "<filename>.<myid>" in the
      same format HYPRE_IJMatrixRead consumes: a header line with the
      local ranges followed by one "row col value" line per entry. */
   MPI_Comm        comm;
   HYPRE_BigInt   *row_partitioning;
   HYPRE_BigInt   *col_partitioning;
   HYPRE_BigInt    ilower, iupper, jlower, jupper;
   HYPRE_BigInt    i, ii;
   HYPRE_Int       j;
   HYPRE_Int       ncols;
   HYPRE_BigInt   *cols;
   HYPRE_Complex  *values;
   HYPRE_Int       myid;
   char            new_filename[255];
   FILE           *file;
   void           *object;

   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* only the ParCSR storage format is supported; this guard also means
      the inner ObjectType re-checks below are always true */
   if ( (hypre_IJMatrixObjectType(matrix) != HYPRE_PARCSR) )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   comm = hypre_IJMatrixComm(matrix);
   hypre_MPI_Comm_rank(comm, &myid);

   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "w")) == NULL)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* 2-entry local partitioning: [0] is first, [1] is last+1 */
   ilower = row_partitioning[0];
   iupper = row_partitioning[1] - 1;
   jlower = col_partitioning[0];
   jupper = col_partitioning[1] - 1;
#else
   /* global table indexed by rank */
   ilower = row_partitioning[myid];
   iupper = row_partitioning[myid+1] - 1;
   jlower = col_partitioning[myid];
   jupper = col_partitioning[myid+1] - 1;
#endif
   hypre_fprintf(file, "%b %b %b %b\n", ilower, iupper, jlower, jupper);

   HYPRE_IJMatrixGetObject(matrix, &object);

   for (i = ilower; i <= iupper; i++)
   {
      if ( hypre_IJMatrixObjectType(matrix) == HYPRE_PARCSR )
      {
         /* GetRow wants a zero-based local row index */
#ifdef HYPRE_NO_GLOBAL_PARTITION
         ii = i - hypre_IJMatrixGlobalFirstRow(matrix);
#else
         ii = i - row_partitioning[0];
#endif
         HYPRE_ParCSRMatrixGetRow((HYPRE_ParCSRMatrix) object,
                                  ii, &ncols, &cols, &values);
         /* shift the borrowed column indices to global numbering for
            printing (undone below before RestoreRow) */
         for (j = 0; j < ncols; j++)
         {
#ifdef HYPRE_NO_GLOBAL_PARTITION
            cols[j] += hypre_IJMatrixGlobalFirstCol(matrix);
#else
            cols[j] += col_partitioning[0];
#endif
         }
      }

      for (j = 0; j < ncols; j++)
      {
         hypre_fprintf(file, "%b %b %.14e\n", i, cols[j], values[j]);
      }

      if ( hypre_IJMatrixObjectType(matrix) == HYPRE_PARCSR )
      {
         /* restore the in-place shifted indices before handing the row
            back -- cols/values point into the matrix's own storage */
         for (j = 0; j < ncols; j++)
         {
#ifdef HYPRE_NO_GLOBAL_PARTITION
            cols[j] -= hypre_IJMatrixGlobalFirstCol(matrix);
#else
            cols[j] -= col_partitioning[0];
#endif
         }
         HYPRE_ParCSRMatrixRestoreRow((HYPRE_ParCSRMatrix) object,
                                      ii, &ncols, &cols, &values);
      }
   }

   fclose(file);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Store the caller-supplied OpenMP flag on an IJ matrix.
 * Returns hypre_error_flag; a NULL matrix is reported as an argument error. */
HYPRE_Int
HYPRE_IJMatrixSetOMPFlag( HYPRE_IJMatrix matrix,
                          HYPRE_Int omp_flag )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixOMPFlag(ij) = omp_flag;
   return hypre_error_flag;
}
|
tmandel2.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
 * maxiter denotes the maximum number of iterations at each point -- by default 10000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
 * windowsize denotes the size of the image (display window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <unistd.h>
#include <malloc.h>
#include <sys/time.h>
/* Wall-clock time in microseconds since the Unix epoch, as a double. */
double getusec_() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double usec = (double) now.tv_sec * (double) 1e6;
    return usec + (double) now.tv_usec;
}
/* Timing helpers built on getusec_(): START records the current time into
 * the caller-declared `stamp`; STOP converts the elapsed time to seconds
 * and prints it with the given label. */
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
                            stamp = stamp/1e6;\
                            printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // variables used to traverse the problem space
              // (note: the loops in mandelbrot() and main() declare their
              // own locals named row/col, which shadow these globals)
/* Structure definition for complex numbers */
typedef struct {
    double real, imag;
} complex;
/*
 * Compute the Mandelbrot iteration count for every pixel of a
 * height x width image and store it in output[row][col].
 *
 * real_min/imag_min + scale_real/scale_imag map pixel coordinates to the
 * complex plane; rows are flipped (height-1-row) so the y axis grows upward.
 * Rows are distributed over OpenMP tasks; each output cell is written by
 * exactly one task, so no synchronization is needed.
 *
 * Fix: the divergence test previously compared |z|^2 against N*N, where N
 * is the *display range* macro. The mathematical escape radius is 2
 * regardless of the viewing window (|z| > 2 guarantees divergence), so the
 * threshold is now an independent constant. Behavior is identical for the
 * default N == 2, but no longer breaks if the display range is changed.
 */
void mandelbrot(int height,
                int width,
                double real_min,
                double imag_min,
                double scale_real,
                double scale_imag,
                int maxiter,
                int ** output)
{
    /* Escape radius squared: |z| > 2 implies divergence; squared to avoid sqrt. */
    const double escape_radius_sq = 4.0;
    /* Calculate points and save/display */
    #pragma omp taskloop num_tasks(16)
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            /* Scale display coordinates to the actual region; height-1-row
             * so the y axis displays with larger values at the top. */
            const double cr = real_min + (double) col * scale_real;
            const double ci = imag_min + (double) (height - 1 - row) * scale_imag;
            /* Iterate z <- z^2 + c until divergence or maxiter. */
            double zr = 0.0, zi = 0.0;
            double lengthsq;
            int k = 0;
            do {
                double tmp = zr * zr - zi * zi + cr;
                zi = 2.0 * zr * zi + ci;
                zr = tmp;
                lengthsq = zr * zr + zi * zi;
                ++k;
            } while (lengthsq < escape_radius_sq && k < maxiter);
            output[row][col] = k;
        }
    }
}
/*
 * Entry point: parse options, allocate the image, compute the Mandelbrot
 * set (timed), optionally write the raw image to "mandel.out", then
 * release all resources.
 *
 * Fixes: the usage text claimed a default of 1000 iterations while the
 * code default is 10000; the output file was never fclose()d; the image
 * rows were never freed; malloc results were unchecked.
 */
int main(int argc, char *argv[]) {
    int maxiter = 10000;      /* maximum iterations per point (see usage text) */
    double real_min;
    double real_max;
    double imag_min;
    double imag_max;
    int width = NPIXELS;      /* dimensions of display window */
    int height = NPIXELS;
    double size = N, x0 = 0, y0 = 0;
    FILE *fp = NULL;
    int **output;
    double scale_real, scale_imag;

    /* Process command-line arguments */
    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "-i") == 0) {
            maxiter = atoi(argv[++i]);
        }
        else if (strcmp(argv[i], "-w") == 0) {
            width = atoi(argv[++i]);
            height = width;
        }
        else if (strcmp(argv[i], "-s") == 0) {
            size = atof(argv[++i]);
        }
        else if (strcmp(argv[i], "-o") == 0) {
            if ((fp = fopen("mandel.out", "wb")) == NULL) {
                fprintf(stderr, "Unable to open file\n");
                return EXIT_FAILURE;
            }
        }
        else if (strcmp(argv[i], "-c") == 0) {
            x0 = atof(argv[++i]);
            y0 = atof(argv[++i]);
        }
        else {
            fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
            fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
            /* default shown must match the initializer of maxiter above */
            fprintf(stderr, " -i to specify maximum number of iterations at each point (default 10000)\n");
            fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
            fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
            fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
            return EXIT_FAILURE;
        }
    }

    /* The examined region is a square of half-width `size` around (x0, y0). */
    real_min = x0 - size;
    real_max = x0 + size;
    imag_min = y0 - size;
    imag_max = y0 + size;

    /* Produce text output */
    fprintf(stdout, "\n");
    fprintf(stdout, "Mandelbrot program\n");
    fprintf(stdout, "center = (%g, %g), size = %g\n",
            (real_max + real_min)/2, (imag_max + imag_min)/2,
            (real_max - real_min)/2);
    fprintf(stdout, "maximum iterations = %d\n", maxiter);
    fprintf(stdout, "\n");

    /* Allocate the result image row by row; abort on allocation failure. */
    output = malloc(height * sizeof(int *));
    if (output == NULL) {
        fprintf(stderr, "Out of memory\n");
        return EXIT_FAILURE;
    }
    for (int row = 0; row < height; ++row) {
        output[row] = malloc(width * sizeof(int));
        if (output[row] == NULL) {
            fprintf(stderr, "Out of memory\n");
            return EXIT_FAILURE;
        }
    }

    /* Compute factors to scale computational region to window */
    scale_real = (double) (real_max - real_min) / (double) width;
    scale_imag = (double) (imag_max - imag_min) / (double) height;

    /* Start timing */
    double stamp;
    START_COUNT_TIME;

    #pragma omp parallel
    #pragma omp single // uncomment single pragma when testing with the original libgomp
    {
        mandelbrot(height, width, real_min, imag_min, scale_real, scale_imag,
                   maxiter, output);
    }

    /* End timing */
    STOP_COUNT_TIME("Total execution time");

    /* Write the image if requested, then release all resources
     * (fp and the image rows were previously leaked). */
    if (fp != NULL) {
        for (int row = 0; row < height; ++row)
            if (fwrite(output[row], sizeof(int), width, fp) != (size_t) width)
                fprintf(stderr, "Output file not written correctly\n");
        fclose(fp);
    }
    for (int row = 0; row < height; ++row)
        free(output[row]);
    free(output);
    return EXIT_SUCCESS;
}
|
BRKGA.h | /**
* BRKGA.h
*
* This template class encapsulates a Biased Random-key Genetic Algorithm for minimization problems
* with K independent Populations stored in two vectors of Population, current and previous.
* It supports multi-threading via OpenMP, and implements the following key methods:
*
* - BRKGA() constructor: initializes the populations with parameters described below.
* - evolve() operator: evolve each Population following the BRKGA methodology. This method
* supports OpenMP to evolve up to K independent Populations in parallel.
* Please note that double Decoder::decode(...) MUST be thread-safe.
*
* Required parameters:
* - n: number of genes in each chromosome
* - p: number of elements in each population
* - pe: pct of elite items into each population
* - pm: pct of mutants introduced at each generation into the population
* - rhoe: probability that an offspring inherits the allele of its elite parent
*
* Optional parameters:
* - K: number of independent Populations (set to 1 if not supplied)
* - MAX_THREADS: number of threads to perform parallel decoding (set to 1 if not supplied)
* WARNING: Decoder::decode() MUST be thread-safe if MAX_THREADS > 1!
*
* The following objects are required upon declaration:
* RNG: random number generator that implements the methods below.
* - RNG(unsigned long seed) to initialize a new RNG with 'seed'
* - double rand() to return a double precision random deviate in range [0,1)
* - unsigned long randInt() to return a >=32-bit unsigned random deviate in range [0,2^32-1)
* - unsigned long randInt(N) to return a unsigned random deviate in range [0, N] with N < 2^32
*
* Decoder: problem-specific decoder that implements any of the decode methods outlined below. When
* compiling and linking BRKGA with -fopenmp (i.e., with multithreading support via
* OpenMP), the method must be thread-safe.
* - double decode(const vector< double >& chromosome) const, if you don't want to change
* chromosomes inside the framework, or
* - double decode(vector< double >& chromosome) const, if you'd like to update a chromosome.
* WARNING: even though both methods use const correctness to enforce that they are thread safe
* the use of mutable within the Decoder class could void such a feature! In other
* words, DO NOT use mutable within the decoder.
*
* Created on : Jun 22, 2010 by rtoso
* Last update: Sep 15, 2011 by rtoso
* Authors : Rodrigo Franco Toso <rtoso@cs.rutgers.edu>
* Mauricio G.C. Resende <mgcr@research.att.com>
* Copyright 2010, 2011 Rodrigo Franco Toso and Mauricio G.C. Resende.
*
* This file is part of the BRKGA API.
*
* The BRKGA API is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The BRKGA API is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with the BRKGA API. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef BRKGA_H
#define BRKGA_H
//#include <omp.h>
#include <algorithm>
#include <exception>
#include <stdexcept>
#include "Population.h"
template< class Decoder, class RNG >
class BRKGA {
public:
	/*
	 * Default constructor
	 * Required hyperparameters:
	 * - n: number of genes in each chromosome
	 * - p: number of elements in each population
	 * - pe: pct of elite items into each population (stored as an absolute count)
	 * - pm: pct of mutants introduced at each generation into the population
	 * - rhoe: probability that an offspring inherits the allele of its elite parent
	 *
	 * Optional parameters:
	 * - K: number of independent Populations
	 * - MAX_THREADS: number of threads to perform parallel decoding
	 *   WARNING: Decoder::decode() MUST be thread-safe; safe if implemented as
	 *   + double Decoder::decode(std::vector< double >& chromosome) const
	 */
	BRKGA(unsigned n, unsigned p, double pe, double pm, double rhoe, const Decoder& refDecoder,
			RNG& refRNG, unsigned K = 1, unsigned MAX_THREADS = 1) throw(std::range_error);
	/**
	 * Destructor; releases the populations allocated by the constructor.
	 */
	~BRKGA();
	/**
	 * Resets all populations with brand new keys
	 */
	void reset();
	/**
	 * Evolve the current populations following the guidelines of BRKGAs
	 * @param generations number of generations to evolve (must be nonzero; default 1)
	 */
	void evolve(unsigned generations = 1);
	/**
	 * Exchange elite-solutions between the populations
	 * @param M number of elite chromosomes to select from each population
	 */
	void exchangeElite(unsigned M) throw(std::range_error);
	/**
	 * Returns the current population
	 */
	const Population& getPopulation(unsigned k = 0) const;
	/**
	 * Returns the chromosome with best fitness so far among all populations
	 */
	const std::vector< double >& getBestChromosome() const;
	/**
	 * Returns the best fitness found so far among all populations
	 */
	double getBestFitness() const;
	// Return copies to the internal parameters:
	unsigned getN() const;
	unsigned getP() const;
	unsigned getPe() const;
	unsigned getPm() const;
	unsigned getPo() const;
	double getRhoe() const;
	unsigned getK() const;
	unsigned getMAX_THREADS() const;

private:
	// I don't see any reason to pimpl the internal methods and data, so here they are:
	// Hyperparameters:
	const unsigned n;	// number of genes in the chromosome
	const unsigned p;	// number of elements in the population
	const unsigned pe;	// number of elite items in the population
	const unsigned pm;	// number of mutants introduced at each generation into the population
	const double rhoe;	// probability that an offspring inherits the allele of its elite parent
	// Templates:
	RNG& refRNG;				// reference to the random number generator
	const Decoder& refDecoder;	// reference to the problem-dependent Decoder
	// Parallel populations parameters:
	const unsigned K;			// number of independent parallel populations
	const unsigned MAX_THREADS;	// number of threads for parallel decoding
	// Data:
	std::vector< Population* > previous;	// previous populations
	std::vector< Population* > current;		// current populations
	// Local operations:
	void initialize(const unsigned i);	// initialize current population 'i' with random keys
	void evolution(Population& curr, Population& next);	// evolve curr into next
	bool isRepeated(const std::vector< double >& chrA, const std::vector< double >& chrB) const;
};
// Constructor: converts the pe/pm fractions into absolute counts, validates
// every hyperparameter, then builds and decodes K initial populations.
template< class Decoder, class RNG >
BRKGA< Decoder, RNG >::BRKGA(unsigned _n, unsigned _p, double _pe, double _pm, double _rhoe,
		const Decoder& decoder, RNG& rng, unsigned _K, unsigned MAX) throw(std::range_error) :
		n(_n), p(_p), pe(unsigned(_pe * p)), pm(unsigned(_pm * p)), rhoe(_rhoe), refRNG(rng),
		refDecoder(decoder), K(_K), MAX_THREADS(MAX), previous(K, 0), current(K, 0) {
	// Error check (pe and pm were converted from fractions of p above):
	using std::range_error;
	if(n == 0) { throw range_error("Chromosome size equals zero."); }
	if(p == 0) { throw range_error("Population size equals zero."); }
	if(pe == 0) { throw range_error("Elite-set size equals zero."); }
	if(pe > p) { throw range_error("Elite-set size greater than population size (pe > p)."); }
	if(pm > p) { throw range_error("Mutant-set size (pm) greater than population size (p)."); }
	if(pe + pm > p) { throw range_error("elite + mutant sets greater than population size (p)."); }
	if(K == 0) { throw range_error("Number of parallel populations cannot be zero."); }
	// Initialize and decode each chromosome of the current population, then copy to previous:
	for(unsigned i = 0; i < K; ++i) {
		// Allocate:
		current[i] = new Population(n, p);
		// Initialize (random keys, decode, sort by fitness):
		initialize(i);
		// Then just copy to previous:
		previous[i] = new Population(*current[i]);
	}
}
// Destructor: release every population pair allocated by the constructor.
template< class Decoder, class RNG >
BRKGA< Decoder, RNG >::~BRKGA() {
	for(unsigned k = 0; k < K; ++k) {
		delete current[k];
		delete previous[k];
	}
}
// Accessor for population k (bounds-checked only when RANGECHECK is defined).
template< class Decoder, class RNG >
const Population& BRKGA< Decoder, RNG >::getPopulation(unsigned k) const {
	#ifdef RANGECHECK
	if(k >= K) { throw std::range_error("Invalid population identifier."); }
	#endif
	return *(current[k]);
}
// Best (lowest) fitness over all populations.  Each population is kept
// sorted, so slot 0 of its fitness vector holds that population's best.
template< class Decoder, class RNG >
double BRKGA< Decoder, RNG >::getBestFitness() const {
	double bestSoFar = current[0]->fitness[0].first;
	for(unsigned k = 1; k < K; ++k) {
		const double candidate = current[k]->fitness[0].first;
		if(candidate < bestSoFar) { bestSoFar = candidate; }
	}
	return bestSoFar;
}
// Chromosome with the best fitness found so far, across all populations.
template< class Decoder, class RNG >
const std::vector< double >& BRKGA< Decoder, RNG >::getBestChromosome() const {
	// Find which population currently holds the overall best chromosome:
	unsigned winner = 0;
	for(unsigned k = 1; k < K; ++k) {
		if(current[k]->getBestFitness() < current[winner]->getBestFitness()) {
			winner = k;
		}
	}
	return current[winner]->getChromosome(0);	// The top one :-)
}
// Re-seed every population with fresh random keys (and re-decode + re-sort).
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::reset() {
	for(unsigned k = 0; k < K; ++k) { initialize(k); }
}
// Run the evolutionary loop for the given number of generations.  Each
// population is evolved into its scratch buffer, then the buffers swap roles.
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::evolve(unsigned generations) {
	#ifdef RANGECHECK
	if(generations == 0) { throw std::range_error("Cannot evolve for 0 generations."); }
	#endif
	for(unsigned g = 0; g < generations; ++g) {
		for(unsigned k = 0; k < K; ++k) {
			evolution(*current[k], *previous[k]);	// evolve (curr -> next)
			std::swap(current[k], previous[k]);		// next becomes current
		}
	}
}
// Migrate the M best chromosomes of every population into every other
// population, overwriting each receiver's worst chromosomes (from the back),
// then re-sort each population.
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::exchangeElite(unsigned M) throw(std::range_error) {
	#ifdef RANGECHECK
	if(M == 0 || M >= p) { throw std::range_error("M cannot be zero or >= p."); }
	#endif
	for(unsigned i = 0; i < K; ++i) {
		// Population i will receive some elite members from each Population j below:
		unsigned dest = p - 1;	// Last chromosome of i (will be updated below)
		for(unsigned j = 0; j < K; ++j) {
			if(j == i) { continue; }
			// Copy the M best of Population j into Population i:
			for(unsigned m = 0; m < M; ++m) {
				// Copy the m-th best of Population j into the 'dest'-th position of Population i:
				const std::vector< double >& bestOfJ = current[j]->getChromosome(m);
				std::copy(bestOfJ.begin(), bestOfJ.end(), current[i]->getChromosome(dest).begin());
				// Its fitness is already known; carry it over with the keys:
				current[i]->fitness[dest].first = current[j]->fitness[m].first;
				--dest;
			}
		}
	}
	// Re-sort, since fitness slots were overwritten above:
	for(int j = 0; j < int(K); ++j) { current[j]->sortFitness(); }
}
// Fill population i with uniform random keys in [0,1), decode every
// chromosome (in parallel when OpenMP is enabled), and sort by fitness.
template< class Decoder, class RNG >
inline void BRKGA< Decoder, RNG >::initialize(const unsigned i) {
	// Random keys; RNG is consumed chromosome-major, allele-minor:
	for(unsigned chrom = 0; chrom < p; ++chrom) {
		for(unsigned allele = 0; allele < n; ++allele) {
			(*current[i])(chrom, allele) = refRNG.rand();
		}
	}
	// Decode:
	#ifdef _OPENMP
	#pragma omp parallel for num_threads(MAX_THREADS)
	#endif
	for(int j = 0; j < int(p); ++j) {
		current[i]->setFitness(j, refDecoder.decode((*current[i])(j)));
	}
	// Sort:
	current[i]->sortFitness();
}
// One BRKGA generation: copy the pe elite chromosomes of 'curr' into 'next',
// mate p-pe-pm offspring (biased crossover against an elite parent), append
// pm fresh mutants, decode the new chromosomes, and sort 'next' by fitness.
// NOTE: the RNG consumption order here is part of the algorithm's
// reproducibility contract — do not reorder the rand()/randInt() calls.
template< class Decoder, class RNG >
inline void BRKGA< Decoder, RNG >::evolution(Population& curr, Population& next) {
	// We now will set every chromosome of 'current', iterating with 'i':
	unsigned i = 0;	// Iterate chromosome by chromosome
	unsigned j = 0;	// Iterate allele by allele
	// 2. The 'pe' best chromosomes are maintained, so we just copy these into 'current':
	while(i < pe) {
		for(j = 0 ; j < n; ++j) { next(i,j) = curr(curr.fitness[i].second, j); }
		// Elite fitness is already known, so these slots skip re-decoding below:
		next.fitness[i].first = curr.fitness[i].first;
		next.fitness[i].second = i;
		++i;
	}
	// 3. We'll mate 'p - pe - pm' pairs; initially, i = pe, so we need to iterate until i < p - pm:
	while(i < p - pm) {
		// Select an elite parent (uniform over the pe best):
		const unsigned eliteParent = (refRNG.randInt(pe - 1));
		// Select a non-elite parent (uniform over the remaining p - pe):
		const unsigned noneliteParent = pe + (refRNG.randInt(p - pe - 1));
		// Mate: each allele comes from the elite parent with probability rhoe:
		for(j = 0; j < n; ++j) {
			const unsigned& sourceParent = ((refRNG.rand() < rhoe) ? eliteParent : noneliteParent);
			next(i, j) = curr(curr.fitness[sourceParent].second, j);
		}
		++i;
	}
	// We'll introduce 'pm' mutants (brand-new random chromosomes):
	while(i < p) {
		for(j = 0; j < n; ++j) { next(i, j) = refRNG.rand(); }
		++i;
	}
	// Time to compute fitness, in parallel (slots [0, pe) were copied above):
	#ifdef _OPENMP
	#pragma omp parallel for num_threads(MAX_THREADS)
	#endif
	for(int i = int(pe); i < int(p); ++i) {
		next.setFitness( i, refDecoder.decode(next.population[i]) );
	}
	// Now we must sort 'current' by fitness, since things might have changed:
	next.sortFitness();
}
// Trivial accessors for the (const) configuration parameters:
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getN() const { return n; }	// chromosome length
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getP() const { return p; }	// population size
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPe() const { return pe; }	// elite-set size (absolute)
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPm() const { return pm; }	// mutant-set size (absolute)
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPo() const { return p - pe - pm; }	// offspring per generation
template< class Decoder, class RNG >
double BRKGA<Decoder, RNG>::getRhoe() const { return rhoe; }	// elite-allele inheritance probability
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getK() const { return K; }	// number of populations
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getMAX_THREADS() const { return MAX_THREADS; }	// decoding threads
#endif
|
thdat105.c | /*
* Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the
* following conditions are met:
*
* 1. Redistributions of source code must retain this list
* of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce this
* list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include <config.h>
#include <stdlib.h>
#include <string.h>
#include <thtk/thtk.h>
#include "thcrypt105.h"
#include "thdat.h"
#include "util.h"
/* On-disk archive header of a th105 .dat file: a 16-bit entry count
 * followed by the 32-bit byte size of the (encrypted) entry table that
 * comes right after it. */
typedef struct {
    uint16_t entry_count;
    uint32_t size;
} th105_archive_header_t;
/* Read and decrypt a th105 archive's entry table and populate
 * thdat->entries.  Returns 1 on success, 0 on error.
 *
 * Fixes: header_buf was leaked on the short-read path; malloc/calloc
 * results were unchecked; strncpy() left entry->name unterminated when
 * the stored name fills name_length bytes (th105_close later strlen()s
 * the name, so that was undefined behavior). */
static int
th105_open(
    thdat_t* thdat,
    thtk_error_t** error)
{
    uint16_t entry_count;
    uint32_t header_size;

    if (thtk_io_read(thdat->stream, &entry_count, 2, error) == -1)
        return 0;
    if (thtk_io_read(thdat->stream, &header_size, 4, error) == -1)
        return 0;

    unsigned char* header_buf = malloc(header_size);
    if (!header_buf) {
        thtk_error_new(error, "memory allocation failure");
        return 0;
    }
    if (thtk_io_read(thdat->stream, header_buf, header_size, error) !=
        header_size) {
        free(header_buf);
        return 0;
    }
    th_crypt105_list(header_buf, header_size, 0xc5, 0x83, 0x53);

    thdat->entry_count = entry_count;
    thdat->entries = calloc(entry_count, sizeof(thdat_entry_t));
    if (entry_count && !thdat->entries) {
        free(header_buf);
        thtk_error_new(error, "memory allocation failure");
        return 0;
    }
    /* Each table record: offset (4) + size (4) + name length (1) + name. */
    unsigned char* ptr = header_buf;
    for (uint16_t i = 0; i < entry_count; ++i) {
        thdat_entry_t* entry = thdat->entries + i;
        thdat_entry_init(entry);
        entry->offset = *((uint32_t*)ptr);
        ptr += 4;
        entry->size = *((uint32_t*)ptr);
        ptr += 4;
        // zsize and extra are not used.
        unsigned char name_length = *(ptr++);
        /* Copy and terminate explicitly (assumes entry->name can hold
         * name_length + 1 bytes, i.e. 256 for an 8-bit length -- confirm
         * against the thdat_entry_t declaration in thdat.h). */
        memcpy(entry->name, ptr, name_length);
        entry->name[name_length] = '\0';
        ptr += name_length;
    }
    free(header_buf);
    return 1;
}
/* Decrypt one entry's file data in place.  The entry's byte offset inside
 * the archive seeds the cipher stream. */
static void
th105_decrypt_data(
    thdat_t* archive,
    thdat_entry_t* entry,
    unsigned char* data)
{
    (void)archive; /* unused; kept for the common callback signature */
    th_crypt105_file(data, entry->size, entry->offset);
}
/* Extract one entry: read its raw bytes from the archive (serialized with
 * the writer side via an OpenMP critical section), decrypt, and write the
 * plaintext to `output`.  Returns 1 on success, -1 on error.
 *
 * Fixes: `data` was leaked on both error paths; malloc was unchecked. */
static ssize_t
th105_read(
    thdat_t* thdat,
    int entry_index,
    thtk_io_t* output,
    thtk_error_t** error)
{
    thdat_entry_t* entry = thdat->entries + entry_index;
    unsigned char* data = malloc(entry->size);
    if (!data) {
        thtk_error_new(error, "memory allocation failure");
        return -1;
    }
    int failed = 0;
#pragma omp critical
    {
        failed = (thtk_io_seek(thdat->stream, entry->offset, SEEK_SET, error) == -1) ||
            (thtk_io_read(thdat->stream, data, entry->size, error) != entry->size);
    }
    if (failed) {
        free(data);
        return -1;
    }
    th105_decrypt_data(thdat, entry, data);
    if (thtk_io_write(output, data, entry->size, error) == -1) {
        free(data);
        return -1;
    }
    free(data);
    return 1;
}
/* Prepare a new archive for writing: compute the size of the header that
 * th105_close() will emit, and seek past it so entry data can be appended.
 * Returns 1 on success, 0 on error. */
static int
th105_create(
    thdat_t* thdat,
    thtk_error_t** error)
{
    // entry list is given
    /* 6 bytes for the file header: entry count (2) + table size (4). */
    off_t header_bytes = 6;
    for (unsigned int i = 0; i < thdat->entry_count; ++i) {
        const thdat_entry_t* entry = thdat->entries + i;
        /* Per-entry record: offset (4) + size (4) + name length (1) + name. */
        header_bytes += 8 + 1 + strlen(entry->name);
    }
    thdat->offset = header_bytes;
    if (thtk_io_seek(thdat->stream, thdat->offset, SEEK_SET, error) == -1)
        return 0;
    return 1;
}
/* Encrypt one entry's file data in place before it is appended to the
 * archive.  The cipher is seeded with archive->offset, the position where
 * the entry is about to be written; th105_write() stores that same value
 * into entry->offset, which is what th105_decrypt_data() uses as its seed.
 * NOTE(review): this reads archive->offset outside the critical section in
 * th105_write() — confirm that writes are serialized by the caller. */
static void
th105_encrypt_data(
    thdat_t* archive,
    thdat_entry_t* entry,
    unsigned char* data)
{
    th_crypt105_file(data, entry->size, archive->offset);
}
/* Append one entry's data to the archive: read the plaintext from `input`,
 * encrypt it, then (inside a critical section) write it and record its
 * offset.  Returns the entry size on success, -1 on error.
 *
 * Fixes: `data` was leaked on the seek/short-read error paths; malloc was
 * unchecked; the read result was truncated to int before comparison. */
static ssize_t
th105_write(
    thdat_t* thdat,
    int entry_index,
    thtk_io_t* input,
    size_t input_length,
    thtk_error_t** error)
{
    thdat_entry_t* entry = thdat->entries + entry_index;
    unsigned char* data;
    entry->size = input_length;
    data = malloc(entry->size);
    if (!data) {
        thtk_error_new(error, "memory allocation failure");
        return -1;
    }
    if (thtk_io_seek(input, 0, SEEK_SET, error) == -1) {
        free(data);
        return -1;
    }
    ssize_t ret = thtk_io_read(input, data, entry->size, error);
    if (ret == -1 || ret != (ssize_t)entry->size) {
        free(data);
        return -1;
    }
    th105_encrypt_data(thdat, entry, data);
    int failed = 0;
#pragma omp critical
    {
        failed = (thtk_io_write(thdat->stream, data, entry->size, error) != entry->size);
        if (!failed) {
            /* Record where this entry landed and advance the append cursor. */
            entry->offset = thdat->offset;
            thdat->offset += entry->size;
        }
    }
    free(data);
    if (failed)
        return -1;
    return entry->size;
}
/* Finalize the archive: build the entry table, encrypt it, and write the
 * 6-byte file header plus the table at the start of the stream.
 * Returns 1 on success, 0 on error.
 *
 * Fixes: `buffer` was leaked on every error return after its allocation;
 * malloc was unchecked. */
static int
th105_close(
    thdat_t* thdat,
    thtk_error_t** error)
{
    unsigned char* buffer;
    uint16_t entry_count = thdat->entry_count;
    uint32_t header_size = 0;

    /* Per-entry record: offset (4) + size (4) + name length (1) + name. */
    for (unsigned i = 0; i < entry_count; ++i) {
        const size_t namelen = strlen(thdat->entries[i].name);
        header_size += 9 + namelen;
    }
    if (header_size == 0) {
        thtk_error_new(error, "no entries");
        return 0;
    }
    buffer = malloc(header_size);
    if (!buffer) {
        thtk_error_new(error, "memory allocation failure");
        return 0;
    }
    unsigned char* buffer_ptr = buffer;
    for (unsigned i = 0; i < entry_count; i++) {
        uint32_t* buffer_ptr_32 = (uint32_t*) buffer_ptr;
        const thdat_entry_t* entry = thdat->entries + i;
        const uint8_t namelen = strlen(entry->name);
        *(buffer_ptr_32++) = entry->offset;
        *(buffer_ptr_32++) = entry->size;
        buffer_ptr = (unsigned char*) buffer_ptr_32;
        *(buffer_ptr++) = namelen;
        buffer_ptr = MEMPCPY(buffer_ptr, entry->name, namelen);
    }
    th_crypt105_list(buffer, header_size, 0xc5, 0x83, 0x53);

    /* Write header + table; buffer must be freed on all paths. */
    int ok = thtk_io_seek(thdat->stream, 0, SEEK_SET, error) != -1
        && thtk_io_write(thdat->stream, &entry_count, 2, error) != -1
        && thtk_io_write(thdat->stream, &header_size, 4, error) != -1
        && thtk_io_write(thdat->stream, buffer, header_size, error) != -1;
    free(buffer);
    return ok ? 1 : 0;
}
/* Module descriptor wiring the th105 format callbacks into the generic
 * thdat dispatcher. */
const thdat_module_t archive_th105 = {
    0,            /* module flags: none set (see thdat.h for meanings) */
    th105_open,   /* parse an existing archive's header */
    th105_create, /* start a new archive (reserve header space) */
    th105_close,  /* write the header/entry table on completion */
    th105_read,   /* extract a single entry */
    th105_write   /* append a single entry */
};
|
GB_unop__identity_uint32_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_int8)
// op(A') function: GB (_unop_tran__identity_uint32_int8)
// C type: uint32_t
// A type: int8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = (uint32_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity unary operator with typecast,
// converting int8_t inputs to uint32_t outputs, over anz entries.
// Auto-generated kernel; the GB_* macros above parameterize the template.
GrB_Info GB (_unop_apply__identity_uint32_int8)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap (NULL otherwise)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // sparse/full case: every entry in Ax is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            int8_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// All of the work is done by the shared template GB_unop_transpose.c,
// which is specialized through the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint32_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // workspaces, as used by GB_unop_transpose.c
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
critical-1.c | int i;
/* Compiler test for the OpenMP `critical` construct: exercises the
 * unnamed form, the named form, a named form with a hint clause, and a
 * named form with a comma-separated hint clause.  Each guarded statement
 * increments the shared counter i.  (Code must stay byte-identical: this
 * file exists to exercise the parser.) */
void
foo (void)
{
#pragma omp critical
  i = i + 1;
#pragma omp critical (foo)
  i = i + 1;
#pragma omp critical (foo) hint (0)
  i = i + 1;
#pragma omp critical (foo),hint(1)
  i = i + 1;
}
|
demo.c | // openmp program that solves a random system of up to 64 eqs in up to 64 vars.
#include <assert.h>
#include <sys/time.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <err.h>
#include <omp.h>
#include "feslite.h"
/* Try to solve a large system as fast as possible. */
int n = 45;
/* Index of the quadratic coefficient for the pair (i, j), i < j, in the
 * packed triangular layout: the entries of column j start at j*(j-1)/2. */
static inline int idxq(int i, int j)
{
    int column_base = j * (j - 1) / 2;
    return column_base + i;
}
/* Bit-sliced evaluation of 32 quadratic boolean systems at once: bit b of
 * the result is the value of system b (whose coefficients occupy bit b of
 * Fq/Fl) at the point given by the low n bits of x.  Linear coefficients
 * are read with the given stride. */
u32 eval32(int n, const u32 * Fq, const u32 * Fl, int stride, u32 x)
{
    /* Broadcast each bit of x into an all-ones / all-zeros 32-bit mask. */
    u32 mask[32];
    for (int k = 0; k < n; k++) {
        mask[k] = (x & 0x0001) ? 0xffffffff : 0x00000000;
        x >>= 1;
    }
    u32 acc = Fl[0];	/* constant terms */
    for (int a = 0; a < n; a++) {
        u32 va = mask[a];
        /* degree-1 contribution of variable a */
        u32 lin = Fl[stride * (1 + a)]; // FIXME : get rid of this multiplication
        acc ^= lin & va;
        /* degree-2 contributions of the pairs (b, a), b < a */
        for (int b = 0; b < a; b++) {
            acc ^= Fq[idxq(b, a)] & (va & mask[b]);
        }
    }
    return acc;
}
/* Evaluate the full quadratic boolean system (up to 64 variables, bit-sliced
 * over 64 lanes) at the point given by the low n bits of x.  Returns 0 in
 * lane b iff x satisfies system b. */
u64 eval64(int n, const u64 * Q, const u64 * L, u64 x)
{
    /* Broadcast each bit of x into an all-ones / all-zeros 64-bit mask. */
    u64 mask[64];
    for (int k = 0; k < n; k++) {
        mask[k] = (x & 1) ? 0xffffffffffffffffull : 0;
        x >>= 1ull;
    }
    assert(x == 0);	/* x must fit in n bits */
    u64 acc = L[0];	/* constant terms */
    for (int i = 0; i < n; i++) {
        /* degree-1 contribution of variable i */
        acc ^= L[1 + i] & mask[i];
        /* degree-2 contributions of the pairs (j, i), j < i */
        for (int j = 0; j < i; j++)
            acc ^= Q[idxq(j, i)] & mask[i] & mask[j];
    }
    return acc;
}
/* 64 pseudo-random bits assembled from two lrand48() draws.
 * NOTE(review): lrand48() returns values in [0, 2^31), so bits 31 and 63
 * of the result are always clear here — confirm whether full 64-bit
 * coverage is required.  Also, the evaluation order of the two lrand48()
 * calls inside the expression is unspecified. */
u64 rand64()
{
    return ((u64) lrand48()) ^ (((u64) lrand48()) << 32ull);
}
u32 Fq[2016];        /* quadratic coefficients of the truncated 32-bit system
                      * (2016 = idxq(0, 64), enough for up to 64 variables) */
u64 Fq_start[2016];  /* quadratic coefficients of the full 64-bit system */
u64 Fl_start[65];    /* constant + linear coefficients of the full system */
#define MAX_BATCH_SIZE 64
/* A batch of up to m specialized 32-variable systems handed to the solver
 * as one unit.  Fl is a flexible array member holding 33 coefficient words
 * per lane; coefficient j of lane i is stored at Fl[m*j + i] (see push()). */
struct bundle_t {
    int i;                          /* number of lanes filled so far */
    u32 prefixes[MAX_BATCH_SIZE];   /* high-variable assignment per lane */
    u32 Fl[];
};
int m;                              /* solver's preferred batch size (lanes) */
struct bundle_t *current_bundle;    /* bundle currently being filled */
int in_flight, created, solved;     /* progress counters shared across tasks */
void fresh_bundle()
{
current_bundle = malloc(sizeof(struct bundle_t) + 33*m*sizeof(u32));
if (current_bundle == NULL)
err(1, "impossible to allocate new bundle");
current_bundle->i = 0;
#pragma omp atomic
in_flight++;
}
/* Worker-task body: solve one full bundle of m specialized 32-variable
 * systems, verify each candidate against the original 64-bit system,
 * print any true solution, then free the bundle and update the shared
 * progress counters. */
void process_bundle(struct bundle_t *ready_bundle)
{
    /* solve ready bundle */
    int count = 256;        /* max candidate solutions kept per lane */
    u32 buffer[count * m];  /* VLA: candidate solutions, lane-major */
    int size[m];            /* number of candidates found per lane */
    feslite_solve(32, m, Fq, ready_bundle->Fl, count, buffer, size);
    // check against remaining equations, print
    for (int i = 0; i < m; i++)
        for (int j = 0; j < size[i]; j++) {
            u32 x = buffer[count*i + j];
            /* Re-check against the 32-bit system; lane i's linear
             * coefficients are interleaved with stride m. */
            u32 y = eval32(32, Fq, ready_bundle->Fl + i, m, x);
            assert(y == 0);
            /* Recombine with the lane's variable prefix and test the
             * full 64-bit system. */
            u64 p = ready_bundle->prefixes[i];
            u64 u = x ^ (p << 32);
            u64 v = eval64(n, Fq_start, Fl_start, u);
            if (v == 0)
                printf("\nfound %016" PRIx64 "\n", u);
        }
    /* free ready bundle */
    free(ready_bundle);
    /* Progress accounting; these counters are shared across tasks. */
#pragma omp atomic
    solved += m;
#pragma omp atomic
    in_flight--;
#pragma omp critical
    {
        printf("\rcreated: %d\t Solved: %d\t In-flight: %d ", created, solved, in_flight);
        fflush(stdout);
    }
}
/* push a system to the current bundle */
/*
 * Append one specialized subsystem (33 coefficient words Fl plus the
 * variable-assignment prefix) to the bundle being filled.  When the
 * bundle reaches m lanes it is handed off to an OpenMP task and a fresh
 * bundle is started.  NOTE(review): `created++` and the bundle fill are
 * unsynchronized — presumably push() is only ever called from the single
 * thread running specialize(); verify against callers.
 */
void push(const u32 * Fl, u32 prefix)
{
	created++;
	/* copy to current bundle */
	current_bundle->prefixes[current_bundle->i] = prefix;
	/* column-major layout: lane index varies fastest */
	for (int j = 0; j < 33; j++)
		current_bundle->Fl[m *j + current_bundle->i] = Fl[j];
	current_bundle->i += 1;
	/* bundle full? */
	if (current_bundle->i == m) {
		/* prepare new bundle */
		struct bundle_t *ready_bundle = current_bundle;
		fresh_bundle();
		#pragma omp task
		process_bundle(ready_bundle);
	}
}
/*
 * Recursively enumerate assignments of the top n-32 variables.  Each
 * assignment yields a 32-variable subsystem that is pushed to the batch
 * solver; `prefix` accumulates the bits assigned so far.
 */
void specialize(int n, const u32 * Fl, u32 prefix)
{
	/* Base case: 32 variables left — hand the subsystem to the solver. */
	if (n == 32) {
		push(Fl, prefix);
		return;
	}
	/* Branch 1: set variable (n-1) to zero.  The coefficients are
	   unchanged, only the prefix grows. */
	specialize(n-1, Fl, prefix << 1);
	/* Branch 2: set variable (n-1) to one.  Fold its quadratic column
	   into the linear part and its linear coefficient into the constant. */
	u32 folded[n];
	folded[0] = Fl[0] ^ Fl[n];
	for (int k = 0; k + 1 < n; k++)
		folded[k + 1] = Fl[k + 1] ^ Fq[idxq(k, n-1)];
	specialize(n-1, folded, (prefix << 1) | 1);
}
/*
 * Entry point: build a random n-variable quadratic system over GF(2)
 * with one planted solution, then exhaustively search for it by
 * specializing the top n-32 variables and batch-solving 32-variable
 * subsystems with feslite.  Usage: prog [n]  (32 <= n <= 64).
 */
int main(int argc, char **argv)
{
	if (argc > 1)
		n = atoi(argv[1]);
	/* Fl_start has n+1 entries (max 65) and specialize() recurses down to
	   exactly 32 variables, so n must lie in [32, 64]. */
	if (n < 32 || n > 64)
		errx(1, "n must be in [32, 64] (got %d)", n);
	m = feslite_preferred_batch_size();
	printf("n = %d\n", n);
	int kernel = feslite_default_kernel();
	const char *name = feslite_kernel_name(kernel);
	printf("Using kernel %s, %d lane(s)...\n", name, m);
	srand48(1337);              /* fixed seed: reproducible instance */
	/* initialize a random system */
	int N = idxq(0, n);         /* number of quadratic coefficients */
	for (int i = 0; i < N; i++)
		Fq_start[i] = rand64();
	for (int i = 0; i < n+1; i++)
		Fl_start[i] = rand64();
	/* designated solution; (1ull << 64) would be UB, so special-case n == 64 */
	u64 solution_mask = (n == 64) ? 0xffffffffffffffffull : ((1ull << n) - 1);
	u64 x = rand64() & solution_mask;
	/* adjust the constant term so that x is a root */
	Fl_start[0] ^= eval64(n, Fq_start, Fl_start, x);
	assert(0 == eval64(n, Fq_start, Fl_start, x));
	printf("Planted: %016" PRIx64 "\n", x);
	/* create the truncated 32 bits version */
	u32 Fl[65];
	for (int i = 0; i < N; i++)
		Fq[i] = Fq_start[i] & 0xffffffff;
	for (int i = 0; i < n+1; i++)
		Fl[i] = Fl_start[i] & 0xffffffff;
	fresh_bundle();
	double start_wt = omp_get_wtime();
	/* one thread enumerates; the others consume the spawned tasks */
	#pragma omp parallel
	#pragma omp single
	specialize(n, Fl, 0);
	double stop_wt = omp_get_wtime();
	double seconds = stop_wt - start_wt;
	double rate = n - log2(seconds);    /* log2 of candidates per second */
	printf("\t---> %.2f s\n", seconds);
	printf("\t---> 2^%.2f candidate/s\n", rate);
	return EXIT_SUCCESS;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Count the pixel channels of `image` whose traits include
  UpdatePixelTrait (i.e. the channels that take part in comparison).
  Always reports at least one channel so callers may divide by it.
*/
static size_t GetImageChannels(const Image *image)
{
  size_t
    count;

  ssize_t
    n;

  count=0;
  n=(ssize_t) GetPixelChannels(image);
  for (ssize_t idx=0; idx < n; idx++)
  {
    PixelTrait traits=GetPixelChannelTraits(image,
      GetPixelChannelChannel(image,idx));
    if ((traits & UpdatePixelTrait) != 0)
      count++;
  }
  if (count == 0)
    return((size_t) 1);
  return(count);
}
/*
  CompareImages(): compute the distortion between `image` and
  `reconstruct_image` under `metric`, and return a "difference image" in
  which differing pixels are painted with the highlight color and
  matching pixels with the lowlight color (mask-excluded pixels get the
  masklight color).  Returns NULL on failure; the scalar distortion is
  written through `distortion`.
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  double
    fuzz;

  Image
    *clone_image,
    *difference_image,
    *highlight_image;

  MagickBooleanType
    status;

  PixelInfo
    highlight,
    lowlight,
    masklight;

  RectangleInfo
    geometry;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  /* NOTE(review): this trace log duplicates the one above — likely redundant */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Compute the requested metric first; bail out before any image
    allocation when it fails.
  */
  status=GetImageDistortion(image,reconstruct_image,metric,distortion,
    exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  /* canvas large enough to hold either image */
  columns=MagickMax(image->columns,reconstruct_image->columns);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  SetGeometry(image,&geometry);
  geometry.width=columns;
  geometry.height=rows;
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
  difference_image=ExtentImage(clone_image,&geometry,exception);
  clone_image=DestroyImage(clone_image);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
  /* highlight_image marks which pixels differ */
  highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  status=SetImageStorageClass(highlight_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
  /* defaults may be overridden through compare:* image artifacts */
  (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
  artifact=GetImageArtifact(image,"compare:highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
  (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
  artifact=GetImageArtifact(image,"compare:lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
  (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
  artifact=GetImageArtifact(image,"compare:masklight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
  /*
    Generate difference image.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,highlight_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register Quantum
      *magick_restrict r;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickStatusType
        difference;

      register ssize_t
        i;

      /* pixels excluded by a read mask get the masklight color */
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          SetPixelViaPixelInfo(highlight_image,&masklight,r);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          r+=GetPixelChannels(highlight_image);
          continue;
        }
      difference=MagickFalse;
      /* alpha-weighted comparison of each participating channel */
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        /* any single channel beyond the fuzz threshold marks the pixel */
        if ((distance*distance) > fuzz)
          {
            difference=MagickTrue;
            break;
          }
      }
      if (difference == MagickFalse)
        SetPixelViaPixelInfo(highlight_image,&lowlight,r);
      else
        SetPixelViaPixelInfo(highlight_image,&highlight,r);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
      r+=GetPixelChannels(highlight_image);
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* composite the highlight over the extended clone to form the result */
  (void) CompositeImage(difference_image,highlight_image,image->compose,
    MagickTrue,0,0,exception);
  (void) SetImageAlphaChannel(difference_image,OffAlphaChannel,exception);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetAbsoluteDistortion(): count, per channel, the pixels whose
  alpha-weighted difference exceeds the fuzz threshold; the composite
  slot counts pixels that differ in any channel.  Results are
  accumulated into `distortion` (MaxPixelChannels+1 slots).

  Fix: the pixel-exclusion test used GetPixelWriteMask on `image` only,
  while every other metric in this file (and CompareImages itself)
  skips pixels excluded by the *read* mask of either image; use the
  same convention here for consistency.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* per-row accumulator, merged under the critical section below */
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickBooleanType
        difference;

      register ssize_t
        i;

      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        /* no break: every differing channel is counted individually */
        if ((distance*distance) > fuzz)
          {
            channel_distortion[i]++;
            difference=MagickTrue;
          }
      }
      if (difference != MagickFalse)
        channel_distortion[CompositePixelChannel]++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  GetFuzzDistortion(): root-mean-squared, alpha-weighted channel
  difference, normalized by QuantumScale and the counted pixel area.
  The composite slot is averaged over the participating channels and
  square-rooted.

  Fix: the NULL test on `q` cast to plain `(Quantum *)` while `q` is
  const-qualified and every sibling uses `(const Quantum *) NULL`;
  made consistent.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* skip pixels excluded by either read mask */
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
          channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* normalize by area; PerceptibleReciprocal guards against area == 0 */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}
/*
  GetMeanAbsoluteDistortion(): mean of the absolute, alpha-weighted,
  QuantumScale-normalized channel differences over all unmasked pixels.
  The composite slot is additionally averaged over the participating
  channels.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* per-row accumulator, merged under the critical section below */
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* skip pixels excluded by either read mask */
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=QuantumScale*fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
          channel,q));
        channel_distortion[i]+=distance;
        channel_distortion[CompositePixelChannel]+=distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* normalize by area; PerceptibleReciprocal guards against area == 0 */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  GetMeanErrorPerPixel(): accumulate absolute per-channel error into
  `distortion` and record mean/normalized-mean/normalized-maximum error
  in image->error.  Single-threaded (it mutates `image`).

  Fix: the final statistics divided by the raw `area`; when every pixel
  is masked (area == 0.0) that produced NaN/Inf.  Use
  PerceptibleReciprocal() for the division, as the other metrics in
  this file already do.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  double
    area,
    maximum_error,
    mean_error;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* skip pixels excluded by either read mask */
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q));
        distortion[i]+=distance;
        distortion[CompositePixelChannel]+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
        /* note: area counts channel samples, not pixels */
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* guard against a fully-masked comparison (area == 0) */
  area=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=area*distortion[CompositePixelChannel];
  image->error.normalized_mean_error=area*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
/*
  GetMeanSquaredDistortion(): mean of the squared, alpha-weighted,
  QuantumScale-normalized channel differences over all unmasked pixels;
  the composite slot is additionally averaged over the participating
  channels.  Also the basis for PSNR.

  Fix: the final composite division lacked the explicit `(double)` cast
  on GetImageChannels() that every sibling metric uses; added for
  consistency (GetImageChannels returns size_t).
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* per-row accumulator, merged under the critical section below */
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* skip pixels excluded by either read mask */
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
          channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* normalize by area; PerceptibleReciprocal guards against area == 0 */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion(): per-channel NCC between the
  two images.  Pass 1 counts unmasked pixels (area); pass 2 accumulates
  the mean-centered cross products, which are finally divided by the
  product of the channels' standard deviations.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,double *distortion,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  double
    area;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageStatistics(image,exception);
  reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /* release whichever of the two allocations succeeded */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /* first pass: count the pixels that participate (not masked out) */
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  /* reciprocal once; guards against a fully-masked comparison */
  area=PerceptibleReciprocal(area);
  /* second pass: accumulate mean-centered cross products */
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*(image->alpha_trait != UndefinedPixelTrait ?
        GetPixelAlpha(image,p) : OpaqueAlpha);
      Da=QuantumScale*(reconstruct_image->alpha_trait != UndefinedPixelTrait ?
        GetPixelAlpha(reconstruct_image,q) : OpaqueAlpha);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* the alpha channel itself is not alpha-weighted */
        if (channel == AlphaPixelChannel)
          {
            distortion[i]+=area*QuantumScale*(p[i]-
              image_statistics[channel].mean)*(GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
        else
          {
            distortion[i]+=area*QuantumScale*(Sa*p[i]-
              image_statistics[channel].mean)*(Da*GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows);
        if (proceed == MagickFalse)
          {
            status=MagickFalse;
            break;
          }
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  distortion[CompositePixelChannel]=0.0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      gamma;

    PixelChannel channel=GetPixelChannelChannel(image,i);
    gamma=image_statistics[channel].standard_deviation*
      reconstruct_statistics[channel].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);
    distortion[i]=QuantumRange*gamma*distortion[i];
    /* composite slot: RMS of the per-channel correlations */
    distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
  }
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
    GetImageChannels(image));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
/*
  GetPeakAbsoluteDistortion(): per-channel maximum of the absolute,
  alpha-weighted, QuantumScale-normalized channel differences; the
  composite slot holds the maximum over all channels.
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* per-row maxima, merged under the critical section below */
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* skip pixels excluded by either read mask */
      if ((GetPixelReadMask(image,p) == 0) ||
          (GetPixelReadMask(reconstruct_image,q) == 0))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=QuantumScale*fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance > channel_distortion[i])
          channel_distortion[i]=distance;
        if (distance > channel_distortion[CompositePixelChannel])
          channel_distortion[CompositePixelChannel]=distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      if (channel_distortion[j] > distortion[j])
        distortion[j]=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
/*
  GetPeakSignalToNoiseRatio(): derive the per-channel PSNR (in decibels)
  from the mean-squared distortion.  A channel whose MSE is effectively
  zero is a perfect match and is reported as INFINITY.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (i=0; i <= MaxPixelChannels; i++)
  {
    if (fabs(distortion[i]) < MagickEpsilon)
      {
        distortion[i]=INFINITY;
        continue;
      }
    /* 20*log10(1/sqrt(mse)) is the classic -10*log10(mse) PSNR form. */
    distortion[i]=20.0*MagickLog10(1.0/sqrt(distortion[i]));
  }
  return(status);
}
/*
  GetPerceptualHashDistortion(): measure the distance between the perceptual
  hashes of two images.  For each pixel channel the squared difference (or,
  when the "phash:normalize" artifact is set, the normalized root difference)
  of the image moments is accumulated over every colorspace the hash covers.
  Per-channel results land in distortion[channel]; the composite slot sums
  all channels.  Returns MagickFalse if either hash cannot be computed.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *channel_phash,
    *reconstruct_phash;

  const char
    *artifact;

  MagickBooleanType
    normalize;

  ssize_t
    channel;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  channel_phash=GetImagePerceptualHash(image,exception);
  if (channel_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        channel_phash);
      return(MagickFalse);
    }
  artifact=GetImageArtifact(image,"phash:normalize");
  normalize=(artifact == (const char *) NULL) ||
    (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;

    register ssize_t
      i;

    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;

      register ssize_t
        j;

      for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
      {
        alpha=channel_phash[channel].phash[j][i];
        beta=reconstruct_phash[channel].phash[j][i];
        if (normalize == MagickFalse)
          difference+=(beta-alpha)*(beta-alpha);
        else
          /* Bug fix: accumulate (+=) instead of overwriting (=), so the
             normalized branch sums over all moments/colorspaces just like
             the non-normalized branch above. */
          difference+=sqrt((beta-alpha)*(beta-alpha)/
            channel_phash[0].number_channels);
      }
    }
    /* Each parallel iteration owns a distinct channel slot: no lock needed. */
    distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    /* The composite slot is shared by all iterations: serialize the update. */
    distortion[CompositePixelChannel]+=difference;
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
  return(MagickTrue);
}
/*
  GetRootMeanSquaredDistortion(): RMSE is simply the square root of the
  per-channel mean-squared error, including the composite slot.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  i=0;
  while (i <= MaxPixelChannels)
  {
    distortion[i]=sqrt(distortion[i]);
    i++;
  }
  return(status);
}
/*
  GetImageDistortion(): compare image to reconstruct_image using the given
  metric and return the composite distortion in *distortion.  A scratch
  array of MaxPixelChannels+1 doubles is filled by the metric-specific
  helper; its CompositePixelChannel slot is the value handed back to the
  caller and recorded in the image's "distortion" property.
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
const Image *reconstruct_image,const MetricType metric,double *distortion,
ExceptionInfo *exception)
{
double
*channel_distortion;
MagickBooleanType
status;
size_t
length;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
Get image distortion.
*/
length=MaxPixelChannels+1;
channel_distortion=(double *) AcquireQuantumMemory(length,
sizeof(*channel_distortion));
if (channel_distortion == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(channel_distortion,0,length*
sizeof(*channel_distortion));
/* Dispatch to the metric-specific helper; unrecognized metrics fall into
the default (normalized cross correlation) case. */
switch (metric)
{
case AbsoluteErrorMetric:
{
status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
exception);
break;
}
case FuzzErrorMetric:
{
status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
exception);
break;
}
case MeanAbsoluteErrorMetric:
{
status=GetMeanAbsoluteDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case MeanErrorPerPixelErrorMetric:
{
status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
exception);
break;
}
case MeanSquaredErrorMetric:
{
status=GetMeanSquaredDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case NormalizedCrossCorrelationErrorMetric:
default:
{
status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PeakAbsoluteErrorMetric:
{
status=GetPeakAbsoluteDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PeakSignalToNoiseRatioErrorMetric:
{
status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PerceptualHashErrorMetric:
{
status=GetPerceptualHashDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case RootMeanSquaredErrorMetric:
{
status=GetRootMeanSquaredDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
}
/* The composite slot aggregates all channels; report that to the caller. */
*distortion=channel_distortion[CompositePixelChannel];
channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
(void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
*distortion);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDistortions(): compare the pixel channels of image and
  reconstruct_image and return a newly allocated array of distortion values
  (one per pixel channel plus the composite slot) for the requested metric.
  Returns NULL on failure; on success the caller owns the array and must
  relinquish it.
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  /* Dispatch to the metric-specific helper; unrecognized metrics fall into
     the default (normalized cross correlation) case. */
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      /* Bug fix: this case previously called GetRootMeanSquaredDistortion
         (a copy-paste of the case below); dispatch to the perceptual-hash
         helper as GetImageDistortion() does for the same metric. */
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compare the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  IsImagesEqual(): scan both images in lock step and return MagickTrue only
  when every comparable channel of every pixel matches to within
  MagickEpsilon.  The first mismatch breaks out of the channel loop; the
  i/x/y counter checks below propagate that break outward so the scan stops
  immediately.
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
const Image *reconstruct_image,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
/* Compare over the union of the two geometries; the virtual views supply
pixels where one image is smaller than the other. */
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) columns; x++)
{
register ssize_t
i;
/* NOTE(review): this uses GetPixelWriteMask on read-only pixels while the
distortion helpers above use GetPixelReadMask -- confirm which mask is
intended here. */
if (GetPixelWriteMask(image,p) == 0)
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
channel,q));
/* A channel differing by at least MagickEpsilon means "not equal". */
if (distance >= MagickEpsilon)
break;
}
/* Inner loop broke early: propagate the mismatch outward. */
if (i < (ssize_t) GetPixelChannels(image))
break;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
if (x < (ssize_t) columns)
break;
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* y only reaches rows when no mismatch was found anywhere. */
return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetImageColorMetric(): compare image against reconstruct_image channel by
  channel and store the aggregate error measures in image->error
  (mean_error_per_pixel, normalized_mean_error, normalized_maximum_error).
  Returns MagickTrue when the images match exactly (zero mean error).
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
const Image *reconstruct_image,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
area,
maximum_error,
mean_error,
mean_error_per_pixel;
MagickBooleanType
status;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
area=0.0;
maximum_error=0.0;
mean_error_per_pixel=0.0;
mean_error=0.0;
/* Scan the union of both geometries via virtual views. */
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,p) == 0)
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
channel,q));
/* Sub-epsilon differences count toward area but add no error. */
if (distance >= MagickEpsilon)
{
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
}
area++;
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* NOTE(review): if no channel is ever visited (empty images or fully masked
pixels), area stays 0.0 and the divisions below yield NaN -- confirm callers
guarantee at least one comparable channel. */
image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
mean_error/area);
image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImageImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetSimilarityMetric(): crop the reference-sized window at (x_offset,
  y_offset) out of image and score it against reference with the requested
  metric.  Returns 0.0 when the crop or the comparison fails.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  /* Carve the candidate window out of the target image. */
  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  similarity_image=CropImage(image,&geometry,exception);
  if (similarity_image == (Image *) NULL)
    return(0.0);
  /* Score the window; the crop is discarded as soon as it is measured. */
  distortion=0.0;
  status=GetImageDistortion(similarity_image,reference,metric,&distortion,
    exception);
  similarity_image=DestroyImage(similarity_image);
  return(status == MagickFalse ? 0.0 : distortion);
}
/*
  SimilarityImage(): slide the reference image over every offset of image,
  score each placement with GetSimilarityMetric(), record the best offset
  and score in *offset / *similarity_metric, and return a map image whose
  pixel intensity encodes the per-offset similarity (white = exact match).
  The search short-circuits once a score at or below similarity_threshold
  is found.
*/
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
const MetricType metric,const double similarity_threshold,
RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"
CacheView
*similarity_view;
Image
*similarity_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(offset != (RectangleInfo *) NULL);
SetGeometry(reference,offset);
/* Lower distortion is better; start from the worst possible score. */
*similarity_metric=MagickMaximumValue;
/* The similarity map holds one pixel per candidate offset. */
similarity_image=CloneImage(image,image->columns-reference->columns+1,
image->rows-reference->rows+1,MagickTrue,exception);
if (similarity_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageStorageClass(similarity_image,DirectClass,exception);
if (status == MagickFalse)
{
similarity_image=DestroyImage(similarity_image);
return((Image *) NULL);
}
(void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
exception);
/*
Measure similarity of reference image against image.
*/
status=MagickTrue;
progress=0;
similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
shared(progress,status,similarity_metric) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
{
double
similarity;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp flush(similarity_metric)
#endif
/* Early exit: another thread already found a good-enough match. */
if (*similarity_metric <= similarity_threshold)
continue;
q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
{
register ssize_t
i;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp flush(similarity_metric)
#endif
if (*similarity_metric <= similarity_threshold)
break;
similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SimilarityImage)
#endif
/* NOTE(review): the critical section above guards only this single
if-statement; the best-offset update below reads and writes the shared
*similarity_metric/offset outside it -- confirm the intended lock scope. */
if ((metric == NormalizedCrossCorrelationErrorMetric) ||
(metric == UndefinedErrorMetric))
similarity=1.0-similarity;
if (similarity < *similarity_metric)
{
offset->x=x;
offset->y=y;
*similarity_metric=similarity;
}
/* Perceptual-hash scores are rescaled before being drawn into the map. */
if (metric == PerceptualHashErrorMetric)
similarity=MagickMin(0.01*similarity,1.0);
if (GetPixelWriteMask(similarity_image,q) == 0)
{
SetPixelBackgoundColor(similarity_image,q);
q+=GetPixelChannels(similarity_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(similarity_traits == UndefinedPixelTrait) ||
((similarity_traits & UpdatePixelTrait) == 0))
continue;
/* Invert: low distortion maps to a bright pixel in the similarity map. */
SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
QuantumRange*similarity),q);
}
q+=GetPixelChannels(similarity_image);
}
if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SimilarityImage)
#endif
proceed=SetImageProgress(image,SimilarityImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
similarity_view=DestroyCacheView(similarity_view);
if (status == MagickFalse)
similarity_image=DestroyImage(similarity_image);
return(similarity_image);
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <assert.h>
#ifdef __APPLE__
#include <OpenCL/opencl.h>
#else
#include <CL/cl.h>
#endif
#define MAX_GPU (0x8)
#define MAX_SOURCE_SIZE (0x100000)
#define DEVICENUM (0x2)
#define MAX_INPUT (20000)
/*
 * Read (n, key1, key2) queries from stdin, evaluate each with the "dot_sum"
 * OpenCL kernel (vecdot.cl), and print one uint32 answer per query.  The
 * query list is split into contiguous slices, one per device, driven by one
 * OpenMP host thread each.
 */
int main(void) {
  // Work-partitioning parameters.
  int CHUNKSIZE = 2 << 7;            // elements each work-item handles per chunk
  // NOTE: "+" binds tighter than "<<", so the original "2LL << 29 + 1"
  // evaluates as 2LL << 30; the parentheses keep that exact value but make
  // the precedence explicit.
  long long int MAX_SIZE = (2LL << (29 + 1));
  long long int MAX_GROUPS = (MAX_SIZE/CHUNKSIZE);  // upper bound on work-groups
  int LOCAL_SIZE = 512;              // work-items per work-group
  uint32_t ans[MAX_INPUT];           // per-query answers, filled by device loops
  int count = 0;                     // number of queries read from stdin
  int N[MAX_INPUT];
  uint32_t key1[MAX_INPUT];
  uint32_t key2[MAX_INPUT];
  // Load the kernel source code into source_str.
  FILE *fp;
  char *source_str;
  size_t source_size;
  fp = fopen("vecdot.cl", "r");
  if (!fp) {
    fprintf(stderr, "Failed to load kernel.\n");
    exit(1);
  }
  source_str = (char*)malloc(MAX_SOURCE_SIZE);
  source_size = fread(source_str, 1, MAX_SOURCE_SIZE, fp);
  fclose(fp);
  // Get platform and device information.
  cl_platform_id platform_id = NULL;
  cl_device_id device_id[MAX_GPU];
  cl_uint ret_num_devices;
  cl_uint ret_num_platforms;
  cl_int ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
  ret = clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, MAX_GPU,
                       device_id, &ret_num_devices);
  assert(ret == CL_SUCCESS && ret_num_devices >= DEVICENUM);
  // Read the queries.  Bug fix: bound count so oversized input cannot
  // overflow the fixed-size N/key1/key2/ans arrays.
  int tmp_n;
  uint32_t tmp_key1, tmp_key2;
  while (count < MAX_INPUT &&
         scanf("%d %" PRIu32 " %" PRIu32, &tmp_n, &tmp_key1, &tmp_key2) == 3) {
    N[count] = tmp_n;
    key1[count] = tmp_key1;
    key2[count++] = tmp_key2;
  }
  // One host thread per device; each handles a contiguous slice of queries.
  #pragma omp parallel for
  for (int device = 0; device < DEVICENUM; device++) {
    // Create an OpenCL context and command queue for this device.
    cl_context context = clCreateContext(NULL, 1, device_id + device, NULL, NULL, &ret);
    assert(ret == CL_SUCCESS);
    cl_command_queue command_queue = clCreateCommandQueue(context, device_id[device], 0, &ret);
    assert(ret == CL_SUCCESS);
    // Build the program and fetch the reduction kernel.
    cl_program program = clCreateProgramWithSource(context, 1,
        (const char **)&source_str, (const size_t *)&source_size, &ret);
    assert(ret == CL_SUCCESS);
    ret = clBuildProgram(program, 1, device_id + device, NULL, NULL, NULL);
    assert(ret == CL_SUCCESS);
    cl_kernel kernel = clCreateKernel(program, "dot_sum", &ret);
    assert(ret == CL_SUCCESS);
    // One partial sum per work-group, read back and reduced on the host.
    uint32_t *sum = (uint32_t*)malloc(sizeof(uint32_t)*MAX_GROUPS/LOCAL_SIZE);
    cl_mem sum_mem_obj = clCreateBuffer(context, CL_MEM_WRITE_ONLY, MAX_GROUPS/LOCAL_SIZE*sizeof(uint32_t), NULL, &ret);
    assert(ret == CL_SUCCESS);
    for (int i = (count/DEVICENUM+1)*device; i < (count/DEVICENUM+1)*(device+1) && i < count; i++) {
      // Round the global size up to a multiple of LOCAL_SIZE.
      size_t globalSize = N[i]/CHUNKSIZE + 1;
      while (globalSize % LOCAL_SIZE)
        globalSize++;
      size_t globalThreads[] = {(size_t)globalSize};
      size_t localThreads[] = {(size_t)LOCAL_SIZE};
      // Set the kernel arguments (slot 5 is per-group local scratch memory).
      ret = clSetKernelArg(kernel, 0, sizeof(uint32_t), (void *)&key1[i]);
      ret = clSetKernelArg(kernel, 1, sizeof(uint32_t), (void *)&key2[i]);
      ret = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&sum_mem_obj);
      ret = clSetKernelArg(kernel, 3, sizeof(int), (void *)&CHUNKSIZE);
      ret = clSetKernelArg(kernel, 4, sizeof(int), (void *)&N[i]);
      ret = clSetKernelArg(kernel, 5, sizeof(uint32_t)*LOCAL_SIZE, NULL);
      ret = clSetKernelArg(kernel, 6, sizeof(int), (void *)&LOCAL_SIZE);
      assert(ret == CL_SUCCESS);
      ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL,
                                   globalThreads, localThreads, 0, NULL, NULL);
      assert(ret == CL_SUCCESS);
      // Blocking read: one partial sum per work-group.
      ret = clEnqueueReadBuffer(command_queue, sum_mem_obj, CL_TRUE, 0, globalSize/LOCAL_SIZE * sizeof(uint32_t), (cl_uint*)sum, 0, NULL, NULL);
      assert(ret == CL_SUCCESS);
      // Reduce the per-group partial sums on the host (size_t index avoids
      // the signed/unsigned comparison of the original int loop).
      uint32_t tmp = 0;
      for (size_t j = 0; j < globalSize/LOCAL_SIZE; j++)
        tmp += sum[j];
      ans[i] = tmp;
    }
    // Release per-device OpenCL resources.
    ret = clFlush(command_queue);
    ret = clFinish(command_queue);
    ret = clReleaseCommandQueue(command_queue);
    ret = clReleaseMemObject(sum_mem_obj);
    ret = clReleaseKernel(kernel);
    ret = clReleaseProgram(program);
    ret = clReleaseContext(context);
    free(sum);
  }
  free(source_str);  // leak fix: kernel source buffer was never released
  for (int i = 0; i < count; i++)
    printf("%" PRIu32 "\n", ans[i]);
  return 0;
}
GB_binop__lt_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__lt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__lt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_fp64)
// A*D function (colscale): GB (_AxD__lt_fp64)
// D*A function (rowscale): GB (_DxB__lt_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_fp64)
// C=scalar+B GB (_bind1st__lt_fp64)
// C=scalar+B' GB (_bind1st_tran__lt_fp64)
// C=A+scalar GB (_bind2nd__lt_fp64)
// C=A'+scalar GB (_bind2nd_tran__lt_fp64)
// C type: bool
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_FP64 || GxB_NO_LT_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// This kernel is compiled out (#if 0): the LT comparison operator is not a
// valid accumulator, so no dense C += A+B method is generated for it.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Apply the LT operator entrywise (Cx [p] = Ax [p] < Bx [p]) over three
// conformant dense matrices, with no accumulator; work is split across
// nthreads OpenMP threads by the included template.
void GB (_Cdense_ewise3_noaccum__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The accumulation template is compiled out (#if 0) for LT: a comparison
// operator is not a valid accumulator, so this stub just reports success.
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lt_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Compiled out (#if 0) for the LT operator: comparisons cannot serve as an
// accumulator, so the scalar b is never applied and the stub returns success.
#if 0
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < D(j,j)): scale each column j of A by the matching
// diagonal entry of D with the LT operator; C is boolean with A's pattern.
GrB_Info GB (_AxD__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Cx [p] = (D(i,i) < Bx [p]): scale each row i of B by the matching diagonal
// entry of D with the LT operator; C is boolean with B's pattern.
GrB_Info GB (_DxB__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Element-wise "add" over the union of the patterns of A and B, with the LT
// operator.  When is_eWiseUnion is true, entries present in only one input
// are paired with the alpha/beta scalars instead of the other input's value.
GrB_Info GB (_AaddB__lt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B; released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only initialized for eWiseUnion; presumably the template
// reads them only in that case -- TODO confirm in GB_add_template.c
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Element-wise "multiply" (LT operator) over the intersection of the patterns
// of A and B, for the case where C is sparse or hypersparse.  All looping and
// mask handling is in the included meta-template, driven by TaskList.
GrB_Info GB (_AemultB_08__lt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// The template is instantiated twice when GB_BINOP_FLIP is set, selecting at
// runtime between f(x,y) and f(y,x) via the flipxy argument; otherwise a
// single non-flipped instantiation suffices.
GrB_Info GB (_AemultB_02__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// The mask M drives the iteration (M_ek_slicing / M_ntasks / M_nthreads);
// the element-wise LT work is in the included template.
GrB_Info GB (_AemultB_04__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap-output variant of eWiseMult with the LT operator; mask and loop
// handling are in the included template.
GrB_Info GB (_AemultB_bitmap__lt_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = (x < Bx [p]) for every entry p present in B.  Bb is B->b
// when B is bitmap (NULL otherwise); positions with Bb [p] == 0 are skipped.
GrB_Info GB (_bind1st__lt_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cz = (bool *) Cx_output ;
    const double xval = (*((double *) x_input)) ;
    double *Bvals = (double *) Bx_input ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < bnz ; k++)
    {
        if (GBB (Bb, k))
        {
            double bval = GBX (Bvals, k, false) ;
            Cz [k] = (xval < bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [p] = (Ax [p] < y) for every entry p present in A.  Ab is A->b
// when A is bitmap (NULL otherwise); positions with Ab [p] == 0 are skipped.
GrB_Info GB (_bind2nd__lt_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cz = (bool *) Cx_output ;
    double *Avals = (double *) Ax_input ;
    const double yval = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            double aval = GBX (Avals, k, false) ;
            Cz [k] = (aval < yval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__lt_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE: the redefinition below is unreachable at runtime (both branches
// above return); it exists only to restore GB_ATYPE for the preprocessor,
// a generator artifact.
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// Unlike _bind1st_tran above, no GB_ATYPE override is needed here: A is the
// first input of z=f(x,y), matching the template's expectations.
GrB_Info GB (_bind2nd_tran__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__bnot_int64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__bnot_int64_int64)
// op(A') function: GB (_unop_tran__bnot_int64_int64)
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = aij
// unaryop: cij = ~(aij)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
int64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Computes Cx [p] = ~(Ax [p]) (bitwise complement) for all anz entries.
// When A is bitmap (Ab != NULL) only positions with Ab [p] != 0 are touched;
// the caller has already copied A->b into C->b.
GrB_Info GB (_unop_apply__bnot_int64_int64)
(
    int64_t *Cx,                // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                Cx [k] = ~(Ax [k]) ;
            }
        }
    }
    else
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = ~(Ax [k]) ;
        }
        #endif
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Fused transpose-and-apply for BNOT on int64: the included template walks A
// (sliced by A_slice over nthreads threads, with nworkspaces scratch arrays)
// and writes the complemented values into the transposed positions of C.
GrB_Info GB (_unop_tran__bnot_int64_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y, storing the result in
 * RESULT.  Return 1 if the difference is negative, otherwise 0.
 *
 * NOTE: Y is normalized in place as part of the carry handling, so the
 * caller's Y may be modified (this matches the classic glibc example).
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Carry handling: normalize y so that the microsecond difference
     * x->tv_usec - y->tv_usec lands in [0, 1000000). */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec  += carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec  -= carry;
    }

    /* tv_usec is now certainly positive. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* The difference is negative iff x precedes the normalized y. */
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 24;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(12*t1+Ny+21,24)),floord(24*t2+Ny+20,24)),floord(24*t1-24*t2+Nz+Ny+19,24));t3++) {
for (t4=max(max(max(0,ceild(3*t1-31,32)),ceild(24*t2-Nz-124,128)),ceild(24*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(12*t1+Nx+21,128)),floord(24*t2+Nx+20,128)),floord(24*t3+Nx+20,128)),floord(24*t1-24*t2+Nz+Nx+19,128));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),24*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),24*t3+22),128*t4+126),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
lbv=max(128*t4,t5+1);
ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
GB_unop__lnot_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lnot_uint32_uint32)
// op(A') function: GB (_unop_tran__lnot_uint32_uint32)
// C type: uint32_t
// A type: uint32_t
// cast: uint32_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__lnot_uint32_uint32)
(
uint32_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
uint32_t z = aij ;
Cx [p] = !(z != 0) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
uint32_t z = aij ;
Cx [p] = !(z != 0) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__lnot_uint32_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_3x3_pack1to8_fp16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_pack1to8_fp16_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p + 1);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
__m256 _bias1 = bias ? _mm256_loadu_ps((const float*)bias + (p + 1) * 8) : _mm256_set1_ps(0.f);
out0.fill(_bias0);
out1.fill(_bias1);
//fp16
const unsigned short* k0 = (const unsigned short*)kernel.channel(p);
const unsigned short* k1 = (const unsigned short*)kernel.channel(p + 1);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
__m256 _k00_0 = loadfp16(k0);
__m256 _k01_0 = loadfp16(k0 + 8);
__m256 _k02_0 = loadfp16(k0 + 16);
__m256 _k10_0 = loadfp16(k0 + 24);
__m256 _k11_0 = loadfp16(k0 + 32);
__m256 _k12_0 = loadfp16(k0 + 40);
__m256 _k20_0 = loadfp16(k0 + 48);
__m256 _k21_0 = loadfp16(k0 + 56);
__m256 _k22_0 = loadfp16(k0 + 64);
__m256 _k00_1 = loadfp16(k1);
__m256 _k01_1 = loadfp16(k1 + 8);
__m256 _k02_1 = loadfp16(k1 + 16);
__m256 _k10_1 = loadfp16(k1 + 24);
__m256 _k11_1 = loadfp16(k1 + 32);
__m256 _k12_1 = loadfp16(k1 + 40);
__m256 _k20_1 = loadfp16(k1 + 48);
__m256 _k21_1 = loadfp16(k1 + 56);
__m256 _k22_1 = loadfp16(k1 + 64);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _sum10 = _mm256_loadu_ps(outptr1);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
_sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10);
_mm256_storeu_ps(outptr0, _sum00);
_mm256_storeu_ps(outptr1, _sum10);
__m256 _sum01 = _mm256_loadu_ps(outptr0 + 8);
__m256 _sum11 = _mm256_loadu_ps(outptr1 + 8);
__m256 _r04 = _mm256_broadcast_ss(r0 + 3);
__m256 _r14 = _mm256_broadcast_ss(r1 + 3);
__m256 _r24 = _mm256_broadcast_ss(r2 + 3);
_sum01 = _mm256_fmadd_ps(_r02, _k00_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r03, _k01_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r04, _k02_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r12, _k10_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r13, _k11_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r14, _k12_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r22, _k20_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r23, _k21_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r24, _k22_0, _sum01);
_sum11 = _mm256_fmadd_ps(_r02, _k00_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r03, _k01_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r04, _k02_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r12, _k10_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r13, _k11_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r14, _k12_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r22, _k20_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r23, _k21_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r24, _k22_1, _sum11);
_mm256_storeu_ps(outptr0 + 8, _sum01);
_mm256_storeu_ps(outptr1 + 8, _sum11);
__m256 _sum02 = _mm256_loadu_ps(outptr0 + 16);
__m256 _sum12 = _mm256_loadu_ps(outptr1 + 16);
__m256 _r05 = _mm256_broadcast_ss(r0 + 4);
__m256 _r15 = _mm256_broadcast_ss(r1 + 4);
__m256 _r25 = _mm256_broadcast_ss(r2 + 4);
_sum02 = _mm256_fmadd_ps(_r03, _k00_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r04, _k01_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r05, _k02_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r13, _k10_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r14, _k11_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r15, _k12_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r23, _k20_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r24, _k21_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r25, _k22_0, _sum02);
_sum12 = _mm256_fmadd_ps(_r03, _k00_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r04, _k01_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r05, _k02_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r13, _k10_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r14, _k11_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r15, _k12_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r23, _k20_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r24, _k21_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r25, _k22_1, _sum12);
_mm256_storeu_ps(outptr0 + 16, _sum02);
_mm256_storeu_ps(outptr1 + 16, _sum12);
__m256 _r06 = _mm256_broadcast_ss(r0 + 5);
__m256 _r16 = _mm256_broadcast_ss(r1 + 5);
__m256 _r26 = _mm256_broadcast_ss(r2 + 5);
__m256 _sum03 = _mm256_loadu_ps(outptr0 + 24);
__m256 _sum13 = _mm256_loadu_ps(outptr1 + 24);
_sum03 = _mm256_fmadd_ps(_r04, _k00_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r05, _k01_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r06, _k02_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r14, _k10_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r15, _k11_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r16, _k12_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r24, _k20_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r25, _k21_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r26, _k22_0, _sum03);
_sum13 = _mm256_fmadd_ps(_r04, _k00_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r05, _k01_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r06, _k02_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r14, _k10_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r15, _k11_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r16, _k12_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r24, _k20_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r25, _k21_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r26, _k22_1, _sum13);
_mm256_storeu_ps(outptr0 + 24, _sum03);
_mm256_storeu_ps(outptr1 + 24, _sum13);
r0 += 4;
r1 += 4;
r2 += 4;
outptr0 += 32;
outptr1 += 32;
}
for (; j + 1 < outw; j += 2)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _sum10 = _mm256_loadu_ps(outptr1);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
_sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10);
_mm256_storeu_ps(outptr0, _sum00);
_mm256_storeu_ps(outptr1, _sum10);
__m256 _sum01 = _mm256_loadu_ps(outptr0 + 8);
__m256 _sum11 = _mm256_loadu_ps(outptr1 + 8);
__m256 _r04 = _mm256_broadcast_ss(r0 + 3);
__m256 _r14 = _mm256_broadcast_ss(r1 + 3);
__m256 _r24 = _mm256_broadcast_ss(r2 + 3);
_sum01 = _mm256_fmadd_ps(_r02, _k00_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r03, _k01_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r04, _k02_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r12, _k10_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r13, _k11_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r14, _k12_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r22, _k20_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r23, _k21_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r24, _k22_0, _sum01);
_sum11 = _mm256_fmadd_ps(_r02, _k00_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r03, _k01_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r04, _k02_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r12, _k10_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r13, _k11_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r14, _k12_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r22, _k20_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r23, _k21_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r24, _k22_1, _sum11);
_mm256_storeu_ps(outptr0 + 8, _sum01);
_mm256_storeu_ps(outptr1 + 8, _sum11);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 16;
outptr1 += 16;
}
for (; j < outw; j++)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _sum10 = _mm256_loadu_ps(outptr1);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
_sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10);
_mm256_storeu_ps(outptr0, _sum00);
_mm256_storeu_ps(outptr1, _sum10);
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 8;
outptr1 += 8;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9 * 8;
k1 += 9 * 8;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
out0.fill(_bias0);
//fp16
const unsigned short* k0 = ( const unsigned short*)kernel.channel(p);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
__m256 _k00 = loadfp16(k0);
__m256 _k01 = loadfp16(k0 + 8);
__m256 _k02 = loadfp16(k0 + 16);
__m256 _k10 = loadfp16(k0 + 24);
__m256 _k11 = loadfp16(k0 + 32);
__m256 _k12 = loadfp16(k0 + 40);
__m256 _k20 = loadfp16(k0 + 48);
__m256 _k21 = loadfp16(k0 + 56);
__m256 _k22 = loadfp16(k0 + 64);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
__m256 _sum0 = _mm256_loadu_ps(outptr0);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum0 = _mm256_fmadd_ps(_r01, _k00, _sum0);
_sum0 = _mm256_fmadd_ps(_r02, _k01, _sum0);
_sum0 = _mm256_fmadd_ps(_r03, _k02, _sum0);
_sum0 = _mm256_fmadd_ps(_r11, _k10, _sum0);
_sum0 = _mm256_fmadd_ps(_r12, _k11, _sum0);
_sum0 = _mm256_fmadd_ps(_r13, _k12, _sum0);
_sum0 = _mm256_fmadd_ps(_r21, _k20, _sum0);
_sum0 = _mm256_fmadd_ps(_r22, _k21, _sum0);
_sum0 = _mm256_fmadd_ps(_r23, _k22, _sum0);
__m256 _sum1 = _mm256_loadu_ps(outptr0 + 8);
__m256 _r04 = _mm256_broadcast_ss(r0 + 3);
__m256 _r14 = _mm256_broadcast_ss(r1 + 3);
__m256 _r24 = _mm256_broadcast_ss(r2 + 3);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_fmadd_ps(_r02, _k00, _sum1);
_sum1 = _mm256_fmadd_ps(_r03, _k01, _sum1);
_sum1 = _mm256_fmadd_ps(_r04, _k02, _sum1);
_sum1 = _mm256_fmadd_ps(_r12, _k10, _sum1);
_sum1 = _mm256_fmadd_ps(_r13, _k11, _sum1);
_sum1 = _mm256_fmadd_ps(_r14, _k12, _sum1);
_sum1 = _mm256_fmadd_ps(_r22, _k20, _sum1);
_sum1 = _mm256_fmadd_ps(_r23, _k21, _sum1);
_sum1 = _mm256_fmadd_ps(_r24, _k22, _sum1);
__m256 _sum2 = _mm256_loadu_ps(outptr0 + 16);
__m256 _r05 = _mm256_broadcast_ss(r0 + 4);
__m256 _r15 = _mm256_broadcast_ss(r1 + 4);
__m256 _r25 = _mm256_broadcast_ss(r2 + 4);
_mm256_storeu_ps(outptr0 + 8, _sum1);
_sum2 = _mm256_fmadd_ps(_r03, _k00, _sum2);
_sum2 = _mm256_fmadd_ps(_r04, _k01, _sum2);
_sum2 = _mm256_fmadd_ps(_r05, _k02, _sum2);
_sum2 = _mm256_fmadd_ps(_r13, _k10, _sum2);
_sum2 = _mm256_fmadd_ps(_r14, _k11, _sum2);
_sum2 = _mm256_fmadd_ps(_r15, _k12, _sum2);
_sum2 = _mm256_fmadd_ps(_r23, _k20, _sum2);
_sum2 = _mm256_fmadd_ps(_r24, _k21, _sum2);
_sum2 = _mm256_fmadd_ps(_r25, _k22, _sum2);
__m256 _sum3 = _mm256_loadu_ps(outptr0 + 24);
__m256 _r06 = _mm256_broadcast_ss(r0 + 5);
__m256 _r16 = _mm256_broadcast_ss(r1 + 5);
__m256 _r26 = _mm256_broadcast_ss(r2 + 5);
_mm256_storeu_ps(outptr0 + 16, _sum2);
_sum3 = _mm256_fmadd_ps(_r04, _k00, _sum3);
_sum3 = _mm256_fmadd_ps(_r05, _k01, _sum3);
_sum3 = _mm256_fmadd_ps(_r06, _k02, _sum3);
_sum3 = _mm256_fmadd_ps(_r14, _k10, _sum3);
_sum3 = _mm256_fmadd_ps(_r15, _k11, _sum3);
_sum3 = _mm256_fmadd_ps(_r16, _k12, _sum3);
_sum3 = _mm256_fmadd_ps(_r24, _k20, _sum3);
_sum3 = _mm256_fmadd_ps(_r25, _k21, _sum3);
_sum3 = _mm256_fmadd_ps(_r26, _k22, _sum3);
_mm256_storeu_ps(outptr0 + 24, _sum3);
r0 += 4;
r1 += 4;
r2 += 4;
outptr0 += 32;
}
for (; j + 1 < outw; j += 2)
{
__m256 _sum0 = _mm256_loadu_ps(outptr0);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum0 = _mm256_fmadd_ps(_r01, _k00, _sum0);
_sum0 = _mm256_fmadd_ps(_r02, _k01, _sum0);
_sum0 = _mm256_fmadd_ps(_r03, _k02, _sum0);
_sum0 = _mm256_fmadd_ps(_r11, _k10, _sum0);
_sum0 = _mm256_fmadd_ps(_r12, _k11, _sum0);
_sum0 = _mm256_fmadd_ps(_r13, _k12, _sum0);
_sum0 = _mm256_fmadd_ps(_r21, _k20, _sum0);
_sum0 = _mm256_fmadd_ps(_r22, _k21, _sum0);
_sum0 = _mm256_fmadd_ps(_r23, _k22, _sum0);
__m256 _sum1 = _mm256_loadu_ps(outptr0 + 8);
__m256 _r04 = _mm256_broadcast_ss(r0 + 3);
__m256 _r14 = _mm256_broadcast_ss(r1 + 3);
__m256 _r24 = _mm256_broadcast_ss(r2 + 3);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_fmadd_ps(_r02, _k00, _sum1);
_sum1 = _mm256_fmadd_ps(_r03, _k01, _sum1);
_sum1 = _mm256_fmadd_ps(_r04, _k02, _sum1);
_sum1 = _mm256_fmadd_ps(_r12, _k10, _sum1);
_sum1 = _mm256_fmadd_ps(_r13, _k11, _sum1);
_sum1 = _mm256_fmadd_ps(_r14, _k12, _sum1);
_sum1 = _mm256_fmadd_ps(_r22, _k20, _sum1);
_sum1 = _mm256_fmadd_ps(_r23, _k21, _sum1);
_sum1 = _mm256_fmadd_ps(_r24, _k22, _sum1);
_mm256_storeu_ps(outptr0 + 8, _sum1);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 16;
}
for (; j < outw; j++)
{
__m256 _sum0 = _mm256_loadu_ps(outptr0);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum0 = _mm256_fmadd_ps(_r01, _k00, _sum0);
_sum0 = _mm256_fmadd_ps(_r02, _k01, _sum0);
_sum0 = _mm256_fmadd_ps(_r03, _k02, _sum0);
_sum0 = _mm256_fmadd_ps(_r11, _k10, _sum0);
_sum0 = _mm256_fmadd_ps(_r12, _k11, _sum0);
_sum0 = _mm256_fmadd_ps(_r13, _k12, _sum0);
_sum0 = _mm256_fmadd_ps(_r21, _k20, _sum0);
_sum0 = _mm256_fmadd_ps(_r22, _k21, _sum0);
_sum0 = _mm256_fmadd_ps(_r23, _k22, _sum0);
_mm256_storeu_ps(outptr0, _sum0);
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 8;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9 * 8;
}
}
}
static void conv3x3s2_pack1to8_fp16_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
const float* bias = _bias;
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p + 1);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
__m256 _bias1 = bias ? _mm256_loadu_ps((const float*)bias + (p + 1) * 8) : _mm256_set1_ps(0.f);
out0.fill(_bias0);
out1.fill(_bias1);
//fp16
const unsigned short* k0 =(const unsigned short*)kernel.channel(p);
const unsigned short* k1 = (const unsigned short*)kernel.channel(p + 1);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
__m256 _k00_0 = loadfp16(k0);
__m256 _k01_0 = loadfp16(k0 + 8);
__m256 _k02_0 = loadfp16(k0 + 16);
__m256 _k10_0 = loadfp16(k0 + 24);
__m256 _k11_0 = loadfp16(k0 + 32);
__m256 _k12_0 = loadfp16(k0 + 40);
__m256 _k20_0 = loadfp16(k0 + 48);
__m256 _k21_0 = loadfp16(k0 + 56);
__m256 _k22_0 = loadfp16(k0 + 64);
__m256 _k00_1 = loadfp16(k1);
__m256 _k01_1 = loadfp16(k1 + 8);
__m256 _k02_1 = loadfp16(k1 + 16);
__m256 _k10_1 = loadfp16(k1 + 24);
__m256 _k11_1 = loadfp16(k1 + 32);
__m256 _k12_1 = loadfp16(k1 + 40);
__m256 _k20_1 = loadfp16(k1 + 48);
__m256 _k21_1 = loadfp16(k1 + 56);
__m256 _k22_1 = loadfp16(k1 + 64);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 7 < outw; j += 8)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _sum10 = _mm256_loadu_ps(outptr1);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
_sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10);
_mm256_storeu_ps(outptr0, _sum00);
_mm256_storeu_ps(outptr1, _sum10);
__m256 _sum01 = _mm256_loadu_ps(outptr0 + 8);
__m256 _sum11 = _mm256_loadu_ps(outptr1 + 8);
__m256 _r04 = _mm256_broadcast_ss(r0 + 3);
__m256 _r14 = _mm256_broadcast_ss(r1 + 3);
__m256 _r24 = _mm256_broadcast_ss(r2 + 3);
__m256 _r05 = _mm256_broadcast_ss(r0 + 4);
__m256 _r15 = _mm256_broadcast_ss(r1 + 4);
__m256 _r25 = _mm256_broadcast_ss(r2 + 4);
_sum01 = _mm256_fmadd_ps(_r03, _k00_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r04, _k01_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r05, _k02_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r13, _k10_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r14, _k11_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r15, _k12_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r23, _k20_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r24, _k21_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r25, _k22_0, _sum01);
_sum11 = _mm256_fmadd_ps(_r03, _k00_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r04, _k01_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r05, _k02_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r13, _k10_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r14, _k11_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r15, _k12_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r23, _k20_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r24, _k21_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r25, _k22_1, _sum11);
_mm256_storeu_ps(outptr0 + 8, _sum01);
_mm256_storeu_ps(outptr1 + 8, _sum11);
__m256 _sum02 = _mm256_loadu_ps(outptr0 + 16);
__m256 _sum12 = _mm256_loadu_ps(outptr1 + 16);
__m256 _r06 = _mm256_broadcast_ss(r0 + 5);
__m256 _r16 = _mm256_broadcast_ss(r1 + 5);
__m256 _r26 = _mm256_broadcast_ss(r2 + 5);
__m256 _r07 = _mm256_broadcast_ss(r0 + 6);
__m256 _r17 = _mm256_broadcast_ss(r1 + 6);
__m256 _r27 = _mm256_broadcast_ss(r2 + 6);
_sum02 = _mm256_fmadd_ps(_r05, _k00_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r06, _k01_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r07, _k02_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r15, _k10_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r16, _k11_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r17, _k12_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r25, _k20_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r26, _k21_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r27, _k22_0, _sum02);
_sum12 = _mm256_fmadd_ps(_r05, _k00_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r06, _k01_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r07, _k02_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r15, _k10_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r16, _k11_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r17, _k12_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r25, _k20_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r26, _k21_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r27, _k22_1, _sum12);
_mm256_storeu_ps(outptr0 + 16, _sum02);
_mm256_storeu_ps(outptr1 + 16, _sum12);
__m256 _r08 = _mm256_broadcast_ss(r0 + 7);
__m256 _r18 = _mm256_broadcast_ss(r1 + 7);
__m256 _r28 = _mm256_broadcast_ss(r2 + 7);
__m256 _r09 = _mm256_broadcast_ss(r0 + 8);
__m256 _r19 = _mm256_broadcast_ss(r1 + 8);
__m256 _r29 = _mm256_broadcast_ss(r2 + 8);
__m256 _sum03 = _mm256_loadu_ps(outptr0 + 24);
__m256 _sum13 = _mm256_loadu_ps(outptr1 + 24);
_sum03 = _mm256_fmadd_ps(_r07, _k00_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r08, _k01_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r09, _k02_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r17, _k10_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r18, _k11_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r19, _k12_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r27, _k20_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r28, _k21_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r29, _k22_0, _sum03);
_sum13 = _mm256_fmadd_ps(_r07, _k00_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r08, _k01_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r09, _k02_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r17, _k10_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r18, _k11_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r19, _k12_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r27, _k20_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r28, _k21_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r29, _k22_1, _sum13);
_mm256_storeu_ps(outptr0 + 24, _sum03);
_mm256_storeu_ps(outptr1 + 24, _sum13);
__m256 _r010 = _mm256_broadcast_ss(r0 + 9);
__m256 _r110 = _mm256_broadcast_ss(r1 + 9);
__m256 _r210 = _mm256_broadcast_ss(r2 + 9);
__m256 _r011 = _mm256_broadcast_ss(r0 + 10);
__m256 _r111 = _mm256_broadcast_ss(r1 + 10);
__m256 _r211 = _mm256_broadcast_ss(r2 + 10);
__m256 _sum04 = _mm256_loadu_ps(outptr0 + 32);
__m256 _sum14 = _mm256_loadu_ps(outptr1 + 32);
_sum04 = _mm256_fmadd_ps(_r09, _k00_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r010, _k01_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r011, _k02_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r19, _k10_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r110, _k11_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r111, _k12_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r29, _k20_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r210, _k21_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r211, _k22_0, _sum04);
_sum14 = _mm256_fmadd_ps(_r09, _k00_1, _sum14);
_sum14 = _mm256_fmadd_ps(_r010, _k01_1, _sum14);
_sum14 = _mm256_fmadd_ps(_r011, _k02_1, _sum14);
_sum14 = _mm256_fmadd_ps(_r19, _k10_1, _sum14);
_sum14 = _mm256_fmadd_ps(_r110, _k11_1, _sum14);
_sum14 = _mm256_fmadd_ps(_r111, _k12_1, _sum14);
_sum14 = _mm256_fmadd_ps(_r29, _k20_1, _sum14);
_sum14 = _mm256_fmadd_ps(_r210, _k21_1, _sum14);
_sum14 = _mm256_fmadd_ps(_r211, _k22_1, _sum14);
_mm256_storeu_ps(outptr0 + 32, _sum04);
_mm256_storeu_ps(outptr1 + 32, _sum14);
__m256 _r012 = _mm256_broadcast_ss(r0 + 11);
__m256 _r112 = _mm256_broadcast_ss(r1 + 11);
__m256 _r212 = _mm256_broadcast_ss(r2 + 11);
__m256 _r013 = _mm256_broadcast_ss(r0 + 12);
__m256 _r113 = _mm256_broadcast_ss(r1 + 12);
__m256 _r213 = _mm256_broadcast_ss(r2 + 12);
__m256 _sum05 = _mm256_loadu_ps(outptr0 + 40);
__m256 _sum15 = _mm256_loadu_ps(outptr1 + 40);
_sum05 = _mm256_fmadd_ps(_r011, _k00_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r012, _k01_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r013, _k02_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r111, _k10_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r112, _k11_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r113, _k12_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r211, _k20_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r212, _k21_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r213, _k22_0, _sum05);
_sum15 = _mm256_fmadd_ps(_r011, _k00_1, _sum15);
_sum15 = _mm256_fmadd_ps(_r012, _k01_1, _sum15);
_sum15 = _mm256_fmadd_ps(_r013, _k02_1, _sum15);
_sum15 = _mm256_fmadd_ps(_r111, _k10_1, _sum15);
_sum15 = _mm256_fmadd_ps(_r112, _k11_1, _sum15);
_sum15 = _mm256_fmadd_ps(_r113, _k12_1, _sum15);
_sum15 = _mm256_fmadd_ps(_r211, _k20_1, _sum15);
_sum15 = _mm256_fmadd_ps(_r212, _k21_1, _sum15);
_sum15 = _mm256_fmadd_ps(_r213, _k22_1, _sum15);
_mm256_storeu_ps(outptr0 + 40, _sum05);
_mm256_storeu_ps(outptr1 + 40, _sum15);
__m256 _r014 = _mm256_broadcast_ss(r0 + 13);
__m256 _r114 = _mm256_broadcast_ss(r1 + 13);
__m256 _r214 = _mm256_broadcast_ss(r2 + 13);
__m256 _r015 = _mm256_broadcast_ss(r0 + 14);
__m256 _r115 = _mm256_broadcast_ss(r1 + 14);
__m256 _r215 = _mm256_broadcast_ss(r2 + 14);
__m256 _sum06 = _mm256_loadu_ps(outptr0 + 48);
__m256 _sum16 = _mm256_loadu_ps(outptr1 + 48);
_sum06 = _mm256_fmadd_ps(_r013, _k00_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r014, _k01_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r015, _k02_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r113, _k10_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r114, _k11_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r115, _k12_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r213, _k20_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r214, _k21_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r215, _k22_0, _sum06);
_sum16 = _mm256_fmadd_ps(_r013, _k00_1, _sum16);
_sum16 = _mm256_fmadd_ps(_r014, _k01_1, _sum16);
_sum16 = _mm256_fmadd_ps(_r015, _k02_1, _sum16);
_sum16 = _mm256_fmadd_ps(_r113, _k10_1, _sum16);
_sum16 = _mm256_fmadd_ps(_r114, _k11_1, _sum16);
_sum16 = _mm256_fmadd_ps(_r115, _k12_1, _sum16);
_sum16 = _mm256_fmadd_ps(_r213, _k20_1, _sum16);
_sum16 = _mm256_fmadd_ps(_r214, _k21_1, _sum16);
_sum16 = _mm256_fmadd_ps(_r215, _k22_1, _sum16);
_mm256_storeu_ps(outptr0 + 48, _sum06);
_mm256_storeu_ps(outptr1 + 48, _sum16);
__m256 _r016 = _mm256_broadcast_ss(r0 + 15);
__m256 _r116 = _mm256_broadcast_ss(r1 + 15);
__m256 _r216 = _mm256_broadcast_ss(r2 + 15);
__m256 _r017 = _mm256_broadcast_ss(r0 + 16);
__m256 _r117 = _mm256_broadcast_ss(r1 + 16);
__m256 _r217 = _mm256_broadcast_ss(r2 + 16);
__m256 _sum07 = _mm256_loadu_ps(outptr0 + 56);
__m256 _sum17 = _mm256_loadu_ps(outptr1 + 56);
_sum07 = _mm256_fmadd_ps(_r015, _k00_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r016, _k01_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r017, _k02_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r115, _k10_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r116, _k11_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r117, _k12_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r215, _k20_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r216, _k21_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r217, _k22_0, _sum07);
_sum17 = _mm256_fmadd_ps(_r015, _k00_1, _sum17);
_sum17 = _mm256_fmadd_ps(_r016, _k01_1, _sum17);
_sum17 = _mm256_fmadd_ps(_r017, _k02_1, _sum17);
_sum17 = _mm256_fmadd_ps(_r115, _k10_1, _sum17);
_sum17 = _mm256_fmadd_ps(_r116, _k11_1, _sum17);
_sum17 = _mm256_fmadd_ps(_r117, _k12_1, _sum17);
_sum17 = _mm256_fmadd_ps(_r215, _k20_1, _sum17);
_sum17 = _mm256_fmadd_ps(_r216, _k21_1, _sum17);
_sum17 = _mm256_fmadd_ps(_r217, _k22_1, _sum17);
_mm256_storeu_ps(outptr0 + 56, _sum07);
_mm256_storeu_ps(outptr1 + 56, _sum17);
r0 += 16;
r1 += 16;
r2 += 16;
outptr0 += 64;
outptr1 += 64;
}
for (; j + 3 < outw; j += 4)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _sum10 = _mm256_loadu_ps(outptr1);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
_sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10);
_mm256_storeu_ps(outptr0, _sum00);
_mm256_storeu_ps(outptr1, _sum10);
__m256 _sum01 = _mm256_loadu_ps(outptr0 + 8);
__m256 _sum11 = _mm256_loadu_ps(outptr1 + 8);
__m256 _r04 = _mm256_broadcast_ss(r0 + 3);
__m256 _r14 = _mm256_broadcast_ss(r1 + 3);
__m256 _r24 = _mm256_broadcast_ss(r2 + 3);
__m256 _r05 = _mm256_broadcast_ss(r0 + 4);
__m256 _r15 = _mm256_broadcast_ss(r1 + 4);
__m256 _r25 = _mm256_broadcast_ss(r2 + 4);
_sum01 = _mm256_fmadd_ps(_r03, _k00_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r04, _k01_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r05, _k02_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r13, _k10_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r14, _k11_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r15, _k12_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r23, _k20_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r24, _k21_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r25, _k22_0, _sum01);
_sum11 = _mm256_fmadd_ps(_r03, _k00_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r04, _k01_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r05, _k02_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r13, _k10_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r14, _k11_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r15, _k12_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r23, _k20_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r24, _k21_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r25, _k22_1, _sum11);
_mm256_storeu_ps(outptr0 + 8, _sum01);
_mm256_storeu_ps(outptr1 + 8, _sum11);
__m256 _sum02 = _mm256_loadu_ps(outptr0 + 16);
__m256 _sum12 = _mm256_loadu_ps(outptr1 + 16);
__m256 _r06 = _mm256_broadcast_ss(r0 + 5);
__m256 _r16 = _mm256_broadcast_ss(r1 + 5);
__m256 _r26 = _mm256_broadcast_ss(r2 + 5);
__m256 _r07 = _mm256_broadcast_ss(r0 + 6);
__m256 _r17 = _mm256_broadcast_ss(r1 + 6);
__m256 _r27 = _mm256_broadcast_ss(r2 + 6);
_sum02 = _mm256_fmadd_ps(_r05, _k00_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r06, _k01_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r07, _k02_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r15, _k10_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r16, _k11_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r17, _k12_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r25, _k20_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r26, _k21_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r27, _k22_0, _sum02);
_sum12 = _mm256_fmadd_ps(_r05, _k00_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r06, _k01_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r07, _k02_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r15, _k10_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r16, _k11_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r17, _k12_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r25, _k20_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r26, _k21_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r27, _k22_1, _sum12);
_mm256_storeu_ps(outptr0 + 16, _sum02);
_mm256_storeu_ps(outptr1 + 16, _sum12);
__m256 _r08 = _mm256_broadcast_ss(r0 + 7);
__m256 _r18 = _mm256_broadcast_ss(r1 + 7);
__m256 _r28 = _mm256_broadcast_ss(r2 + 7);
__m256 _r09 = _mm256_broadcast_ss(r0 + 8);
__m256 _r19 = _mm256_broadcast_ss(r1 + 8);
__m256 _r29 = _mm256_broadcast_ss(r2 + 8);
__m256 _sum03 = _mm256_loadu_ps(outptr0 + 24);
__m256 _sum13 = _mm256_loadu_ps(outptr1 + 24);
_sum03 = _mm256_fmadd_ps(_r07, _k00_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r08, _k01_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r09, _k02_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r17, _k10_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r18, _k11_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r19, _k12_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r27, _k20_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r28, _k21_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r29, _k22_0, _sum03);
_sum13 = _mm256_fmadd_ps(_r07, _k00_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r08, _k01_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r09, _k02_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r17, _k10_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r18, _k11_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r19, _k12_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r27, _k20_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r28, _k21_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r29, _k22_1, _sum13);
_mm256_storeu_ps(outptr0 + 24, _sum03);
_mm256_storeu_ps(outptr1 + 24, _sum13);
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 32;
outptr1 += 32;
}
for (; j + 1 < outw; j += 2)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _sum10 = _mm256_loadu_ps(outptr1);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
_sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10);
_mm256_storeu_ps(outptr0, _sum00);
_mm256_storeu_ps(outptr1, _sum10);
__m256 _sum01 = _mm256_loadu_ps(outptr0 + 8);
__m256 _sum11 = _mm256_loadu_ps(outptr1 + 8);
__m256 _r04 = _mm256_broadcast_ss(r0 + 3);
__m256 _r14 = _mm256_broadcast_ss(r1 + 3);
__m256 _r24 = _mm256_broadcast_ss(r2 + 3);
__m256 _r05 = _mm256_broadcast_ss(r0 + 4);
__m256 _r15 = _mm256_broadcast_ss(r1 + 4);
__m256 _r25 = _mm256_broadcast_ss(r2 + 4);
_sum01 = _mm256_fmadd_ps(_r03, _k00_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r04, _k01_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r05, _k02_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r13, _k10_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r14, _k11_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r15, _k12_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r23, _k20_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r24, _k21_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r25, _k22_0, _sum01);
_sum11 = _mm256_fmadd_ps(_r03, _k00_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r04, _k01_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r05, _k02_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r13, _k10_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r14, _k11_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r15, _k12_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r23, _k20_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r24, _k21_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r25, _k22_1, _sum11);
_mm256_storeu_ps(outptr0 + 8, _sum01);
_mm256_storeu_ps(outptr1 + 8, _sum11);
r0 += 4;
r1 += 4;
r2 += 4;
outptr0 += 16;
outptr1 += 16;
}
for (; j < outw; j++)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _sum10 = _mm256_loadu_ps(outptr1);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
_sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10);
_mm256_storeu_ps(outptr0, _sum00);
_mm256_storeu_ps(outptr1, _sum10);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 8;
outptr1 += 8;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 8;
k1 += 9 * 8;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
out0.fill(_bias0);
//fp16
const unsigned short* k0 = (const unsigned short*)kernel.channel(p);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
__m256 _k00_0 = loadfp16(k0);
__m256 _k01_0 = loadfp16(k0 + 8);
__m256 _k02_0 = loadfp16(k0 + 16);
__m256 _k10_0 = loadfp16(k0 + 24);
__m256 _k11_0 = loadfp16(k0 + 32);
__m256 _k12_0 = loadfp16(k0 + 40);
__m256 _k20_0 = loadfp16(k0 + 48);
__m256 _k21_0 = loadfp16(k0 + 56);
__m256 _k22_0 = loadfp16(k0 + 64);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 7 < outw; j += 8)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
_mm256_storeu_ps(outptr0, _sum00);
__m256 _sum01 = _mm256_loadu_ps(outptr0 + 8);
__m256 _r04 = _mm256_broadcast_ss(r0 + 3);
__m256 _r14 = _mm256_broadcast_ss(r1 + 3);
__m256 _r24 = _mm256_broadcast_ss(r2 + 3);
__m256 _r05 = _mm256_broadcast_ss(r0 + 4);
__m256 _r15 = _mm256_broadcast_ss(r1 + 4);
__m256 _r25 = _mm256_broadcast_ss(r2 + 4);
_sum01 = _mm256_fmadd_ps(_r03, _k00_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r04, _k01_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r05, _k02_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r13, _k10_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r14, _k11_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r15, _k12_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r23, _k20_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r24, _k21_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r25, _k22_0, _sum01);
_mm256_storeu_ps(outptr0 + 8, _sum01);
__m256 _sum02 = _mm256_loadu_ps(outptr0 + 16);
__m256 _r06 = _mm256_broadcast_ss(r0 + 5);
__m256 _r16 = _mm256_broadcast_ss(r1 + 5);
__m256 _r26 = _mm256_broadcast_ss(r2 + 5);
__m256 _r07 = _mm256_broadcast_ss(r0 + 6);
__m256 _r17 = _mm256_broadcast_ss(r1 + 6);
__m256 _r27 = _mm256_broadcast_ss(r2 + 6);
_sum02 = _mm256_fmadd_ps(_r05, _k00_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r06, _k01_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r07, _k02_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r15, _k10_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r16, _k11_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r17, _k12_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r25, _k20_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r26, _k21_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r27, _k22_0, _sum02);
_mm256_storeu_ps(outptr0 + 16, _sum02);
__m256 _r08 = _mm256_broadcast_ss(r0 + 7);
__m256 _r18 = _mm256_broadcast_ss(r1 + 7);
__m256 _r28 = _mm256_broadcast_ss(r2 + 7);
__m256 _r09 = _mm256_broadcast_ss(r0 + 8);
__m256 _r19 = _mm256_broadcast_ss(r1 + 8);
__m256 _r29 = _mm256_broadcast_ss(r2 + 8);
__m256 _sum03 = _mm256_loadu_ps(outptr0 + 24);
_sum03 = _mm256_fmadd_ps(_r07, _k00_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r08, _k01_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r09, _k02_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r17, _k10_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r18, _k11_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r19, _k12_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r27, _k20_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r28, _k21_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r29, _k22_0, _sum03);
_mm256_storeu_ps(outptr0 + 24, _sum03);
__m256 _r010 = _mm256_broadcast_ss(r0 + 9);
__m256 _r110 = _mm256_broadcast_ss(r1 + 9);
__m256 _r210 = _mm256_broadcast_ss(r2 + 9);
__m256 _r011 = _mm256_broadcast_ss(r0 + 10);
__m256 _r111 = _mm256_broadcast_ss(r1 + 10);
__m256 _r211 = _mm256_broadcast_ss(r2 + 10);
__m256 _sum04 = _mm256_loadu_ps(outptr0 + 32);
_sum04 = _mm256_fmadd_ps(_r09, _k00_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r010, _k01_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r011, _k02_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r19, _k10_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r110, _k11_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r111, _k12_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r29, _k20_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r210, _k21_0, _sum04);
_sum04 = _mm256_fmadd_ps(_r211, _k22_0, _sum04);
_mm256_storeu_ps(outptr0 + 32, _sum04);
__m256 _r012 = _mm256_broadcast_ss(r0 + 11);
__m256 _r112 = _mm256_broadcast_ss(r1 + 11);
__m256 _r212 = _mm256_broadcast_ss(r2 + 11);
__m256 _r013 = _mm256_broadcast_ss(r0 + 12);
__m256 _r113 = _mm256_broadcast_ss(r1 + 12);
__m256 _r213 = _mm256_broadcast_ss(r2 + 12);
__m256 _sum05 = _mm256_loadu_ps(outptr0 + 40);
_sum05 = _mm256_fmadd_ps(_r011, _k00_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r012, _k01_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r013, _k02_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r111, _k10_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r112, _k11_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r113, _k12_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r211, _k20_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r212, _k21_0, _sum05);
_sum05 = _mm256_fmadd_ps(_r213, _k22_0, _sum05);
_mm256_storeu_ps(outptr0 + 40, _sum05);
__m256 _r014 = _mm256_broadcast_ss(r0 + 13);
__m256 _r114 = _mm256_broadcast_ss(r1 + 13);
__m256 _r214 = _mm256_broadcast_ss(r2 + 13);
__m256 _r015 = _mm256_broadcast_ss(r0 + 14);
__m256 _r115 = _mm256_broadcast_ss(r1 + 14);
__m256 _r215 = _mm256_broadcast_ss(r2 + 14);
__m256 _sum06 = _mm256_loadu_ps(outptr0 + 48);
_sum06 = _mm256_fmadd_ps(_r013, _k00_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r014, _k01_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r015, _k02_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r113, _k10_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r114, _k11_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r115, _k12_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r213, _k20_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r214, _k21_0, _sum06);
_sum06 = _mm256_fmadd_ps(_r215, _k22_0, _sum06);
_mm256_storeu_ps(outptr0 + 48, _sum06);
__m256 _r016 = _mm256_broadcast_ss(r0 + 15);
__m256 _r116 = _mm256_broadcast_ss(r1 + 15);
__m256 _r216 = _mm256_broadcast_ss(r2 + 15);
__m256 _r017 = _mm256_broadcast_ss(r0 + 16);
__m256 _r117 = _mm256_broadcast_ss(r1 + 16);
__m256 _r217 = _mm256_broadcast_ss(r2 + 16);
__m256 _sum07 = _mm256_loadu_ps(outptr0 + 56);
_sum07 = _mm256_fmadd_ps(_r015, _k00_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r016, _k01_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r017, _k02_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r115, _k10_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r116, _k11_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r117, _k12_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r215, _k20_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r216, _k21_0, _sum07);
_sum07 = _mm256_fmadd_ps(_r217, _k22_0, _sum07);
_mm256_storeu_ps(outptr0 + 56, _sum07);
r0 += 16;
r1 += 16;
r2 += 16;
outptr0 += 64;
}
for (; j + 3 < outw; j += 4)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
_mm256_storeu_ps(outptr0, _sum00);
__m256 _sum01 = _mm256_loadu_ps(outptr0 + 8);
__m256 _r04 = _mm256_broadcast_ss(r0 + 3);
__m256 _r14 = _mm256_broadcast_ss(r1 + 3);
__m256 _r24 = _mm256_broadcast_ss(r2 + 3);
__m256 _r05 = _mm256_broadcast_ss(r0 + 4);
__m256 _r15 = _mm256_broadcast_ss(r1 + 4);
__m256 _r25 = _mm256_broadcast_ss(r2 + 4);
_sum01 = _mm256_fmadd_ps(_r03, _k00_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r04, _k01_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r05, _k02_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r13, _k10_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r14, _k11_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r15, _k12_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r23, _k20_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r24, _k21_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r25, _k22_0, _sum01);
_mm256_storeu_ps(outptr0 + 8, _sum01);
__m256 _sum02 = _mm256_loadu_ps(outptr0 + 16);
__m256 _r06 = _mm256_broadcast_ss(r0 + 5);
__m256 _r16 = _mm256_broadcast_ss(r1 + 5);
__m256 _r26 = _mm256_broadcast_ss(r2 + 5);
__m256 _r07 = _mm256_broadcast_ss(r0 + 6);
__m256 _r17 = _mm256_broadcast_ss(r1 + 6);
__m256 _r27 = _mm256_broadcast_ss(r2 + 6);
_sum02 = _mm256_fmadd_ps(_r05, _k00_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r06, _k01_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r07, _k02_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r15, _k10_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r16, _k11_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r17, _k12_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r25, _k20_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r26, _k21_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r27, _k22_0, _sum02);
_mm256_storeu_ps(outptr0 + 16, _sum02);
__m256 _r08 = _mm256_broadcast_ss(r0 + 7);
__m256 _r18 = _mm256_broadcast_ss(r1 + 7);
__m256 _r28 = _mm256_broadcast_ss(r2 + 7);
__m256 _r09 = _mm256_broadcast_ss(r0 + 8);
__m256 _r19 = _mm256_broadcast_ss(r1 + 8);
__m256 _r29 = _mm256_broadcast_ss(r2 + 8);
__m256 _sum03 = _mm256_loadu_ps(outptr0 + 24);
_sum03 = _mm256_fmadd_ps(_r07, _k00_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r08, _k01_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r09, _k02_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r17, _k10_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r18, _k11_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r19, _k12_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r27, _k20_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r28, _k21_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r29, _k22_0, _sum03);
_mm256_storeu_ps(outptr0 + 24, _sum03);
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 32;
}
for (; j + 1 < outw; j += 2)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
_mm256_storeu_ps(outptr0, _sum00);
__m256 _sum01 = _mm256_loadu_ps(outptr0 + 8);
__m256 _r04 = _mm256_broadcast_ss(r0 + 3);
__m256 _r14 = _mm256_broadcast_ss(r1 + 3);
__m256 _r24 = _mm256_broadcast_ss(r2 + 3);
__m256 _r05 = _mm256_broadcast_ss(r0 + 4);
__m256 _r15 = _mm256_broadcast_ss(r1 + 4);
__m256 _r25 = _mm256_broadcast_ss(r2 + 4);
_sum01 = _mm256_fmadd_ps(_r03, _k00_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r04, _k01_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r05, _k02_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r13, _k10_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r14, _k11_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r15, _k12_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r23, _k20_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r24, _k21_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r25, _k22_0, _sum01);
_mm256_storeu_ps(outptr0 + 8, _sum01);
r0 += 4;
r1 += 4;
r2 += 4;
outptr0 += 16;
}
for (; j < outw; j++)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
_mm256_storeu_ps(outptr0, _sum00);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 8;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 8;
}
}
}
|
Example_target.1.c | extern void init(float*, float*, int);
extern void output(float*, int);
/* Elementwise vector product computed on a target device.
 * N: number of elements in each vector; v1/v2 are filled by init() and
 * the product p is handed to output(). */
void vec_mult(int N)
{
    float p[N], v1[N], v2[N];
    init(v1, v2, N);
    /* Offload the loop; the loop variable declared in the for-init is
     * predetermined private, and the arrays referenced inside the target
     * region are implicitly mapped (tofrom by default). */
#pragma omp target
#pragma omp parallel for
    for (int idx = 0; idx < N; idx++) {
        p[idx] = v1[idx] * v2[idx];
    }
    output(p, N);
}
|
Example7.c | //#include <stdio.h>
//#include <omp.h>
//#include <conio.h>
//
//int main(int argc, char *argv[])
//{
// int tid, totalLocal=0, total=0, n = 6, i, a[] = {1,2,3,4,5,6};
//#pragma omp parallel num_threads(3) shared(n,a,total) private(tid, totalLocal)
// {
// tid = omp_get_thread_num();
// totalLocal = 0;
//#pragma omp for
// for (i = 0; i < n; i++)
// totalLocal += a[i];
//#pragma omp critical (total)
// {
// total += totalLocal;
// printf("tid = %d : totalLocal =%d total = %d\n", tid, totalLocal, total);
// }
// } /*-- End of for loop --*/
//
// printf("The value of the total after the parallel region: %d\n", total);
//
// _getch(); // for keep console from <conio.h> library
// return 0;
//} |
cholesky-no-dependency.c |
/*
* Cholesky por bloques.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "ctimer.h"
#define L(i,j) L[j*n+i]
#define A(i,j) A[j*n+i]
#define C(i,j) C[j*n+i]
int cholesky_escalar( int n, double *C );
int cholesky_bloques( int n, int b, double *C );
/* Test driver for the blocked Cholesky factorization.
 * Builds a random lower-triangular L, forms the SPD matrix A = L*L' with
 * dsyrk_, factors A, and reports n, block size, wall time and the
 * Frobenius error ||chol(A) - L|| (should be ~0 on success).
 * Usage: prog n block_size */
int main( int argc, char *argv[] ) {
    int n, b, i, j, info;
    double *L, *A;
    if( argc<3 ) {
        fprintf(stderr,"usage: %s n block_size\n",argv[0]);
        exit(-1);
    }
    sscanf(argv[1],"%d",&n);
    if( ( L = (double*) malloc(n*n*sizeof(double)) ) == NULL ) {
        fprintf(stderr,"Error en la reserva de memoria para la matriz L\n");
        exit(-1);
    }
    /* Random lower-triangular L (column-major via the L(i,j) macro).
     * Adding n to each diagonal entry makes A = L*L' strongly diagonally
     * dominant, hence well conditioned. */
    for( j=0; j<n; j++ ) {
        for( i=0; i<j; i++ ) {
            L(i,j) = 0.0;
        }
        for( i=j; i<n; i++ ) {
            L(i,j) = ((double) rand()) / RAND_MAX;
        }
        L(j,j) += n;
    }
    /* Print matrix (debug) */
    /*
    for( i=0; i<n; i++ ) {
        for( j=0; j<n; j++ ) {
            printf("%10.3lf",L(i,j));
        }
        printf("\n");
    }
    */
    if( ( A = (double*) malloc(n*n*sizeof(double)) ) == NULL ) {
        fprintf(stderr,"Error en la reserva de memoria para la matriz A\n");
        exit(-1);
    }
    /*********************************************************/
    /* Multiply A = L*L', where L is lower triangular.       */
    /* dsyrk_ returns only the lower triangular part in A.   */
    double zero = 0.0;
    double one = 1.0;
    dsyrk_( "L", "N", &n, &n, &one, &L(0,0), &n, &zero, &A(0,0), &n );
    /*********************************************************/
    sscanf(argv[2],"%d",&b);
    /* Print matrix (debug) */
    /*
    for( i=0; i<n; i++ ) {
        for( j=0; j<n; j++ ) {
            printf("%10.3lf",A(i,j));
        }
        printf("\n");
    }
    */
    double t1, t2, ucpu, scpu;
    ctimer( &t1, &ucpu, &scpu );
    //info = cholesky_escalar( n, A );
    /* The task-based factorization must run inside a parallel region with
     * a single generating thread: the team then executes the tasks. */
    #pragma omp parallel
    #pragma omp single
    info = cholesky_bloques( n, b, A );
    //dpotrf_( "L", &n, A, &n, &info );
    ctimer( &t2, &ucpu, &scpu );
    if( info != 0 ) {
        fprintf(stderr,"Error = %d en la descomposición de Cholesky de la matriz A\n",info);
        exit(-1);
    }
    /* Print matrix (debug) */
    /*
    for( i=0; i<n; i++ ) {
        for( j=0; j<n; j++ ) {
            printf("%10.3lf",A(i,j));
        }
        printf("\n");
    }
    */
    /* Is A == L ? Frobenius norm of the lower-triangular difference.
     * NOTE: the inner `double b` shadows the block size only inside this
     * loop body; the printf below still sees the outer int b. */
    double error = 0.0;
    for( j=0; j<n; j++ ) {
        for( i=j; i<n; i++ ) {
            double b = (A(i,j)-L(i,j));
            error += b*b;
        }
    }
    error = sqrt(error);
    //printf("Error = %10.4e\n",error);
    printf("%10d %10d %20.2f sec. %15.4e\n",n,b,t2-t1,error);
    free(A);
    free(L);
}
int cholesky_escalar( int n, double *C ) {
int k;
for ( k = 0; k < n ; k++ ) {
/* CODIGO DE CHOLESKY ESCALAR */
}
return 0;
}
inline int min(int a, int b) { return (a < b) ? a : b; }
/* Blocked right-looking Cholesky factorization (lower triangle of C,
 * column-major, leading dimension n) using OpenMP tasks.
 * Must be entered by a single thread inside a parallel region (see the
 * caller: parallel + single); tasks are then executed by the whole team.
 * Returns 0 on success or the dpotrf_ info code on failure. */
int cholesky_bloques( int n, int b, double *C ) {
    int i, j, k, m;
    int info;
    const double one = 1.0;
    const double minusone = -1.0;
    for ( k = 0; k < n ; k+=b ) {
        /* Factor the (possibly smaller, trailing) diagonal block. */
        m = min( n-k, b );
        dpotrf_( "L", &m, &C(k,k), &n, &info );
        if( info != 0 ) {
            fprintf(stderr,"Error = %d en la descomposición de Cholesky de la matriz C\n",info);
            return info;
        }
        /* Panel solve: C(i,k) <- C(i,k) * L(k,k)^-T, one task per block
         * row. Scalars referenced in a task (i, m, n, b, k) are
         * firstprivate by default here, so each task captures its own i
         * and the in-task write to m touches the task's private copy. */
        for ( i = k + b; i < n; i += b ) {
            #pragma omp task
            {
                m = min( n-i, b );
                dtrsm_( "R", "L", "T", "N", &m, &b, &one, &C(k,k), &n, &C(i,k), &n );
            }
        }
        /* All panel solves must complete before the trailing update reads
         * the panel blocks. */
        #pragma omp taskwait
        for ( i = k + b; i < n; i += b ) {
            m = min( n-i, b );
            for ( j = k + b; j < i ; j += b ) {
                /* Off-diagonal trailing block: C(i,j) -= C(i,k)*C(j,k)'. */
                #pragma omp task
                dgemm_( "N", "T", &m, &b, &b, &minusone, &C(i,k), &n, &C(j,k), &n, &one, &C(i,j), &n );
            }
            /* Diagonal trailing block: symmetric rank-b update. */
            #pragma omp task
            dsyrk_( "L", "N", &m, &b, &minusone, &C(i,k), &n, &one, &C(i,i), &n );
        }
        /* Trailing updates of step k must finish before step k+b factors
         * its diagonal block. */
        #pragma omp taskwait
    }
    return 0;
}
|
test_intel.c | /*
Copyright 2012 Intel Corporation. All Rights Reserved.
The source code contained or described herein and all documents
related to the source code ("Material") are owned by Intel Corporation
or its suppliers or licensors. Title to the Material remains with
Intel Corporation or its suppliers and licensors. The Material is
protected by worldwide copyright and trade secret laws and treaty
provisions. No part of the Material may be used, copied, reproduced,
modified, published, uploaded, posted, transmitted, distributed, or
disclosed in any way without Intel's prior express written permission.
No license under any patent, copyright, trade secret or other
intellectual property right is granted to or conferred upon you by
disclosure or delivery of the Materials, either expressly, by
implication, inducement, estoppel or otherwise. Any license under
such intellectual property rights must be express and approved by
Intel in writing.
*/
/*
This is a hello world program utilizing both MPI and OpenMP.
In order to coordinate output, all output is handled by the master
process. Within the master process, first, each thread says hello.
Once this is completed, the master thread waits for MPI sends from
each of the other processes. The first piece of data is how many
threads the process has. This is sent by the master thread of the
remote process. Then, each thread will send a thread ID, process
rank, and processor name to the master process. This will then be
formatted and sent to standard output as a hello from the sending
thread.
*/
// Include the MPI header <mpi.h> and the OpenMP header <omp.h>
// The MPI header should be included before stdio.h.
#include <mpi.h>
#include <omp.h>
#include <stdio.h>
// Entry point: hybrid MPI + OpenMP hello world. Rank 0 prints a greeting
// for every thread of every rank; all other ranks send their per-thread
// data to rank 0 point-to-point.
//
// Message-tag protocol (all messages are sent to rank 0 by rank r):
//   10*r          thread count (sent once, by the master thread)
//   10*r+1        sender's rank     (one message per thread)
//   10*r+2        sender's thread ID (one message per thread)
//   1000*r+10*t   processor-name length for thread t
//   1000*r+10*t+1 processor-name characters for thread t
// Each thread sends its four messages inside one critical section, and MPI
// preserves per-(source,tag) ordering, so the receiver's paired recvs with
// tags 10*r+1 / 10*r+2 always belong to the same thread.
int main(int argc, char* argv[]) {
  int rank;        // Rank ID of the current process
  int nproc;       // Total number of processes
  int nthreads;    // Total number of threads
  int threadID;    // ID of the current thread
  int namelen;     // Length of the processor name
  int required=MPI_THREAD_SERIALIZED; // Required level of MPI threading support
  /* Each thread will call MPI routines, but these calls will be coordinated
     to occur only one at a time within a process.
  */
  int provided;    // Provided level of MPI threading support
  char name[MPI_MAX_PROCESSOR_NAME];  // Name of the processor
  int dThread;     // Display thread ID
  int dRank;       // Display rank ID
  int dNamelen;    // Length of display name
  char dName[MPI_MAX_PROCESSOR_NAME]; // Display processor name
  int sNthreads;   // nthreads from sender
  MPI_Status stat; // Status from MPI calls
  int r;           // Rank loop counter
  int t;           // Thread loop counter
  // Initialize MPI with threading
  MPI_Init_thread(&argc, &argv, required, &provided);
  // Determine the MPI rank, number of processes, and processor name
  MPI_Comm_size(MPI_COMM_WORLD,&nproc);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  MPI_Get_processor_name(name,&namelen);
  // Check the threading support level
  if (provided < required) {
    // Insufficient support, degrade to 1 thread and warn the user
    if (rank == 0) {
      printf("Warning: This MPI implementation provides insufficient");
      printf(" threading support.\n");
    }
    omp_set_num_threads(1);
  }
  // The multithreaded section where all threads will say hello
  #pragma omp parallel default(shared) private(threadID)
  {
    // All processes should get the total number of threads, each
    // threads needs to know its own ID.
    threadID=omp_get_thread_num(); // Get the thread ID
    // Benign race: every thread of the team stores the same value.
    nthreads=omp_get_num_threads(); // Get the total number of threads
    // Time to say hello, the master process performs all output.
    // Within the master process, each thread will handle its own
    // output, the master thread will handle output from all threads
    // of all other processes.
    // rank is shared, so all threads of a process take the same branch
    // and the barrier below is executed by the full team of rank 0.
    if (rank == 0) {
      // The master process outputs from its own threads
      // This section is done by every OpenMP thread, but only one at a time.
      // This requires MPI_THREAD_SERIALIZED.
      #pragma omp critical
      {
        printf("Hello from thread %d of %d in rank %d of %d on %s.\n",
               threadID, nthreads, rank, nproc, name);
      } // End of #pragma omp critical
      #pragma omp barrier
      // Now, receive data from each of the other processes and
      // give an appropriate greeting. Only the master thread
      // should do this. Since only the master thread is calling
      // MPI, this is an example of MPI_THREAD_FUNNELED.
      #pragma omp master
      {
        for (r=1;r<nproc;r++) {
          // Get the number of threads in the sender
          MPI_Recv(&sNthreads, 1, MPI_INT, r, 10*r, MPI_COMM_WORLD, &stat);
          for (t=0;t<sNthreads;t++) {
            // For each thread, get the rank ID, thread ID, and name
            MPI_Recv(&dRank, 1, MPI_INT, r, 10*r+1, MPI_COMM_WORLD, &stat);
            MPI_Recv(&dThread, 1, MPI_INT, r, 10*r+2, MPI_COMM_WORLD, &stat);
            MPI_Recv(&dNamelen, 1, MPI_INT, r, 1000*r+10*dThread, MPI_COMM_WORLD, &stat);
            MPI_Recv(dName, dNamelen+1, MPI_CHAR, r, 1000*r+10*dThread+1, MPI_COMM_WORLD, &stat);
            printf("Hello from thread %d of %d in rank %d of %d on %s.\n",
                   dThread, sNthreads, dRank, nproc, dName);
          }
        }
      } // End of #pragma omp master
    } else { // All other processes will send their data to the master
      // Only the master sends the number of threads. MPI_THREAD_FUNNELED
      #pragma omp master
      {
        MPI_Send(&nthreads, 1, MPI_INT, 0, 10*rank, MPI_COMM_WORLD);
      } // End of #pragma omp master
      #pragma omp critical
      {
        // Each thread will send its own data, but there is no
        // particular order required, so a critical section works
        // exactly as needed. As such, this requires MPI_THREAD_SERIALIZED
        MPI_Send(&rank, 1, MPI_INT, 0, 10*rank+1, MPI_COMM_WORLD);
        MPI_Send(&threadID, 1, MPI_INT, 0, 10*rank+2, MPI_COMM_WORLD);
        MPI_Send(&namelen, 1, MPI_INT, 0, 1000*rank+10*threadID, MPI_COMM_WORLD);
        MPI_Send(name, namelen+1, MPI_CHAR, 0, 1000*rank+10*threadID+1, MPI_COMM_WORLD);
      } // End of #pragma omp critical
    }
  } // End of #pragma omp parallel
  // Close out MPI and the program
  MPI_Finalize();
  return 0;
}
|
mxEvaluate.c | #include "../../SWEAbstractNumFluxSolver2d/private/SWENumFlux2d.h"
// #define DEBUG
/* Roe-averaged interface state shared by the wave-strength and flux
 * routines (filled in by evaluateRoeAverage). */
typedef struct {
  double h; /* Roe-averaged depth: sqrt(hM)*sqrt(hP) */
  double u; /* Roe-averaged x velocity */
  double v; /* Roe-averaged y velocity */
  double c; /* celerity: sqrt(g*(hM+hP)/2) */
} RoeState;
inline void evaluateVelocity(const double hcrit, ///< depth threshold
const double h, ///< depth
const double hu, ///< water flux
const double hv, ///< water flux
double *u, ///< result velocity
double *v ///< velocity
) {
if (h > hcrit) {
*u = hu / h;
*v = hv / h;
} else {
*u = 0.0;
*v = 0.0;
}
return;
}
/* Compute the Roe-averaged state (depth, velocities, celerity) from the
 * local (M) and neighbour (P) conserved variables. Velocities are
 * recovered with the dry-cell guard and combined with sqrt-depth
 * weights; the celerity uses the arithmetic mean depth. */
inline void evaluateRoeAverage(const double gra,   ///< gravity acceleration
                               const double hcrit, ///< water depth threshold
                               const double hM,    ///< local water depth
                               const double huM,   ///< local flux
                               const double hvM,   ///< local flux
                               const double hP,    ///< neighbour water depth
                               const double huP,   ///< neighbour flux
                               const double hvP,   ///< neighbour flux
                               RoeState *roe       ///< averaged Roe state
) {
  const double sqhM = sqrt(hM);
  const double sqhP = sqrt(hP);
  const double wsum = sqhM + sqhP;
  double uM, vM;
  double uP, vP;
  evaluateVelocity(hcrit, hM, huM, hvM, &uM, &vM);
  evaluateVelocity(hcrit, hP, huP, hvP, &uP, &vP);
  roe->h = sqhM * sqhP;
  roe->u = (uM * sqhM + uP * sqhP) / wsum;
  roe->v = (vM * sqhM + vP * sqhP) / wsum;
  roe->c = sqrt(gra * (hM + hP) * 0.5);
#ifdef DEBUG
  mexPrintf("Roe averaged states\n");
  mexPrintf("h = %f\nu = %f\nv = %f\nc = %f\n", roe->h, roe->u, roe->v, roe->c);
#endif
  return;
}
/* Wave strengths (alpha) of the three Roe waves across a face.
 * The momentum is first rotated into the face frame (qn normal, qv
 * tangential), velocities are recovered with the dry-cell guard, and the
 * standard Roe decomposition is applied:
 *   alpha[0], alpha[2]: acoustic waves; alpha[1]: shear wave. */
void evaluateRoeWaveStrength(const double hcrit, ///< water depth threshold
                             const double hM, ///< local water depth
                             const double huM, ///< local flux
                             const double hvM, ///< local flux
                             const double hP, ///< neighbour water depth
                             const double huP, ///< neighbour flux
                             const double hvP, ///< neighbour flux
                             const double nx, ///< outward normal vector
                             const double ny, ///< outward normal vector
                             const RoeState *roe, ///< roe averaged states
                             double *alpha ///< wave strength
) {
  /* rotate momentum into the (normal, tangential) frame of the face */
  const double qnM = huM * nx + hvM * ny;
  const double qnP = huP * nx + hvP * ny;
  const double qvM = -huM * ny + hvM * nx;
  const double qvP = -huP * ny + hvP * nx;
  double unM, vnM, unP, vnP;
  evaluateVelocity(hcrit, hM, qnM, qvM, &unM, &vnM);
  evaluateVelocity(hcrit, hP, qnP, qvP, &unP, &vnP);
  alpha[0] = 0.5 * (hP - hM - roe->h / roe->c * (unP - unM));
  alpha[1] = roe->h * (vnP - vnM);
  alpha[2] = 0.5 * (hP - hM + roe->h / roe->c * (unP - unM));
#ifdef DEBUG
  mexPrintf("Wave strength\n");
  /* Fixed: this block previously referenced uM/vM/uP/vP, which are not
   * declared in this function, so -DDEBUG builds failed to compile. */
  mexPrintf("local velocity [%f, %f]\n", unM, vnM);
  mexPrintf("neigh velocity [%f, %f]\n", unP, vnP);
  mexPrintf("alpha = [%f, %f, %f]\n", alpha[0], alpha[1], alpha[2]);
#endif
  return;
}
/* Roe approximate Riemann solver for the 2-D shallow water equations.
 * Writes into the single-element outputs Fh/Fhu/Fhv the face-normal
 * numerical flux
 *   0.5 * [ Fn(qM) + Fn(qP) - sum_k |lambda_k| * alpha_k * r_k ]
 * with the three Roe eigenvalues (un-c, un, un+c) taken from *roe. */
void evaluateRoeSolver(const double hmin, ///< water depth threshold
                       const double gra, ///< gravity acceleration
                       const double hM, ///< local water depth
                       const double huM, ///< local flux
                       const double hvM, ///< local flux
                       const double hP, ///< neighbour water depth
                       const double huP, ///< neighbour flux
                       const double hvP, ///< neighbour flux
                       const double nx, ///< outward normal vector
                       const double ny, ///< outward normal vector
                       const RoeState *roe, ///< roe averaged states
                       double *Fh, ///< roe flux on h
                       double *Fhu, ///< roe flux on hu
                       double *Fhv ///< roe flux on hv
) {
  double EM[3], GM[3];
  /* physical flux of the local (M) side, projected on the face normal */
  evaluateFluxTerm2d(hmin, gra, hM, huM, hvM, EM, GM);
  Fh[0] = EM[0] * nx + GM[0] * ny;
  Fhu[0] = EM[1] * nx + GM[1] * ny;
  Fhv[0] = EM[2] * nx + GM[2] * ny;
  /* add the neighbour (P) side flux; EM/GM are reused as scratch */
  evaluateFluxTerm2d(hmin, gra, hP, huP, hvP, EM, GM);
  Fh[0] += EM[0] * nx + GM[0] * ny;
  Fhu[0] += EM[1] * nx + GM[1] * ny;
  Fhv[0] += EM[2] * nx + GM[2] * ny;
  double alpha[3];
  evaluateRoeWaveStrength(hmin, hM, huM, hvM, hP, huP, hvP, nx, ny, roe, alpha);
  /* upwind dissipation: absolute eigenvalue per wave family */
  const double unroe = roe->u * nx + roe->v * ny;
  const double lambda1 = fabs(unroe - roe->c);
  const double lambda2 = fabs(unroe);
  const double lambda3 = fabs(unroe + roe->c);
#ifdef DEBUG
  mexPrintf("eigenvalue lambda = [%f, %f, %f]\n", lambda1, lambda2, lambda3);
#endif
  /* wave 1 (un - c) */
  Fh[0] -= lambda1 * alpha[0];
  Fhu[0] -= lambda1 * alpha[0] * (roe->u - roe->c * nx);
  Fhv[0] -= lambda1 * alpha[0] * (roe->v - roe->c * ny);
  /* wave 2 (shear, un): contributes to the momentum components only */
  Fhu[0] += lambda2 * alpha[1] * ny;
  Fhv[0] -= lambda2 * alpha[1] * nx;
  /* wave 3 (un + c) */
  Fh[0] -= lambda3 * alpha[2];
  Fhu[0] -= lambda3 * alpha[2] * (roe->u + roe->c * nx);
  Fhv[0] -= lambda3 * alpha[2] * (roe->v + roe->c * ny);
  /* average of the two sides */
  Fh[0] *= 0.5;
  Fhu[0] *= 0.5;
  Fhv[0] *= 0.5;
#ifdef DEBUG
  mexPrintf("Roe flux = [%f, %f, %f]\n", Fh[0], Fhu[0], Fhv[0]);
#endif
  return;
}
/* MEX gateway: evaluates the Roe numerical flux at every face node.
 * Output plhs[0] is a TNfp x K x 3 double array holding the three slabs
 * [Fh, Fhu, Fhv] contiguously. Faces where both sides are dry
 * (h <= hmin) keep a zero flux — this relies on mxCreateNumericArray
 * zero-initializing the output buffer. */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
  FluxSolver solver = ConvertInputMexVariable2d(nlhs, nrhs, plhs, prhs);
  const size_t NdimOut = 3;
  const size_t K = solver.K;
  const size_t TNfp = solver.TNfp;
  const mwSize dimOut[3] = {TNfp, K, 3};
  plhs[0] = mxCreateNumericArray(NdimOut, dimOut, mxDOUBLE_CLASS, mxREAL);
  double *Fh = mxGetPr(plhs[0]);
  /* the hu and hv flux slabs follow the h slab in memory */
  double *Fqx = Fh + TNfp * K;
  double *Fqy = Fh + 2 * TNfp * K;
  /* parallel over elements only when not debugging (mexPrintf output
   * would interleave otherwise) */
#ifndef DEBUG
#ifdef _OPENMP
#pragma omp parallel for num_threads(DG_THREADS)
#endif
#endif
  for (int k = 0; k < K; k++) {
    for (int n = 0; n < TNfp; n++) {
      const size_t sk = k * TNfp + n;
      const double hM = solver.hM[sk];
      const double hP = solver.hP[sk];
      const double huM = solver.huM[sk];
      const double huP = solver.huP[sk];
      const double hvM = solver.hvM[sk];
      const double hvP = solver.hvP[sk];
#ifdef DEBUG
      /* NOTE(review): this print labels the node index n as "sk" */
      mexPrintf("k = %d, sk = %d\n", k, n);
      mexPrintf("h = [%f, %f]\nhu = [%f, %f]\nhv = [%f, %f]\n", hM, hP, huM,
                huP, hvM, hvP);
#endif
      /* compute the flux only when at least one side is wet */
      if ((hM > solver.hmin) || (hP > solver.hmin)) {
        const double nx = solver.nx[sk];
        const double ny = solver.ny[sk];
        RoeState roe;
        evaluateRoeAverage(solver.gra, solver.hmin, hM, huM, hvM, hP, huP, hvP,
                           &roe);
        evaluateRoeSolver(solver.hmin, solver.gra, hM, huM, hvM, hP, huP, hvP,
                          nx, ny, &roe, Fh + sk, Fqx + sk, Fqy + sk);
      }
    }
  }
  return;
}
naive_math_impl.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
// Repack a row-major M x K matrix into the "c4" layout: rows are grouped
// in panels of 4 that are interleaved column by column. Rows beyond M are
// padded with zeros; when pack_k is true, K is also zero-padded up to a
// multiple of 4.
template <typename type>
static void basic_trans_mat_to_c4(const type* input,
                                  type* output,
                                  const int ldin,
                                  const int M,
                                  const int K,
                                  bool pack_k) {
  const int m_panels = (M + 3) / 4;
  const int k_round = pack_k ? (K + 3) / 4 * 4 : K;
  // shared zero row used for the padding rows of the last panel
  type* zero_buf = new type[K];
  memset(zero_buf, 0, K * sizeof(type));
  for (int p = 0; p < m_panels; ++p) {
    const type* rows[4];
    for (int r = 0; r < 4; ++r) {
      const int row = 4 * p + r;
      rows[r] = (row < M) ? (input + row * ldin) : zero_buf;
    }
    // interleave the four rows column by column
    for (int j = 0; j < K; ++j) {
      for (int r = 0; r < 4; ++r) {
        *output++ = *rows[r]++;
      }
    }
    // optional zero padding of the K dimension
    for (int j = K; j < k_round; ++j) {
      for (int r = 0; r < 4; ++r) {
        *output++ = static_cast<type>(0);
      }
    }
  }
  delete[] zero_buf;
}
// Repack a row-major M x K matrix into the "c8" layout: rows are grouped
// in panels of 8 that are interleaved column by column. Rows beyond M are
// padded with zeros; when pack_k is true, K is also zero-padded up to a
// multiple of 8.
template <typename type>
static void basic_trans_mat_to_c8(const type* input,
                                  type* output,
                                  const int ldin,
                                  const int M,
                                  const int K,
                                  bool pack_k) {
  const int m_round = (M + 7) / 8 * 8;
  int k_round = (K + 7) / 8 * 8;
  if (!pack_k) {
    k_round = K;
  }
  const int m_loop = m_round / 8;
  // Fix: `type zero_buf[K]` was a variable-length array — not standard
  // C++ and a stack-overflow risk for large K. Allocate on the heap
  // instead, matching basic_trans_mat_to_c4.
  type* zero_buf = new type[K];
  memset(zero_buf, 0, K * sizeof(type));
  for (int i = 0; i < m_loop; ++i) {
    const type* in0 = input + i * 8 * ldin;
    const type* in1 = in0 + ldin;
    const type* in2 = in1 + ldin;
    const type* in3 = in2 + ldin;
    const type* in4 = in3 + ldin;
    const type* in5 = in4 + ldin;
    const type* in6 = in5 + ldin;
    const type* in7 = in6 + ldin;
    if (8 * (i + 1) - M > 0) {
      // Redirect the rows that fall beyond M to the shared zero row; the
      // switch intentionally falls through so every trailing row is zeroed.
      switch (8 * (i + 1) - M) {
        case 7:
          in1 = zero_buf;
        case 6:
          in2 = zero_buf;
        case 5:
          in3 = zero_buf;
        case 4:
          in4 = zero_buf;
        case 3:
          in5 = zero_buf;
        case 2:
          in6 = zero_buf;
        case 1:
          in7 = zero_buf;
        default:
          break;
      }
    }
    // interleave the eight rows column by column
    for (int j = 0; j < K; ++j) {
      *output++ = *in0++;
      *output++ = *in1++;
      *output++ = *in2++;
      *output++ = *in3++;
      *output++ = *in4++;
      *output++ = *in5++;
      *output++ = *in6++;
      *output++ = *in7++;
    }
    // optional zero padding of the K dimension
    for (int j = K; j < k_round; ++j) {
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
    }
  }
  delete[] zero_buf;
}
// Naive reference GEMM whose dense result is repacked into the c4 layout.
// Computes tmp = alpha * op(A) * op(B) + bias with optional ReLU, then
// transposes tmp into `c` via basic_trans_mat_to_c4. The beta term scales
// a freshly zeroed buffer, so it contributes nothing to the result.
template <typename type, typename type2>
static void basic_gemm_c4(bool trans_a,
                          bool trans_b,
                          int m,
                          int n,
                          int k,
                          type2 alpha,
                          const type* a,
                          int lda,
                          const type* b,
                          int ldb,
                          type2 beta,
                          type2* c,
                          int ldc,
                          const type2* bias,
                          bool flag_bias = false,
                          bool flag_relu = false) {
  type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2)));
  memset(tmp_c, 0, m * ldc * sizeof(type2));
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int row = 0; row < m; ++row) {
    const type2 row_bias = flag_bias ? bias[row] : static_cast<type2>(0);
    for (int col = 0; col < n; ++col) {
      type2 acc = static_cast<type2>(0);
      for (int p = 0; p < k; ++p) {
        const type av = trans_a ? a[p * lda + row] : a[row * lda + p];
        const type bv = trans_b ? b[col * ldb + p] : b[p * ldb + col];
        acc += av * bv;
      }
      const type2 value = alpha * acc + beta * tmp_c[row * ldc + col] + row_bias;
      if (flag_relu) {
        tmp_c[row * ldc + col] = value > (type2)0 ? value : (type2)0;
      } else {
        tmp_c[row * ldc + col] = value;
      }
    }
  }
  //! repack the dense result into the c4 layout
  basic_trans_mat_to_c4(tmp_c, c, ldc, m, n, false);
  free(tmp_c);
}
template <typename type, typename type2>
static void basic_gemm_c8(bool trans_a,
                          bool trans_b,
                          int m,
                          int n,
                          int k,
                          type2 alpha,
                          const type* a,
                          int lda,
                          const type* b,
                          int ldb,
                          type2 beta,
                          type2* c,
                          int ldc,
                          const type2* bias,
                          bool flag_bias = false,
                          bool flag_relu = false) {
  // Reference GEMM: computes alpha * op(A) * op(B) (+ optional per-row bias,
  // optional ReLU) into a zeroed row-major scratch buffer, then repacks the
  // result into the interleaved "c8" layout expected by optimized kernels.
  // Note the beta term always multiplies the freshly zeroed scratch buffer,
  // so beta has no observable effect here.
  // static_cast (not reinterpret_cast) is the correct cast from void*.
  type2* tmp_c = static_cast<type2*>(malloc(m * ldc * sizeof(type2)));
  if (tmp_c == nullptr) {
    return;  // allocation failed: leave c untouched instead of crashing
  }
  memset(tmp_c, 0, m * ldc * sizeof(type2));
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];  // one bias value per output row
    }
    for (int j = 0; j < n; ++j) {
      auto sum = static_cast<type2>(0);
      for (int l = 0; l < k; ++l) {
        type av;
        type bv;
        if (trans_a) {
          av = a[l * lda + i];
        } else {
          av = a[i * lda + l];
        }
        if (trans_b) {
          bv = b[j * ldb + l];
        } else {
          bv = b[l * ldb + j];
        }
        sum += av * bv;
      }
      type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
      if (flag_relu) {
        tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
      } else {
        tmp_c[i * ldc + j] = tmp;
      }
    }
  }
  //! trans c to c8 (the original comment said "c4" — this is the c8 path)
  basic_trans_mat_to_c8(tmp_c, c, ldc, m, n, false);
  free(tmp_c);
}
template <typename type, typename type2>
static void basic_gemm(bool trans_a,
                       bool trans_b,
                       int m,
                       int n,
                       int k,
                       type2 alpha,
                       const type* a,
                       int lda,
                       const type* b,
                       int ldb,
                       type2 beta,
                       type2* c,
                       int ldc,
                       const type2* bias,
                       bool flag_bias = false,
                       int flag_act = 0,
                       float six = 6.f,
                       float leakey_relu_alpha = 1.f,
                       float scale = 6.f,
                       float offset = 3.f,
                       float threshold = 6.f) {
  // Reference GEMM with fused activation:
  //   c = act(alpha * op(A) * op(B) + beta * c + bias)
  // flag_act: 0 = none, 1 = relu, 2 = relu6 (clamped at `six`),
  // 4 = leaky relu (`leakey_relu_alpha`), 10 = hard swish (`scale`,
  // `offset`, `threshold`). Defect fixed: the default was the bool
  // literal `false` for an int parameter; `0` states the intent.
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];  // one bias value per output row
    }
    for (int j = 0; j < n; ++j) {
      auto sum = static_cast<type2>(0);
      for (int l = 0; l < k; ++l) {
        type av;
        type bv;
        if (trans_a) {
          av = a[l * lda + i];
        } else {
          av = a[i * lda + l];
        }
        if (trans_b) {
          bv = b[j * ldb + l];
        } else {
          bv = b[l * ldb + j];
        }
        sum += av * bv;
      }
      type2 tmp = alpha * sum + beta * c[i * ldc + j] + bias_data;
      if (flag_act > 0) {
        if (flag_act == 1) {  // relu
          c[i * ldc + j] =
              tmp > static_cast<type2>(0) ? tmp : static_cast<type2>(0);
        } else if (flag_act == 2) {  // relu 6
          c[i * ldc + j] =
              tmp > static_cast<type2>(0) ? tmp : static_cast<type2>(0);
          c[i * ldc + j] = c[i * ldc + j] < static_cast<type2>(six)
                               ? c[i * ldc + j]
                               : static_cast<type2>(six);
        } else if (flag_act == 4) {  // leaky relu
          c[i * ldc + j] = tmp < static_cast<type2>(0)
                               ? static_cast<type2>(tmp * leakey_relu_alpha)
                               : tmp;
        } else if (flag_act == 10) {  // hard swish
          // x * clip(x + offset, 0, threshold) / scale, expanded by cases.
          auto tmp1 = tmp + offset;
          if (tmp1 > 0) {
            if (tmp1 < threshold) {
              c[i * ldc + j] = static_cast<type2>(tmp1 * tmp * 1.0 / scale);
            } else {
              c[i * ldc + j] =
                  static_cast<type2>(threshold * tmp * 1.0 / scale);
            }
          } else {
            if (threshold > 0) {
              c[i * ldc + j] = static_cast<type2>(0);
            } else {
              c[i * ldc + j] =
                  static_cast<type2>(threshold * tmp * 1.0 / scale);
            }
          }
        }
      } else {
        c[i * ldc + j] = tmp;
      }
    }
  }
}
template <typename type, typename type2>
static void basic_gemv(int m,
                       int k,
                       const type* a,
                       const type* b,
                       const type2* bias,
                       type2* c,
                       type2 alpha,
                       type2 beta,
                       bool trans_a = false,
                       bool flag_bias = false,
                       int flag_act = 0,
                       float six = 6.f,
                       float leakey_relu_alpha = 1.f,
                       float scale = 6.f,
                       float offset = 3.f,
                       float threshold = 6.f) {
  // Reference matrix-vector product with fused activation:
  //   c = act(alpha * op(A) * b + beta * c + bias)
  // A is m x k (row-major; trans_a reads it column-major). flag_act codes
  // match basic_gemm: 0 none, 1 relu, 2 relu6, 4 leaky relu, 10 hard swish.
  // Defect fixed: default was the bool literal `false` for an int parameter.
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];  // one bias value per output element
    }
    auto sum = static_cast<type2>(0);
    for (int j = 0; j < k; ++j) {
      type av;
      if (trans_a) {
        av = a[j * m + i];
      } else {
        av = a[i * k + j];
      }
      sum += av * b[j];
    }
    type2 tmp = alpha * sum + beta * c[i] + bias_data;
    if (flag_act > 0) {
      if (flag_act == 1) {  // relu
        c[i] = tmp > (type2)0 ? tmp : (type2)0;
      } else if (flag_act == 2) {  // relu 6
        c[i] = tmp > (type2)0 ? tmp : (type2)0;
        c[i] = c[i] < six ? c[i] : six;  // ut compute
      } else if (flag_act == 4) {  // leakey relu
        c[i] = tmp < (type2)0 ? (type2)(tmp * leakey_relu_alpha) : tmp;
      } else if (flag_act == 10) {  // hard_swish
        c[i] = std::min(static_cast<type2>(threshold),
                        std::max(static_cast<type2>(0),
                                 static_cast<type2>(tmp + offset))) *
               static_cast<type2>(tmp * 1.0 / scale);
      }
    } else {
      c[i] = tmp;
    }
  }
}
/**
* \brief basic direct convolution function
*/
//! for float, dtype1 and type2 is float
//! for int8, dytpe1 is char, dtype2 is int
template <typename Dtype1, typename Dtype2>
static void conv_basic(const Dtype1* din,
                       Dtype2* dout,
                       int num,
                       int chout,
                       int hout,
                       int wout,
                       int chin,
                       int hin,
                       int win,
                       const Dtype1* weights,
                       const Dtype2* bias,
                       int group,
                       int kernel_w,
                       int kernel_h,
                       int stride_w,
                       int stride_h,
                       int dila_w,
                       int dila_h,
                       int pad_w,
                       int pad_h,
                       bool flag_bias,
                       int act_type,
                       float six = 6.f,
                       float scale = 1.f,
                       const float hard_scale = 6.f,
                       const float offset = 3.f,
                       const float threshold = 6.f) {
  // Naive reference convolution (NCHW layout, grouped, dilated, zero-padded)
  // used to validate optimized kernels.
  // act_type: 0 = none, 1 = relu, 2 = relu6 (clamped at `six`),
  // 4 = leaky relu (negative slope taken from `scale`), 10 = hard swish
  // (uses `offset`, `threshold`, `hard_scale`); other values only print.
  // beta == 0: the previous contents of dout are discarded (bias + 0 * old).
  Dtype2 beta = 0;
  auto src_data = din;
  auto dst_data_ref = dout;
  auto weights_data = weights;
  auto with_bias = flag_bias;
  auto bias_data = bias;
  int in_num = num;
  int out_channels = chout;
  int out_h = hout;
  int out_w = wout;
  int in_channel = chin;
  int in_h = hin;
  int in_w = win;
  int out_c_group = out_channels / group;
  int in_c_group = in_channel / group;
  for (int n = 0; n < in_num; ++n) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
    for (int g = 0; g < group; ++g) {
      for (int oc = 0; oc < out_c_group; ++oc) {
        for (int oh = 0; oh < out_h; ++oh) {
          for (int ow = 0; ow < out_w; ++ow) {
            // Flat NCHW index of the output element this iteration owns.
            int out_idx = n * group * out_c_group * out_h * out_w +
                          g * out_c_group * out_h * out_w + oc * out_h * out_w +
                          oh * out_w + ow;
            Dtype2 bias_d = with_bias ? (bias_data[g * out_c_group + oc]) : 0;
            dst_data_ref[out_idx] = bias_d + dst_data_ref[out_idx] * beta;
            for (int ic = 0; ic < in_c_group; ++ic) {
              for (int kh = 0; kh < kernel_h; ++kh) {
                for (int kw = 0; kw < kernel_w; ++kw) {
                  // Input coordinate sampled by this kernel tap.
                  int iw = ow * stride_w - pad_w + kw * (dila_w);
                  int ih = oh * stride_h - pad_h + kh * (dila_h);
                  // Zero padding: taps outside the input contribute nothing.
                  if (iw < 0 || iw >= in_w) continue;
                  if (ih < 0 || ih >= in_h) continue;
                  int iidx = n * in_channel * in_h * in_w +
                             g * in_c_group * in_h * in_w + ic * in_h * in_w +
                             ih * in_w + iw;
                  int widx =
                      g * out_c_group * in_c_group * kernel_h * kernel_w +
                      oc * in_c_group * kernel_h * kernel_w +
                      ic * kernel_h * kernel_w + kh * kernel_w + kw;
                  dst_data_ref[out_idx] += src_data[iidx] * weights_data[widx];
                }
              }
            }
            if (act_type > 0) {
              // 1-relu 2-relu6 4-leakyrelu
              if (act_type == 1) {
                dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)0;
              } else if (act_type == 2) {
                dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)0;
                dst_data_ref[out_idx] = dst_data_ref[out_idx] < (Dtype2)six
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)six;
              } else if (act_type == 4) {
                // Leaky relu: negative slope comes from `scale`, not from a
                // dedicated alpha parameter.
                dst_data_ref[out_idx] =
                    dst_data_ref[out_idx] > (Dtype2)0
                        ? dst_data_ref[out_idx]
                        : (Dtype2)(dst_data_ref[out_idx] * scale);
              } else if (act_type == 10) {
                // Hard swish: x * clip(x + offset, 0, threshold) / hard_scale,
                // expanded by cases (the * 1.0 promotes to double).
                auto tmp = dst_data_ref[out_idx] + offset;
                auto tmp1 = dst_data_ref[out_idx] * 1.0 / hard_scale;
                if (tmp > 0) {
                  if (tmp < threshold) {
                    dst_data_ref[out_idx] = static_cast<Dtype2>(tmp * tmp1);
                  } else {
                    dst_data_ref[out_idx] =
                        static_cast<Dtype2>(threshold * tmp1);
                  }
                } else {
                  if (threshold > 0) {
                    dst_data_ref[out_idx] = static_cast<Dtype2>(0);
                  } else {
                    dst_data_ref[out_idx] =
                        static_cast<Dtype2>(threshold * tmp1);
                  }
                }
              } else {
                printf("this act type: %d does not support \n", act_type);
              }
            }
          }
        }
      }
    }
  }
}
template <typename Dtype>
static void fill_bias_relu(Dtype* tensor,
                           const Dtype* bias,
                           int channel,
                           int channel_size,
                           bool flag_bias,
                           bool flag_relu) {
  // Add one bias value per channel (when flag_bias) and optionally clamp
  // every element to be non-negative (when flag_relu). Channels are laid
  // out contiguously, `channel_size` elements each.
  Dtype* chan_ptr = tensor;
  for (int c = 0; c < channel; ++c) {
    const Dtype bias_val = flag_bias ? bias[c] : 0;
    for (int idx = 0; idx < channel_size; ++idx) {
      chan_ptr[idx] += bias_val;
      if (flag_relu) {
        chan_ptr[idx] = (chan_ptr[idx] > 0) ? chan_ptr[idx] : 0.f;
      }
    }
    chan_ptr += channel_size;
  }
}
template <typename Dtype>
static void do_relu(Dtype* tensor, int size) {
  // In-place ReLU: every element that is not strictly positive becomes 0.
  for (Dtype* p = tensor; p != tensor + size; ++p) {
    *p = (*p > 0) ? *p : (Dtype)0;
  }
}
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
  // Single unsigned compare replaces (a >= 0 && a < b): a negative `a`
  // wraps to a huge unsigned value and therefore fails ua < ub.
  const unsigned ua = static_cast<unsigned>(a);
  const unsigned ub = static_cast<unsigned>(b);
  return ua < ub;
}
template <typename Dtype>
static void col2im(const Dtype* data_col,
                   const int channels,
                   const int height,
                   const int width,
                   const int kernel_h,
                   const int kernel_w,
                   const int pad_h0,
                   const int pad_h1,
                   const int pad_w0,
                   const int pad_w1,
                   const int stride_h,
                   const int stride_w,
                   const int dilation_h,
                   const int dilation_w,
                   Dtype* data_im) {
  // Inverse of im2col: scatter-accumulate the column buffer back into the
  // image. Overlapping patches sum (hence the +=), which is what a
  // transposed convolution needs; the image is zeroed first.
  memset(data_im, 0, height * width * channels * sizeof(Dtype));
  const int output_h =
      (height + pad_h0 + pad_h1 - (dilation_h * (kernel_h - 1) + 1)) /
          stride_h +
      1;
  const int output_w =
      (width + pad_w0 + pad_w1 - (dilation_w * (kernel_w - 1) + 1)) / stride_w +
      1;
  const int channel_size = height * width;
  // data_col is walked strictly sequentially; data_im advances one channel
  // plane per outer iteration.
  for (int channel = channels; channel--; data_im += channel_size) {
    for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
      for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
        int input_row = -pad_h0 + kernel_row * dilation_h;
        for (int output_rows = output_h; output_rows; output_rows--) {
          if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
            // Entire output row maps into padding: skip its column entries
            // but keep data_col in sync.
            data_col += output_w;
          } else {
            int input_col = -pad_w0 + kernel_col * dilation_w;
            for (int output_col = output_w; output_col; output_col--) {
              if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                data_im[input_row * width + input_col] += *data_col;
              }
              data_col++;
              input_col += stride_w;
            }
          }
          input_row += stride_h;
        }
      }
    }
  }
}
//! for float, dtype1 and type2 is float
//! for int8, dytpe1 is char, dtype2 is int
template <typename Dtype1, typename Dtype2>
void deconv_basic(const Dtype1* din,
                  Dtype2* dout,
                  int num,
                  int chout,
                  int hout,
                  int wout,
                  int chin,
                  int hin,
                  int win,
                  const Dtype1* weights,
                  const Dtype2* bias,
                  int group,
                  int kernel_w,
                  int kernel_h,
                  int stride_w,
                  int stride_h,
                  int dila_w,
                  int dila_h,
                  int pad_w0,
                  int pad_w1,
                  int pad_h0,
                  int pad_h1,
                  bool flag_bias,
                  bool flag_relu) {
  // Reference transposed convolution: per group, a GEMM (weights^T * input)
  // builds the column buffer, then col2im scatters it into the output image;
  // bias/ReLU are fused at the end via fill_bias_relu.
  int m = chout * kernel_w * kernel_h / group;
  int n = hin * win;
  int k = chin / group;
  int group_size_in = win * hin * chin / group;
  int group_size_coldata = m * n;
  int group_size_weights = chin * chout * kernel_w * kernel_h / (group * group);
  // A 1x1, stride-1, pad-0, dilation-1 deconvolution degenerates to a plain
  // GEMM straight into the output, so col2im (and the scratch copy) is
  // skipped.
  bool flag_1x1s1p1 = (kernel_w == 1) && (kernel_h == 1) && (stride_h == 1) &&
                      (stride_w == 1) && (pad_w0 == 0) && (pad_h0 == 0) &&
                      (pad_w1 == 0) && (pad_h1 == 0) && (dila_w == 1) &&
                      (dila_h == 1);
  // BUG FIX: the workspace holds Dtype2 elements; sizing it with
  // sizeof(float) under-allocates whenever Dtype2 is wider than float
  // (e.g. double or int accumulators), corrupting the heap in the memset
  // below.
  Dtype2* workspace_ptr =
      static_cast<Dtype2*>(malloc(sizeof(Dtype2) * m * n * group));
  if (workspace_ptr == nullptr) {
    return;  // allocation failed: leave dout untouched
  }
  for (int i = 0; i < num; ++i) {
    const Dtype1* din_batch = din + i * chin * hin * win;
    Dtype2* dout_batch = dout + i * chout * hout * wout;
    Dtype2* col_data = workspace_ptr;
    if (flag_1x1s1p1) {
      col_data = dout_batch;  // write GEMM output directly into dout
    }
    memset(col_data, 0, sizeof(Dtype2) * group_size_coldata * group);
    for (int g = 0; g < group; ++g) {
      const Dtype1* din_group = din_batch + g * group_size_in;
      const Dtype1* weights_group = weights + g * group_size_weights;
      Dtype2* coldata_group = col_data + g * group_size_coldata;
      // col = W^T (m x k) * x (k x n), alpha = 1, beta = 0, no bias/act.
      basic_gemm<Dtype1, Dtype2>(true,
                                 false,
                                 m,
                                 n,
                                 k,
                                 1,
                                 weights_group,
                                 m,
                                 din_group,
                                 n,
                                 0,
                                 coldata_group,
                                 n,
                                 nullptr,
                                 false,
                                 false);
    }
    if (!flag_1x1s1p1) {
      col2im(col_data,
             chout,
             hout,
             wout,
             kernel_h,
             kernel_w,
             pad_h0,
             pad_h1,
             pad_w0,
             pad_w1,
             stride_h,
             stride_w,
             dila_h,
             dila_w,
             dout_batch);
    }
    //! add bias
    if (flag_bias || flag_relu) {
      fill_bias_relu(
          dout_batch, bias, chout, wout * hout, flag_bias, flag_relu);
    }
  }
  free(workspace_ptr);
}
float deformable_bilinear(const float* bottom_data,
                          const int data_width,
                          const int height,
                          const int width,
                          float h,
                          float w) {
  // Bilinear sample of bottom_data (row stride data_width) at fractional
  // position (h, w). Coordinates at or beyond the last row/column collapse
  // onto that row/column; callers are expected to keep h and w non-negative.
  int h0 = floor(h);
  int w0 = floor(w);
  int h1;
  int w1;
  if (h0 >= height - 1) {
    h0 = height - 1;
    h1 = h0;
    h = static_cast<float>(h0);
  } else {
    h1 = h0 + 1;
  }
  if (w0 >= width - 1) {
    w0 = width - 1;
    w1 = w0;
    w = static_cast<float>(w0);
  } else {
    w1 = w0 + 1;
  }
  // Fractional distances to the low corner and the four corner samples.
  const float dh = h - h0;
  const float dw = w - w0;
  const float v00 = bottom_data[h0 * data_width + w0];
  const float v01 = bottom_data[h0 * data_width + w1];
  const float v10 = bottom_data[h1 * data_width + w0];
  const float v11 = bottom_data[h1 * data_width + w1];
  const float wt00 = (1 - dh) * (1 - dw);
  const float wt01 = (1 - dh) * dw;
  const float wt10 = dh * (1 - dw);
  const float wt11 = dh * dw;
  return wt00 * v00 + wt01 * v01 + wt10 * v10 + wt11 * v11;
}
//! for float, dtype1 and type2 is float
//! for int8, dytpe1 is char, dtype2 is int
template <typename Dtype1, typename Dtype2>
void deformable_conv_basic(const Dtype1* in_data,
                           const float* offset_data,
                           const float* mask_data,
                           Dtype2* out_data,
                           int num,
                           int chout,
                           int hout,
                           int wout,
                           int chin,
                           int hin,
                           int win,
                           const Dtype1* weights,
                           const Dtype2* bias,
                           int group,
                           int kernel_w,
                           int kernel_h,
                           int stride_w,
                           int stride_h,
                           int dila_w,
                           int dila_h,
                           int pad_w,
                           int pad_h,
                           bool flag_bias,
                           bool flag_relu,
                           bool modulated) {
  // Reference deformable convolution: each kernel tap samples the input at a
  // learned fractional offset via bilinear interpolation; when `modulated`
  // (deformable conv v2), the sample is additionally scaled by a per-tap
  // mask value. Output accumulates on top of the existing out_data contents.
  int out_c_group = chout / group;
  int in_c_group = chin / group;
  int in_size = hin * win;
  int out_size = hout * wout;
  int c_in_size = chin * in_size;
  int c_out_size = chout * out_size;
  int kernel_size = kernel_w * kernel_h;
  for (int n = 0; n < num; n++) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
    for (int g = 0; g < group; ++g) {
      for (int oc = 0; oc < out_c_group; ++oc) {
        for (int oh = 0; oh < hout; oh++) {
          for (int ow = 0; ow < wout; ow++) {
            int out_idx = n * c_out_size + g * out_c_group * out_size +
                          oc * out_size + oh * wout + ow;
            Dtype2 bias_d = flag_bias ? bias[g * out_c_group + oc] : 0;
            // Note: accumulates onto the caller-provided out_data value.
            out_data[out_idx] = bias_d + out_data[out_idx];
            for (int ic = 0; ic < in_c_group; ++ic) {
              for (int fh = 0; fh < kernel_h; fh++) {
                for (int fw = 0; fw < kernel_w; fw++) {
                  // Offset layout per (n, g): 2 * kernel_size planes of
                  // hout x wout (y-offset then x-offset for each tap).
                  const float* offset_data_ptr =
                      offset_data + n * group * 2 * kernel_size * out_size +
                      g * 2 * kernel_size * out_size;
                  const int data_offset_h_ptr =
                      ((2 * (fh * kernel_w + fw)) * hout + oh) * wout + ow;
                  const int data_offset_w_ptr =
                      ((2 * (fh * kernel_w + fw) + 1) * hout + oh) * wout + ow;
                  const float offset_h = offset_data_ptr[data_offset_h_ptr];
                  const float offset_w = offset_data_ptr[data_offset_w_ptr];
                  // NOTE(review): the sampling position uses
                  // kernel_w * dila_w / kernel_h * dila_h, which is constant
                  // across taps; the usual deformable-conv formulation uses
                  // fw * dila_w / fh * dila_h here — confirm against the
                  // optimized kernel this reference validates.
                  const float iw =
                      ow * stride_w - pad_w + kernel_w * dila_w + offset_w;
                  const float ih =
                      oh * stride_h - pad_h + kernel_h * dila_h + offset_h;
                  if (ih >= 0 && ih < hin && iw >= 0 && iw < win) {
                    // Sample position relative to the patch's top-left
                    // corner; cur_height/cur_width bound the remaining
                    // window passed to the bilinear sampler.
                    const float map_h = kernel_h * dila_h + offset_h;
                    const float map_w = kernel_w * dila_w + offset_w;
                    const int cur_height = hin - (oh * stride_h - pad_h);
                    const int cur_width = win - (ow * stride_w - pad_w);
                    const float* in_data_offset =
                        in_data + n * c_in_size +
                        (g * in_c_group + ic) * in_size +
                        (oh * stride_h - pad_h) * win + (ow * stride_w - pad_w);
                    float val = deformable_bilinear(in_data_offset,
                                                    win,
                                                    cur_height,
                                                    cur_width,
                                                    map_h,
                                                    map_w);
                    if (modulated) {
                      // use mask: one scalar per tap per output position
                      const float* mask_ptr =
                          mask_data + n * group * kernel_size * out_size +
                          g * kernel_size * out_size +
                          (fh * kernel_w + fw) * hout * wout + oh * wout + ow;
                      val *= mask_ptr[0];
                    }
                    int widx = g * out_c_group * in_c_group * kernel_size +
                               oc * in_c_group * kernel_size +
                               ic * kernel_size + fh * kernel_w + fw;
                    out_data[out_idx] += val * weights[widx];
                  }
                }
              }
            }
            if (flag_relu) {
              out_data[out_idx] = out_data[out_idx] > 0 ? out_data[out_idx] : 0;
            }
          }
        }
      }
    }
  }
}
|
box_coder_op.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
namespace paddle {
namespace operators {
enum class BoxCodeType { kEncodeCenterSize = 0, kDecodeCenterSize = 1 };
// Map the operator's "code_type" attribute string onto the codec enum;
// any other string is a fatal configuration error.
inline BoxCodeType GetBoxCodeType(const std::string& type) {
  if (type == "decode_center_size") {
    return BoxCodeType::kDecodeCenterSize;
  }
  if (type == "encode_center_size") {
    return BoxCodeType::kEncodeCenterSize;
  }
  PADDLE_THROW("Not support type %s.", type);
}
template <typename DeviceContext, typename T>
class BoxCoderKernel : public framework::OpKernel<T> {
 public:
  // Encode: express every target box as a (dx, dy, dw, dh) delta relative to
  // each prior (anchor) box, optionally divided by the prior's variance.
  // Output layout is row-major [row, col, len] with row = target index and
  // col = prior index. Boxes are stored as (x1, y1, x2, y2) per `len` slot.
  void EncodeCenterSize(const framework::Tensor* target_box,
                        const framework::Tensor* prior_box,
                        const framework::Tensor* prior_box_var,
                        const bool normalized, T* output) const {
    int64_t row = target_box->dims()[0];
    int64_t col = prior_box->dims()[0];
    int64_t len = prior_box->dims()[1];
    auto* target_box_data = target_box->data<T>();
    auto* prior_box_data = prior_box->data<T>();
    const T* prior_box_var_data = nullptr;
    if (prior_box_var) prior_box_var_data = prior_box_var->data<T>();
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(2)
#endif
    for (int64_t i = 0; i < row; ++i) {
      for (int64_t j = 0; j < col; ++j) {
        // (normalized == false) contributes +1: unnormalized pixel
        // coordinates are inclusive, so width = x2 - x1 + 1.
        T prior_box_width = prior_box_data[j * len + 2] -
                            prior_box_data[j * len] + (normalized == false);
        T prior_box_height = prior_box_data[j * len + 3] -
                             prior_box_data[j * len + 1] +
                             (normalized == false);
        T prior_box_center_x =
            (prior_box_data[j * len + 2] + prior_box_data[j * len]) / 2;
        T prior_box_center_y =
            (prior_box_data[j * len + 3] + prior_box_data[j * len + 1]) / 2;
        T target_box_center_x =
            (target_box_data[i * len + 2] + target_box_data[i * len]) / 2;
        T target_box_center_y =
            (target_box_data[i * len + 3] + target_box_data[i * len + 1]) / 2;
        T target_box_width = target_box_data[i * len + 2] -
                             target_box_data[i * len] + (normalized == false);
        T target_box_height = target_box_data[i * len + 3] -
                              target_box_data[i * len + 1] +
                              (normalized == false);
        size_t offset = i * col * len + j * len;
        // Center deltas scaled by prior size; log-space size ratios.
        output[offset] =
            (target_box_center_x - prior_box_center_x) / prior_box_width;
        output[offset + 1] =
            (target_box_center_y - prior_box_center_y) / prior_box_height;
        output[offset + 2] =
            std::log(std::fabs(target_box_width / prior_box_width));
        output[offset + 3] =
            std::log(std::fabs(target_box_height / prior_box_height));
        if (prior_box_var) {
          // Normalize each component by the prior's variance entry.
          output[offset] /= prior_box_var_data[j * len];
          output[offset + 1] /= prior_box_var_data[j * len + 1];
          output[offset + 2] /= prior_box_var_data[j * len + 2];
          output[offset + 3] /= prior_box_var_data[j * len + 3];
        }
      }
    }
  }
  // Decode: invert EncodeCenterSize — turn (dx, dy, dw, dh) deltas back into
  // corner-format boxes (x1, y1, x2, y2) using each prior box as reference.
  void DecodeCenterSize(const framework::Tensor* target_box,
                        const framework::Tensor* prior_box,
                        const framework::Tensor* prior_box_var,
                        const bool normalized, T* output) const {
    int64_t row = target_box->dims()[0];
    int64_t col = prior_box->dims()[0];
    int64_t len = prior_box->dims()[1];
    auto* target_box_data = target_box->data<T>();
    auto* prior_box_data = prior_box->data<T>();
    const T* prior_box_var_data = nullptr;
    if (prior_box_var) prior_box_var_data = prior_box_var->data<T>();
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(2)
#endif
    for (int64_t i = 0; i < row; ++i) {
      for (int64_t j = 0; j < col; ++j) {
        size_t offset = i * col * len + j * len;
        T prior_box_width = prior_box_data[j * len + 2] -
                            prior_box_data[j * len] + (normalized == false);
        T prior_box_height = prior_box_data[j * len + 3] -
                             prior_box_data[j * len + 1] +
                             (normalized == false);
        T prior_box_center_x =
            (prior_box_data[j * len + 2] + prior_box_data[j * len]) / 2;
        T prior_box_center_y =
            (prior_box_data[j * len + 3] + prior_box_data[j * len + 1]) / 2;
        T target_box_center_x = 0, target_box_center_y = 0;
        T target_box_width = 0, target_box_height = 0;
        if (prior_box_var) {
          // Variance multiplies the deltas before they are applied.
          target_box_center_x = prior_box_var_data[j * len] *
                                    target_box_data[offset] * prior_box_width +
                                prior_box_center_x;
          target_box_center_y = prior_box_var_data[j * len + 1] *
                                    target_box_data[offset + 1] *
                                    prior_box_height +
                                prior_box_center_y;
          target_box_width = std::exp(prior_box_var_data[j * len + 2] *
                                      target_box_data[offset + 2]) *
                             prior_box_width;
          target_box_height = std::exp(prior_box_var_data[j * len + 3] *
                                       target_box_data[offset + 3]) *
                              prior_box_height;
        } else {
          target_box_center_x =
              target_box_data[offset] * prior_box_width + prior_box_center_x;
          target_box_center_y = target_box_data[offset + 1] * prior_box_height +
                                prior_box_center_y;
          target_box_width =
              std::exp(target_box_data[offset + 2]) * prior_box_width;
          target_box_height =
              std::exp(target_box_data[offset + 3]) * prior_box_height;
        }
        // Back to corner format; the -1 (normalized == false) mirrors the
        // +1 used on the encode side for inclusive pixel coordinates.
        output[offset] = target_box_center_x - target_box_width / 2;
        output[offset + 1] = target_box_center_y - target_box_height / 2;
        output[offset + 2] =
            target_box_center_x + target_box_width / 2 - (normalized == false);
        output[offset + 3] =
            target_box_center_y + target_box_height / 2 - (normalized == false);
      }
    }
  }
  // Dispatch on the "code_type" attribute, allocating the [row, col, len]
  // output tensor first. TargetBox may carry at most one LoD level.
  void Compute(const framework::ExecutionContext& context) const override {
    auto* prior_box = context.Input<framework::Tensor>("PriorBox");
    auto* prior_box_var = context.Input<framework::Tensor>("PriorBoxVar");
    auto* target_box = context.Input<framework::LoDTensor>("TargetBox");
    auto* output_box = context.Output<framework::Tensor>("OutputBox");
    if (target_box->lod().size()) {
      PADDLE_ENFORCE_EQ(target_box->lod().size(), 1UL,
                        "Only support 1 level of LoD.");
    }
    auto row = target_box->dims()[0];
    auto col = prior_box->dims()[0];
    auto len = prior_box->dims()[1];
    output_box->mutable_data<T>({row, col, len}, context.GetPlace());
    auto code_type = GetBoxCodeType(context.Attr<std::string>("code_type"));
    bool normalized = context.Attr<bool>("box_normalized");
    T* output = output_box->data<T>();
    if (code_type == BoxCodeType::kEncodeCenterSize) {
      EncodeCenterSize(target_box, prior_box, prior_box_var, normalized,
                       output);
    } else if (code_type == BoxCodeType::kDecodeCenterSize) {
      DecodeCenterSize(target_box, prior_box, prior_box_var, normalized,
                       output);
    }
  }
};
} // namespace operators
} // namespace paddle
|
cpumasks.c | #define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <errno.h>
#include <string.h>
#include <sched.h>
#include <mpi.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifndef HOST_NAME_MAX
#define HOST_NAME_MAX 256
#endif
void print_mask(int rank)
{
	int i, mythread, thread_turn, nthreads, nrcpus = 1024;
	cpu_set_t *mask;
	size_t size;
	char hostname[HOST_NAME_MAX];

	/* Print, one OpenMP thread at a time, which cores this MPI task's
	 * threads are allowed to run on. */
	if ((gethostname(hostname, HOST_NAME_MAX) != 0)) {
		printf("Could not read host name!\n");
		/* Keep the later printf well-defined if gethostname failed. */
		strcpy(hostname, "unknown");
	}
#pragma omp parallel private(mask, size, mythread, i, nthreads, thread_turn)
	{
#ifdef _OPENMP
		mythread = omp_get_thread_num();
		nthreads = omp_get_num_threads();
#else
		mythread = 0;
		nthreads = 1;
#endif
		/* Threads take turns so their output lines do not interleave. */
		for (thread_turn = 0; thread_turn < nthreads; thread_turn++) {
#pragma omp barrier
			if (mythread == thread_turn) {
realloc:
				mask = CPU_ALLOC(nrcpus);
				size = CPU_ALLOC_SIZE(nrcpus);
				CPU_ZERO_S(size, mask);
				if (sched_getaffinity(0, size, mask) == -1) {
					CPU_FREE(mask);
					mask = NULL;
					if (errno == EINVAL && nrcpus < (1024 << 8)) {
						/* Mask too small for this kernel:
						 * grow it and retry. */
						nrcpus = nrcpus << 2;
						printf("nrcpus: %i\n", nrcpus);
						goto realloc;
					}
					perror("sched_getaffinity");
				}
				/* BUG FIX: only print from / free the mask when it
				 * is still valid. The original fell through after an
				 * unrecoverable failure and read the freed mask,
				 * then freed it a second time. */
				if (mask != NULL) {
					printf("%s: task %4i, thread %2i, ccount %2i, cores: ",
					       hostname, rank, mythread, CPU_COUNT_S(size, mask));
					for (i = 0; i < nrcpus; i++) {
						if (CPU_ISSET_S(i, size, mask)) {
							printf("%2i ", i);
						}
					}
					printf("\n");
					CPU_FREE(mask);
				}
			}
#pragma omp barrier
		}
	} /* end of task specific part */
}
int main(int argc, char *argv[])
{
	int my_rank;
	int n_tasks;
	int thread_support;
	int turn;

	/* Funneled model: only the master thread will make MPI calls. */
	MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &thread_support);
	if (thread_support < MPI_THREAD_FUNNELED) {
		printf("Thread funneled not supported!\n");
		MPI_Abort(MPI_COMM_WORLD, -1);
	}
	MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
	MPI_Comm_size(MPI_COMM_WORLD, &n_tasks);

	/* Ranks take turns printing so their output does not interleave. */
	for (turn = 0; turn < n_tasks; turn++) {
		MPI_Barrier(MPI_COMM_WORLD);
		if (my_rank == turn)
			print_mask(my_rank);
		fflush(stdout);
		MPI_Barrier(MPI_COMM_WORLD);
	}

	MPI_Finalize();
	return 0;
}
|
main.c | /***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include "../../common/parboil.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "convert_dataset.h"
#include "file.h"
#include "BenchmarksUtil.h"
#define ERROR_THRESHOLD 0.05
double t_start, t_end, t_start_GPU, t_end_GPU;
float *h_Ax_vector_GPU, *h_Ax_vector_CPU;
int N;
typedef float DATA_TYPE;
int compareResults(DATA_TYPE *A, DATA_TYPE *A_GPU) {
  /* Count elements whose CPU/GPU percent difference exceeds the global
   * ERROR_THRESHOLD over the first N entries, report and return the count. */
  int i;
  int mismatches = 0;
  for (i = 0; i < N; i++) {
    if (percentDiff(A[i], A_GPU[i]) > ERROR_THRESHOLD) {
      mismatches++;
    }
  }
  /* print results */
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, mismatches);
  return mismatches;
}
static int generate_vector(float *x_vector, int dim) {
  /* Fill x_vector with dim pseudo-random values in [0, 1]. The fixed seed
   * makes runs reproducible. Always returns 0. */
  int idx;
  srand(54321);
  for (idx = 0; idx < dim; idx++) {
    x_vector[idx] = rand() / (float)RAND_MAX;
  }
  return 0;
}
/*
void jdsmv(int height, int len, float* value, int* perm, int* jds_ptr, int*
col_index, float* vector,
float* result){
int i;
int col,row;
int row_index =0;
int prem_indicator=0;
for (i=0; i<len; i++){
if (i>=jds_ptr[prem_indicator+1]){
prem_indicator++;
row_index=0;
}
if (row_index<height){
col = col_index[i];
row = perm[row_index];
result[row]+=value[i]*vector[col];
}
row_index++;
}
return;
}
*/
double spmvGPU(int argc, char **argv) {
  /* Offloaded SpMV benchmark: loads a sparse matrix (converted to JDS
   * layout) and a dense vector from the two input files, runs 50 rounds of
   * y = A*x via OpenMP target offload, and returns the elapsed wall time.
   * Side effects: sets the globals h_Ax_vector_GPU (result, freed by the
   * caller) and N (vector length). */
  // struct pb_TimerSet timers;
  struct pb_Parameters *parameters;
  // printf("CPU-based sparse matrix vector multiplication****\n");
  // printf("Original version by Li-Wen Chang <lchang20@illinois.edu> and
  //Shengzhao Wu<wu14@illinois.edu>\n");
  // printf("This version maintained by Chris Rodrigues ***********\n");
  parameters = pb_ReadParameters(&argc, argv);
  if ((parameters->inpFiles[0] == NULL) || (parameters->inpFiles[1] == NULL)) {
    fprintf(stderr, "Expecting two input filenames\n");
    exit(-1);
  }
  // pb_InitializeTimerSet(&timers);
  // pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  // parameters declaration
  int len;
  int depth;
  int dim;
  int pad = 1;
  int nzcnt_len;
  // host memory allocation
  // matrix
  float *h_data;
  int *h_indices;
  int *h_ptr;
  int *h_perm;
  int *h_nzcnt;
  // vector
  float *h_Ax_vector;
  float *h_x_vector;
  // load matrix from files
  // pb_SwitchToTimer(&timers, pb_TimerID_IO);
  // inputData(parameters->inpFiles[0], &len, &depth, &dim,&nzcnt_len,&pad,
  //    &h_data, &h_indices, &h_ptr,
  //    &h_perm, &h_nzcnt);
  int col_count;
  /* Convert the COO-format matrix file into JDS arrays (data, column
   * indices, row permutation, per-row nonzero counts). */
  coo_to_jds(parameters->inpFiles[0], // bcsstk32.mtx, fidapm05.mtx, jgl009.mtx
             1,                       // row padding
             pad,                     // warp size
             1,                       // pack size
             1,                       // is mirrored?
             0,                       // binary matrix
             0,                       // debug level [0:2]
             &h_data, &h_ptr, &h_nzcnt, &h_indices, &h_perm, &col_count, &dim,
             &len, &nzcnt_len, &depth);
  h_Ax_vector = (float *)malloc(sizeof(float) * dim);
  h_x_vector = (float *)malloc(sizeof(float) * dim);
  // generate_vector(h_x_vector, dim);
  input_vec(parameters->inpFiles[1], h_x_vector, dim);
  // pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  int p, i;
  t_start_GPU = rtclock();
  // main execution
  /* Map all JDS arrays to the device once; only the result comes back.
   * NOTE(review): h_ptr/h_perm are mapped with length col_count — confirm
   * that matches the sizes coo_to_jds allocates. */
#pragma omp target map(                                                        \
        to : h_nzcnt[ : nzcnt_len],                                            \
        h_ptr[ : col_count],                                                   \
        h_indices[ : len],                                                     \
        h_data[ : len],                                                        \
        h_perm[ : col_count],                                                  \
        h_x_vector[ : dim]) map(                                               \
        from : h_Ax_vector[ : dim])                                            \
    device(DEVICE_ID)
  for (p = 0; p < 50; p++) {
#pragma omp parallel for
    for (i = 0; i < dim; i++) {
      int k;
      float sum = 0.0f;
      // int bound = h_nzcnt[i / 32];
      int bound = h_nzcnt[i];
      /* JDS traversal: h_ptr[k] is the start of the k-th jagged diagonal;
       * adding i selects this row's element within it. */
      for (k = 0; k < bound; k++) {
        int j = h_ptr[k] + i;
        int in = h_indices[j];
        float d = h_data[j];
        float t = h_x_vector[in];
        sum += d * t;
      }
      // #pragma omp critical
      /* h_perm maps the JDS row order back to the original row index. */
      h_Ax_vector[h_perm[i]] = sum;
    }
  }
  t_end_GPU = rtclock();
  h_Ax_vector_GPU = h_Ax_vector;
  N = dim;
  // if (parameters->outFile) {
  //  pb_SwitchToTimer(&timers, pb_TimerID_IO);
  //  outputData(parameters->outFile,h_Ax_vector,dim);
  // }
  // pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  free(h_data);
  free(h_indices);
  free(h_ptr);
  free(h_perm);
  free(h_nzcnt);
  free(h_x_vector);
  // pb_SwitchToTimer(&timers, pb_TimerID_NONE);
  // pb_PrintTimerSet(&timers);
  pb_FreeParameters(parameters);
  return t_end_GPU - t_start_GPU;
}
double spmvCPU(int argc, char **argv) {
  /* Sequential reference SpMV: identical data loading and 50-round
   * y = A*x loop as spmvGPU, but executed on the host with no OpenMP
   * offload. Sets h_Ax_vector_CPU and N; returns elapsed wall time. */
  // struct pb_TimerSet timers;
  struct pb_Parameters *parameters;
  // printf("CPU-based sparse matrix vector multiplication****\n");
  // printf("Original version by Li-Wen Chang <lchang20@illinois.edu> and
  //Shengzhao Wu<wu14@illinois.edu>\n");
  // printf("This version maintained by Chris Rodrigues ***********\n");
  parameters = pb_ReadParameters(&argc, argv);
  if ((parameters->inpFiles[0] == NULL) || (parameters->inpFiles[1] == NULL)) {
    fprintf(stderr, "Expecting two input filenames\n");
    exit(-1);
  }
  // pb_InitializeTimerSet(&timers);
  // pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  // parameters declaration
  int len;
  int depth;
  int dim;
  int pad = 1;
  int nzcnt_len;
  // host memory allocation
  // matrix
  float *h_data;
  int *h_indices;
  int *h_ptr;
  int *h_perm;
  int *h_nzcnt;
  // vector
  float *h_Ax_vector;
  float *h_x_vector;
  // load matrix from files
  // pb_SwitchToTimer(&timers, pb_TimerID_IO);
  // inputData(parameters->inpFiles[0], &len, &depth, &dim,&nzcnt_len,&pad,
  //    &h_data, &h_indices, &h_ptr,
  //    &h_perm, &h_nzcnt);
  int col_count;
  /* Convert the COO-format matrix file into JDS arrays (data, column
   * indices, row permutation, per-row nonzero counts). */
  coo_to_jds(parameters->inpFiles[0], // bcsstk32.mtx, fidapm05.mtx, jgl009.mtx
             1,                       // row padding
             pad,                     // warp size
             1,                       // pack size
             1,                       // is mirrored?
             0,                       // binary matrix
             0,                       // debug level [0:2]
             &h_data, &h_ptr, &h_nzcnt, &h_indices, &h_perm, &col_count, &dim,
             &len, &nzcnt_len, &depth);
  h_Ax_vector = (float *)malloc(sizeof(float) * dim);
  h_x_vector = (float *)malloc(sizeof(float) * dim);
  // generate_vector(h_x_vector, dim);
  input_vec(parameters->inpFiles[1], h_x_vector, dim);
  // pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  int p, i;
  // main execution
  t_start = rtclock();
  for (p = 0; p < 50; p++) {
    for (i = 0; i < dim; i++) {
      int k;
      float sum = 0.0f;
      // int bound = h_nzcnt[i / 32];
      int bound = h_nzcnt[i];
      /* JDS traversal: h_ptr[k] is the start of the k-th jagged diagonal;
       * adding i selects this row's element within it. */
      for (k = 0; k < bound; k++) {
        int j = h_ptr[k] + i;
        int in = h_indices[j];
        float d = h_data[j];
        float t = h_x_vector[in];
        sum += d * t;
      }
      // #pragma omp critical
      /* h_perm maps the JDS row order back to the original row index. */
      h_Ax_vector[h_perm[i]] = sum;
    }
  }
  t_end = rtclock();
  h_Ax_vector_CPU = h_Ax_vector;
  N = dim;
  // if (parameters->outFile) {
  //  pb_SwitchToTimer(&timers, pb_TimerID_IO);
  //  outputData(parameters->outFile,h_Ax_vector,dim);
  // }
  // pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  free(h_data);
  free(h_indices);
  free(h_ptr);
  free(h_perm);
  free(h_nzcnt);
  free(h_x_vector);
  // pb_SwitchToTimer(&timers, pb_TimerID_NONE);
  // pb_PrintTimerSet(&timers);
  pb_FreeParameters(parameters);
  return t_end - t_start;
}
int main(int argc, char **argv) {
  /* Run the offloaded SpMV; when built with RUN_TEST, also run the
   * sequential reference and report mismatches (returned as exit code). */
  int fail = 0;
  double t_GPU = spmvGPU(argc, argv);
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_GPU);
#ifdef RUN_TEST
  /* t_CPU and the CPU result buffer only exist in test builds; declaring
   * them here avoids an unused/uninitialized t_CPU (and a pointless
   * free(NULL)) when RUN_TEST is not defined. */
  double t_CPU = spmvCPU(argc, argv);
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_CPU);
  fail = compareResults(h_Ax_vector_GPU, h_Ax_vector_CPU);
  free(h_Ax_vector_CPU);
#endif
  free(h_Ax_vector_GPU);
  return fail;
}
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Store x - y in *result and return 1 if the difference is negative,
	 * 0 otherwise. Note: *y is normalized in place as a side effect. */

	/* Borrow from tv_sec so x->tv_usec - y->tv_usec is non-negative. */
	if (x->tv_usec < y->tv_usec) {
		int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * carry;
		y->tv_sec += carry;
	}
	/* Carry into tv_sec when the microsecond gap exceeds one second. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}

	/* After normalization tv_usec is certainly non-negative. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;

	return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the 3D 25-point stencil with axis-symmetric variable
 * coefficients.
 *
 * Usage: prog Nx Ny Nz Nt
 *   Nx, Ny, Nz  interior problem size (8 halo points are added per axis)
 *   Nt          number of time steps
 *
 * Runs the kernel TESTS times, reporting per-test and minimum wall time.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Zero defaults make missing command-line arguments an empty run
   * instead of reading uninitialized sizes (previously UB). */
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
  if (argc > 3) {
    /* +8 accounts for a 4-deep halo on both ends of each axis. */
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Allocate the two time planes of the solution: A[0..1][Nz][Ny][Nx]. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for (m = 0; m < 2; m++) {
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i = 0; i < Nz; i++) {
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j = 0; j < Ny; j++) {
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* 13 coefficient arrays: 1 center tap + 4 distances x 3 axes. */
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for (m = 0; m < 13; m++) {
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i = 0; i < Nz; i++) {
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j = 0; j < Ny; j++) {
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* Tile size information, including extra element to decide the list
   * length.  The list is modified here before source-to-source
   * transformations. */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 32;
  tile_size[3] = 512;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  /* Initialize input plane and coefficients with reproducible data. */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m = 0; m < 13; m++) {
    for (i = 1; i < Nz; i++) {
      for (j = 1; j < Ny; j++) {
        for (k = 1; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  /* Kept for reporting macros that may reference it. */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
              coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
              coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
              coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
              coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
              coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
              coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
              coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
              coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Use this file's own MIN macro; the lowercase `min` relied on a
     * definition from another translation unit. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays, including the top-level pointers and the
   * tile-size list (both previously leaked). */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for (m = 0; m < 13; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
test_task_single.c | //===-- test_task_single.cc - Test task barrier of single ---------*- C -*-===//
//
// Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
//
// This file has been modified from the file
// openmp/runtime/test/tasking/omp_task.c
// of the LLVM project (https://github.com/llvm/llvm-project)
// under the Apache License v2.0 with LLVM Exceptions.
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <omp.h>
#include "tests.h"
/* Spawn NUM_TASKS sleeping tasks from a single construct and record
 * which thread executed each one.  Returns 1 when at least two
 * different threads ran tasks, 0 when one thread executed them all. */
int test_omp_task(void) {
  int tids[NUM_TASKS];
  int i;
#pragma omp parallel
  {
#pragma omp single
    {
      for (i = 0; i < NUM_TASKS; i++) {
        /* Capture the loop index in a task-local copy: the shared `i`
         * may advance before the task body actually runs. */
        int myi = i;
        printf("Create task %d\n", myi);
#pragma omp task
        {
          sleep(SLEEPTIME);
          tids[myi] = omp_get_thread_num();
          printf("Executed task %d in thread %d\n", myi, omp_get_thread_num());
        }
      }
    } /* implicit barrier: all tasks finished before leaving single */
  }
  /* Did any task run on a thread other than the one that ran task 0? */
  for (i = 1; i < NUM_TASKS; i++) {
    if (tids[i] != tids[0])
      return 1;
  }
  return 0;
}
/* Run the task test REPETITIONS times.  A repetition fails when every
 * task was executed by the same thread (test_omp_task() returns 0). */
int main(void) {
  int i;
  int num_failed = 0;
  if (omp_get_max_threads() < 2) {
    /* The guard is `< 2`, i.e. two threads suffice; the old message
     * incorrectly demanded more than two. */
    printf("Not enough threads for this test! Need at least 2 threads!\n");
  }
  // omp_set_num_threads(8);
  for (i = 0; i < REPETITIONS; i++) {
    if (!test_omp_task()) {
      num_failed++;
    }
  }
  return num_failed != 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}
red_black_gs.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.12 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* This routine assumes a 3-pt (1D), 5-pt (2D), or 7-pt (3D) stencil.
*
*****************************************************************************/
#include "_hypre_struct_ls.h"
#include "red_black_gs.h"
#ifndef hypre_abs
#define hypre_abs(a) (((a)>0) ? (a) : -(a))
#endif
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Allocate a red/black Gauss-Seidel relaxation object and fill in the
 * default settings.  The problem data (A, b, x) and the compute
 * package are attached later by hypre_RedBlackGSSetup. */
void *
hypre_RedBlackGSCreate( MPI_Comm  comm )
{
   hypre_RedBlackGSData *rb_data = hypre_CTAlloc(hypre_RedBlackGSData, 1);

   rb_data->comm       = comm;
   rb_data->time_index = hypre_InitializeTiming("RedBlackGS");

   /* default parameters */
   rb_data->tol        = 1.0e-06;
   rb_data->max_iter   = 1000;
   rb_data->rel_change = 0;
   rb_data->zero_guess = 0;
   rb_data->rb_start   = 1;
   rb_data->flops      = 0;

   /* set by Setup */
   rb_data->A           = NULL;
   rb_data->b           = NULL;
   rb_data->x           = NULL;
   rb_data->compute_pkg = NULL;

   return (void *) rb_data;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_RedBlackGSDestroy( void *relax_vdata )
{
hypre_RedBlackGSData *relax_data = relax_vdata;
if (relax_data)
{
hypre_StructMatrixDestroy(relax_data -> A);
hypre_StructVectorDestroy(relax_data -> b);
hypre_StructVectorDestroy(relax_data -> x);
hypre_ComputePkgDestroy(relax_data -> compute_pkg);
hypre_FinalizeTiming(relax_data -> time_index);
hypre_TFree(relax_data);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Attach the problem (A, b, x) to the relaxation object, locate the
 * diagonal stencil entry, and build the communication/computation
 * package used during relaxation. */
HYPRE_Int
hypre_RedBlackGSSetup( void               *relax_vdata,
                       hypre_StructMatrix *A,
                       hypre_StructVector *b,
                       hypre_StructVector *x )
{
   hypre_RedBlackGSData *rb_data = relax_vdata;
   hypre_StructGrid     *grid    = hypre_StructMatrixGrid(A);
   hypre_StructStencil  *stencil = hypre_StructMatrixStencil(A);
   hypre_Index           diag_index;
   hypre_ComputeInfo    *compute_info;
   hypre_ComputePkg     *compute_pkg;
   HYPRE_Int             diag_rank;

   /* the rank of the (0,0,0) stencil entry, i.e. the matrix diagonal */
   hypre_SetIndex(diag_index, 0, 0, 0);
   diag_rank = hypre_StructStencilElementRank(stencil, diag_index);

   /* build the compute package over x's data space */
   hypre_CreateComputeInfo(grid, stencil, &compute_info);
   hypre_ComputePkgCreate(compute_info, hypre_StructVectorDataSpace(x), 1,
                          grid, &compute_pkg);

   /* record everything in the relax data structure */
   rb_data->A           = hypre_StructMatrixRef(A);
   rb_data->x           = hypre_StructVectorRef(x);
   rb_data->b           = hypre_StructVectorRef(b);
   rb_data->diag_rank   = diag_rank;
   rb_data->compute_pkg = compute_pkg;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_RedBlackGS
 *
 * Red/black Gauss-Seidel relaxation for a 3-pt (1D), 5-pt (2D), or
 * 7-pt (3D) stencil (stencil_size 3/5/7).  One full iteration is two
 * half-sweeps (one per color), so `iter` below counts half-sweeps and
 * num_iterations is reported as iter/2.  `rb` selects the color of the
 * current half-sweep; `rb_start` picks which color goes first.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_RedBlackGS( void               *relax_vdata,
                  hypre_StructMatrix *A,
                  hypre_StructVector *b,
                  hypre_StructVector *x )
{
   hypre_RedBlackGSData *relax_data = relax_vdata;

   HYPRE_Int             max_iter    = (relax_data -> max_iter);
   HYPRE_Int             zero_guess  = (relax_data -> zero_guess);
   HYPRE_Int             rb_start    = (relax_data -> rb_start);
   HYPRE_Int             diag_rank   = (relax_data -> diag_rank);
   hypre_ComputePkg     *compute_pkg = (relax_data -> compute_pkg);

   hypre_CommHandle     *comm_handle;

   hypre_BoxArrayArray  *compute_box_aa;
   hypre_BoxArray       *compute_box_a;
   hypre_Box            *compute_box;

   hypre_Box            *A_dbox;
   hypre_Box            *b_dbox;
   hypre_Box            *x_dbox;

   /* flattened base offsets (—start) and row/plane strides (—ni/—nj)
    * for direct indexing into each data box */
   HYPRE_Int             Ai, Astart, Ani, Anj;
   HYPRE_Int             bi, bstart, bni, bnj;
   HYPRE_Int             xi, xstart, xni, xnj;
   HYPRE_Int             xoff0, xoff1, xoff2, xoff3, xoff4, xoff5;

   double               *Ap;
   double               *Ap0, *Ap1, *Ap2, *Ap3, *Ap4, *Ap5;
   double               *bp;
   double               *xp;

   hypre_IndexRef        start;
   hypre_Index           loop_size;

   hypre_StructStencil  *stencil;
   hypre_Index          *stencil_shape;
   HYPRE_Int             stencil_size;

   /* stencil ranks of the (at most 6) off-diagonal entries */
   HYPRE_Int             offd[6];

   HYPRE_Int             iter, rb, redblack;
   HYPRE_Int             compute_i, i, j, ii, jj, kk;
   HYPRE_Int             ni, nj, nk;

   /*----------------------------------------------------------
    * Initialize some things and deal with special cases
    *----------------------------------------------------------*/
   hypre_BeginTiming(relax_data -> time_index);

   /* replace any previously attached problem with the new one */
   hypre_StructMatrixDestroy(relax_data -> A);
   hypre_StructVectorDestroy(relax_data -> b);
   hypre_StructVectorDestroy(relax_data -> x);
   (relax_data -> A) = hypre_StructMatrixRef(A);
   (relax_data -> x) = hypre_StructVectorRef(x);
   (relax_data -> b) = hypre_StructVectorRef(b);
   (relax_data -> num_iterations) = 0;

   /* if max_iter is zero, return */
   if (max_iter == 0)
   {
      /* if using a zero initial guess, return zero */
      if (zero_guess)
      {
         hypre_StructVectorSetConstantValues(x, 0.0);
      }
      hypre_EndTiming(relax_data -> time_index);
      return hypre_error_flag;
   }
   else
   {
      stencil       = hypre_StructMatrixStencil(A);
      stencil_shape = hypre_StructStencilShape(stencil);
      stencil_size  = hypre_StructStencilSize(stencil);

      /* get off-diag entry ranks ready */
      i = 0;
      for (j = 0; j < stencil_size; j++)
      {
         if (j != diag_rank)
         {
            offd[i] = j;
            i++;
         }
      }
   }

   /*----------------------------------------------------------
    * Do zero_guess iteration
    *
    * With x == 0 the off-diagonal terms vanish, so the first
    * half-sweep reduces to x = b / diag on one color and needs no
    * communication of x values.
    *----------------------------------------------------------*/
   rb = rb_start;
   iter = 0;

   if (zero_guess)
   {
      for (compute_i = 0; compute_i < 2; compute_i++)
      {
         switch(compute_i)
         {
            case 0:
            {
               compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
            }
            break;

            case 1:
            {
               compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
            }
            break;
         }

         hypre_ForBoxArrayI(i, compute_box_aa)
         {
            compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

            A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
            b_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i);
            x_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);

            Ap = hypre_StructMatrixBoxData(A, i, diag_rank);
            bp = hypre_StructVectorBoxData(b, i);
            xp = hypre_StructVectorBoxData(x, i);

            hypre_ForBoxI(j, compute_box_a)
            {
               compute_box = hypre_BoxArrayBox(compute_box_a, j);

               start = hypre_BoxIMin(compute_box);
               hypre_BoxGetSize(compute_box, loop_size);

               /* Are we relaxing index start or start+(1,0,0)? */
               redblack = hypre_abs(hypre_IndexX(start) +
                                    hypre_IndexY(start) +
                                    hypre_IndexZ(start) + rb) % 2;

               Astart = hypre_BoxIndexRank(A_dbox, start);
               bstart = hypre_BoxIndexRank(b_dbox, start);
               xstart = hypre_BoxIndexRank(x_dbox, start);
               ni = hypre_IndexX(loop_size);
               nj = hypre_IndexY(loop_size);
               nk = hypre_IndexZ(loop_size);
               Ani = hypre_BoxSizeX(A_dbox);
               bni = hypre_BoxSizeX(b_dbox);
               xni = hypre_BoxSizeX(x_dbox);
               Anj = hypre_BoxSizeY(A_dbox);
               bnj = hypre_BoxSizeY(b_dbox);
               xnj = hypre_BoxSizeY(x_dbox);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
               for (kk = 0; kk < nk; kk++)
               {
                  for (jj = 0; jj < nj; jj++)
                  {
                     /* first point of the chosen color in this row;
                      * then stride 2 visits only that color */
                     ii = (kk + jj + redblack) % 2;
                     Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
                     bi = bstart + kk*bnj*bni + jj*bni + ii;
                     xi = xstart + kk*xnj*xni + jj*xni + ii;
                     for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
                     {
                        xp[xi] = bp[bi] / Ap[Ai];
                     }
                  }
               }
            }
         }
      }

      rb = (rb + 1) % 2;
      iter++;
   }

   /*----------------------------------------------------------
    * Do regular iterations (half-sweeps, alternating colors)
    *----------------------------------------------------------*/
   while (iter < 2*max_iter)
   {
      for (compute_i = 0; compute_i < 2; compute_i++)
      {
         switch(compute_i)
         {
            case 0:
            {
               /* overlap halo exchange of x with the independent boxes */
               xp = hypre_StructVectorData(x);
               hypre_InitializeIndtComputations(compute_pkg, xp, &comm_handle);
               compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
            }
            break;

            case 1:
            {
               hypre_FinalizeIndtComputations(comm_handle);
               compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
            }
            break;
         }

         hypre_ForBoxArrayI(i, compute_box_aa)
         {
            compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

            A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
            b_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i);
            x_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);

            Ap = hypre_StructMatrixBoxData(A, i, diag_rank);
            bp = hypre_StructVectorBoxData(b, i);
            xp = hypre_StructVectorBoxData(x, i);

            hypre_ForBoxI(j, compute_box_a)
            {
               compute_box = hypre_BoxArrayBox(compute_box_a, j);

               start = hypre_BoxIMin(compute_box);
               hypre_BoxGetSize(compute_box, loop_size);

               /* Are we relaxing index start or start+(1,0,0)? */
               redblack = hypre_abs(hypre_IndexX(start) +
                                    hypre_IndexY(start) +
                                    hypre_IndexZ(start) + rb) % 2;

               Astart = hypre_BoxIndexRank(A_dbox, start);
               bstart = hypre_BoxIndexRank(b_dbox, start);
               xstart = hypre_BoxIndexRank(x_dbox, start);
               ni = hypre_IndexX(loop_size);
               nj = hypre_IndexY(loop_size);
               nk = hypre_IndexZ(loop_size);
               Ani = hypre_BoxSizeX(A_dbox);
               bni = hypre_BoxSizeX(b_dbox);
               xni = hypre_BoxSizeX(x_dbox);
               Anj = hypre_BoxSizeY(A_dbox);
               bnj = hypre_BoxSizeY(b_dbox);
               xnj = hypre_BoxSizeY(x_dbox);

               /* Deliberate fall-through: for a 7-pt stencil the 5-pt
                * and 3-pt entries are set up too; only the Ap*/xoff*
                * needed for this stencil_size are ever used below. */
               switch(stencil_size)
               {
                  case 7:
                     Ap5 = hypre_StructMatrixBoxData(A, i, offd[5]);
                     Ap4 = hypre_StructMatrixBoxData(A, i, offd[4]);
                     xoff5 = hypre_BoxOffsetDistance(
                        x_dbox, stencil_shape[offd[5]]);
                     xoff4 = hypre_BoxOffsetDistance(
                        x_dbox, stencil_shape[offd[4]]);

                  case 5:
                     Ap3 = hypre_StructMatrixBoxData(A, i, offd[3]);
                     Ap2 = hypre_StructMatrixBoxData(A, i, offd[2]);
                     xoff3 = hypre_BoxOffsetDistance(
                        x_dbox, stencil_shape[offd[3]]);
                     xoff2 = hypre_BoxOffsetDistance(
                        x_dbox, stencil_shape[offd[2]]);

                  case 3:
                     Ap1 = hypre_StructMatrixBoxData(A, i, offd[1]);
                     Ap0 = hypre_StructMatrixBoxData(A, i, offd[0]);
                     xoff1 = hypre_BoxOffsetDistance(
                        x_dbox, stencil_shape[offd[1]]);
                     xoff0 = hypre_BoxOffsetDistance(
                        x_dbox, stencil_shape[offd[0]]);
                     break;
               }

               switch(stencil_size)
               {
                  case 7:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                     for (kk = 0; kk < nk; kk++)
                     {
                        for (jj = 0; jj < nj; jj++)
                        {
                           ii = (kk + jj + redblack) % 2;
                           Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
                           bi = bstart + kk*bnj*bni + jj*bni + ii;
                           xi = xstart + kk*xnj*xni + jj*xni + ii;
                           for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
                           {
                              xp[xi] =
                                 (bp[bi] -
                                  Ap0[Ai] * xp[xi + xoff0] -
                                  Ap1[Ai] * xp[xi + xoff1] -
                                  Ap2[Ai] * xp[xi + xoff2] -
                                  Ap3[Ai] * xp[xi + xoff3] -
                                  Ap4[Ai] * xp[xi + xoff4] -
                                  Ap5[Ai] * xp[xi + xoff5]) / Ap[Ai];
                           }
                        }
                     }
                     break;

                  case 5:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                     for (kk = 0; kk < nk; kk++)
                     {
                        for (jj = 0; jj < nj; jj++)
                        {
                           ii = (kk + jj + redblack) % 2;
                           Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
                           bi = bstart + kk*bnj*bni + jj*bni + ii;
                           xi = xstart + kk*xnj*xni + jj*xni + ii;
                           for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
                           {
                              xp[xi] =
                                 (bp[bi] -
                                  Ap0[Ai] * xp[xi + xoff0] -
                                  Ap1[Ai] * xp[xi + xoff1] -
                                  Ap2[Ai] * xp[xi + xoff2] -
                                  Ap3[Ai] * xp[xi + xoff3]) / Ap[Ai];
                           }
                        }
                     }
                     break;

                  case 3:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                     for (kk = 0; kk < nk; kk++)
                     {
                        for (jj = 0; jj < nj; jj++)
                        {
                           ii = (kk + jj + redblack) % 2;
                           Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
                           bi = bstart + kk*bnj*bni + jj*bni + ii;
                           xi = xstart + kk*xnj*xni + jj*xni + ii;
                           for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
                           {
                              xp[xi] =
                                 (bp[bi] -
                                  Ap0[Ai] * xp[xi + xoff0] -
                                  Ap1[Ai] * xp[xi + xoff1]) / Ap[Ai];
                           }
                        }
                     }
                     break;
               }
            }
         }
      }

      rb = (rb + 1) % 2;
      iter++;
   }

   /* iter counts half-sweeps; report full red+black iterations */
   (relax_data -> num_iterations) = iter / 2;

   /*-----------------------------------------------------------------------
    * Return
    *-----------------------------------------------------------------------*/
   hypre_IncFLOPCount(relax_data -> flops);
   hypre_EndTiming(relax_data -> time_index);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Store the convergence tolerance in the relaxation object. */
HYPRE_Int
hypre_RedBlackGSSetTol( void   *relax_vdata,
                        double  tol )
{
   hypre_RedBlackGSData *rb_data = relax_vdata;
   rb_data->tol = tol;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Store the maximum number of relaxation iterations. */
HYPRE_Int
hypre_RedBlackGSSetMaxIter( void      *relax_vdata,
                            HYPRE_Int  max_iter )
{
   hypre_RedBlackGSData *rb_data = relax_vdata;
   rb_data->max_iter = max_iter;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Record whether the initial guess is known to be zero (enables the
 * cheaper first half-sweep in hypre_RedBlackGS). */
HYPRE_Int
hypre_RedBlackGSSetZeroGuess( void      *relax_vdata,
                              HYPRE_Int  zero_guess )
{
   hypre_RedBlackGSData *rb_data = relax_vdata;
   rb_data->zero_guess = zero_guess;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Start the sweeps on the red points (rb_start = 1). */
HYPRE_Int
hypre_RedBlackGSSetStartRed( void *relax_vdata )
{
   hypre_RedBlackGSData *rb_data = relax_vdata;
   rb_data->rb_start = 1;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Start the sweeps on the black points (rb_start = 0). */
HYPRE_Int
hypre_RedBlackGSSetStartBlack( void *relax_vdata )
{
   hypre_RedBlackGSData *rb_data = relax_vdata;
   rb_data->rb_start = 0;
   return hypre_error_flag;
}
|
convolutiondepthwise_5x5_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise convolution, 5x5 kernel, stride 1, on 8-wide packed floats
// (pack8) using AVX FMA.  One group = one channel; each output element is
// the 25-tap dot product accumulated lane-wise across the 8 packed floats.
// Kernel layout per group: 5 rows x 5 taps x 8 lanes = 200 floats.
static void convdw5x5s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w; // NOTE(review): unused; the fixed row tail of 4*8 below implies w == outw + 4 — confirm
    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c; // depthwise: one input channel per output channel

    const float* bias = _bias; // may be null (empty Mat converts to null pointer)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // broadcast this group's 8-lane bias (or zeros) as the accumulator seed
        __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);

        const float* k0 = kernel.row(g);

        float* outptr0 = out.row(0);
        float* outptr1 = out.row(1); // NOTE(review): unused — presumably left over from a 2-rows-at-a-time variant

        const Mat img0 = bottom_blob.channel(g);

        // five consecutive input rows feeding the 5x5 window
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        const float* r3 = img0.row(3);
        const float* r4 = img0.row(4);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                __m256 _sum0 = _bias0;

                // kernel row 0: 5 taps x 8 lanes; k0 advances 40 floats per row
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _k00 = _mm256_loadu_ps(k0);
                __m256 _k01 = _mm256_loadu_ps(k0 + 8);
                __m256 _k02 = _mm256_loadu_ps(k0 + 16);
                __m256 _k03 = _mm256_loadu_ps(k0 + 24);
                __m256 _k04 = _mm256_loadu_ps(k0 + 32);
                k0 += 40;
                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k03, _r03, _sum0);
                _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0);

                // kernel row 1
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _k10 = _mm256_loadu_ps(k0);
                __m256 _k11 = _mm256_loadu_ps(k0 + 8);
                __m256 _k12 = _mm256_loadu_ps(k0 + 16);
                __m256 _k13 = _mm256_loadu_ps(k0 + 24);
                __m256 _k14 = _mm256_loadu_ps(k0 + 32);
                k0 += 40;
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k13, _r13, _sum0);
                _sum0 = _mm256_fmadd_ps(_k14, _r14, _sum0);

                // kernel row 2
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);
                __m256 _k20 = _mm256_loadu_ps(k0);
                __m256 _k21 = _mm256_loadu_ps(k0 + 8);
                __m256 _k22 = _mm256_loadu_ps(k0 + 16);
                __m256 _k23 = _mm256_loadu_ps(k0 + 24);
                __m256 _k24 = _mm256_loadu_ps(k0 + 32);
                k0 += 40;
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
                _sum0 = _mm256_fmadd_ps(_k23, _r23, _sum0);
                _sum0 = _mm256_fmadd_ps(_k24, _r24, _sum0);

                // kernel row 3
                __m256 _r30 = _mm256_loadu_ps(r3);
                __m256 _r31 = _mm256_loadu_ps(r3 + 8);
                __m256 _r32 = _mm256_loadu_ps(r3 + 16);
                __m256 _r33 = _mm256_loadu_ps(r3 + 24);
                __m256 _r34 = _mm256_loadu_ps(r3 + 32);
                __m256 _k30 = _mm256_loadu_ps(k0);
                __m256 _k31 = _mm256_loadu_ps(k0 + 8);
                __m256 _k32 = _mm256_loadu_ps(k0 + 16);
                __m256 _k33 = _mm256_loadu_ps(k0 + 24);
                __m256 _k34 = _mm256_loadu_ps(k0 + 32);
                k0 += 40;
                _sum0 = _mm256_fmadd_ps(_k30, _r30, _sum0);
                _sum0 = _mm256_fmadd_ps(_k31, _r31, _sum0);
                _sum0 = _mm256_fmadd_ps(_k32, _r32, _sum0);
                _sum0 = _mm256_fmadd_ps(_k33, _r33, _sum0);
                _sum0 = _mm256_fmadd_ps(_k34, _r34, _sum0);

                // kernel row 4; then rewind k0 by 4*40 so it points back at
                // row 0 of this group's kernel for the next output pixel
                __m256 _r40 = _mm256_loadu_ps(r4);
                __m256 _r41 = _mm256_loadu_ps(r4 + 8);
                __m256 _r42 = _mm256_loadu_ps(r4 + 16);
                __m256 _r43 = _mm256_loadu_ps(r4 + 24);
                __m256 _r44 = _mm256_loadu_ps(r4 + 32);
                __m256 _k40 = _mm256_loadu_ps(k0);
                __m256 _k41 = _mm256_loadu_ps(k0 + 8);
                __m256 _k42 = _mm256_loadu_ps(k0 + 16);
                __m256 _k43 = _mm256_loadu_ps(k0 + 24);
                __m256 _k44 = _mm256_loadu_ps(k0 + 32);
                k0 -= 160;
                _sum0 = _mm256_fmadd_ps(_k40, _r40, _sum0);
                _sum0 = _mm256_fmadd_ps(_k41, _r41, _sum0);
                _sum0 = _mm256_fmadd_ps(_k42, _r42, _sum0);
                _sum0 = _mm256_fmadd_ps(_k43, _r43, _sum0);
                _sum0 = _mm256_fmadd_ps(_k44, _r44, _sum0);

                _mm256_storeu_ps(outptr0, _sum0);

                // stride 1: advance one packed pixel
                r0 += 8;
                r1 += 8;
                r2 += 8;
                r3 += 8;
                r4 += 8;
                outptr0 += 8;
            }

            // skip the 4 remaining window pixels at the row end
            // (assumes w == outw + 4 — see note on `w` above)
            r0 += 4 * 8;
            r1 += 4 * 8;
            r2 += 4 * 8;
            r3 += 4 * 8;
            r4 += 4 * 8;
        }
    }
}
// Depthwise convolution, 5x5 kernel, stride 2, on 8-wide packed floats
// (pack8) using AVX FMA.  Same accumulation scheme as the stride-1
// variant; only the input pointer advances differ (16 floats = 2 packed
// pixels per output, plus `tailstep` per row).
static void convdw5x5s2_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c; // depthwise: one input channel per output channel

    // per-row remainder: (2*w - 2*outw) packed pixels — covers the row tail
    // plus the full input row skipped by the vertical stride of 2
    const int tailstep = (w - 2 * outw + w) * 8;

    const float* bias = _bias; // may be null (empty Mat converts to null pointer)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // broadcast this group's 8-lane bias (or zeros) as the accumulator seed
        __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);

        const float* k0 = kernel.row(g); // 5*5 taps x 8 lanes = 200 floats per group

        float* outptr0 = out.row(0);
        float* outptr1 = out.row(1); // NOTE(review): unused — presumably left over from a 2-rows-at-a-time variant

        const Mat img0 = bottom_blob.channel(g);

        // five consecutive input rows feeding the 5x5 window
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        const float* r3 = img0.row(3);
        const float* r4 = img0.row(4);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                __m256 _sum0 = _bias0;

                // kernel row 0: 5 taps x 8 lanes; k0 advances 40 floats per row
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _k00 = _mm256_loadu_ps(k0);
                __m256 _k01 = _mm256_loadu_ps(k0 + 8);
                __m256 _k02 = _mm256_loadu_ps(k0 + 16);
                __m256 _k03 = _mm256_loadu_ps(k0 + 24);
                __m256 _k04 = _mm256_loadu_ps(k0 + 32);
                k0 += 40;
                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k03, _r03, _sum0);
                _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0);

                // kernel row 1
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _k10 = _mm256_loadu_ps(k0);
                __m256 _k11 = _mm256_loadu_ps(k0 + 8);
                __m256 _k12 = _mm256_loadu_ps(k0 + 16);
                __m256 _k13 = _mm256_loadu_ps(k0 + 24);
                __m256 _k14 = _mm256_loadu_ps(k0 + 32);
                k0 += 40;
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k13, _r13, _sum0);
                _sum0 = _mm256_fmadd_ps(_k14, _r14, _sum0);

                // kernel row 2
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);
                __m256 _k20 = _mm256_loadu_ps(k0);
                __m256 _k21 = _mm256_loadu_ps(k0 + 8);
                __m256 _k22 = _mm256_loadu_ps(k0 + 16);
                __m256 _k23 = _mm256_loadu_ps(k0 + 24);
                __m256 _k24 = _mm256_loadu_ps(k0 + 32);
                k0 += 40;
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
                _sum0 = _mm256_fmadd_ps(_k23, _r23, _sum0);
                _sum0 = _mm256_fmadd_ps(_k24, _r24, _sum0);

                // kernel row 3
                __m256 _r30 = _mm256_loadu_ps(r3);
                __m256 _r31 = _mm256_loadu_ps(r3 + 8);
                __m256 _r32 = _mm256_loadu_ps(r3 + 16);
                __m256 _r33 = _mm256_loadu_ps(r3 + 24);
                __m256 _r34 = _mm256_loadu_ps(r3 + 32);
                __m256 _k30 = _mm256_loadu_ps(k0);
                __m256 _k31 = _mm256_loadu_ps(k0 + 8);
                __m256 _k32 = _mm256_loadu_ps(k0 + 16);
                __m256 _k33 = _mm256_loadu_ps(k0 + 24);
                __m256 _k34 = _mm256_loadu_ps(k0 + 32);
                k0 += 40;
                _sum0 = _mm256_fmadd_ps(_k30, _r30, _sum0);
                _sum0 = _mm256_fmadd_ps(_k31, _r31, _sum0);
                _sum0 = _mm256_fmadd_ps(_k32, _r32, _sum0);
                _sum0 = _mm256_fmadd_ps(_k33, _r33, _sum0);
                _sum0 = _mm256_fmadd_ps(_k34, _r34, _sum0);

                // kernel row 4; then rewind k0 by 4*40 so it points back at
                // row 0 of this group's kernel for the next output pixel
                __m256 _r40 = _mm256_loadu_ps(r4);
                __m256 _r41 = _mm256_loadu_ps(r4 + 8);
                __m256 _r42 = _mm256_loadu_ps(r4 + 16);
                __m256 _r43 = _mm256_loadu_ps(r4 + 24);
                __m256 _r44 = _mm256_loadu_ps(r4 + 32);
                __m256 _k40 = _mm256_loadu_ps(k0);
                __m256 _k41 = _mm256_loadu_ps(k0 + 8);
                __m256 _k42 = _mm256_loadu_ps(k0 + 16);
                __m256 _k43 = _mm256_loadu_ps(k0 + 24);
                __m256 _k44 = _mm256_loadu_ps(k0 + 32);
                k0 -= 160;
                _sum0 = _mm256_fmadd_ps(_k40, _r40, _sum0);
                _sum0 = _mm256_fmadd_ps(_k41, _r41, _sum0);
                _sum0 = _mm256_fmadd_ps(_k42, _r42, _sum0);
                _sum0 = _mm256_fmadd_ps(_k43, _r43, _sum0);
                _sum0 = _mm256_fmadd_ps(_k44, _r44, _sum0);

                _mm256_storeu_ps(outptr0, _sum0);

                // stride 2: advance two packed pixels
                r0 += 16;
                r1 += 16;
                r2 += 16;
                r3 += 16;
                r4 += 16;
                outptr0 += 8;
            }

            // jump over the row remainder plus the input row skipped
            // by the vertical stride
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
            r3 += tailstep;
            r4 += tailstep;
        }
    }
}
|
convolution_1x1_pack4to1_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Repack a 1x1 convolution kernel for the pack4-to-1 bf16 sgemm path.
//
// src layout : inch-outch, float32
// dst layout : 4a-inch/4a-outch, bfloat16 (elemsize 2 bytes):
//              input channels are grouped by 4 and interleaved across
//              8 (aarch64) or 4 output channels at a time, with a
//              per-output-channel tail for the remainder.
//
// Fix: the per-channel tail loop previously wrote through a `float*`,
// storing 4-byte values into the 2-byte bf16 destination — corrupting
// the packed kernel and overrunning the channel.  All writes now go
// through `unsigned short*` like the other loops.
static void conv1x1s1_sgemm_transform_kernel_pack4to1_bf16s_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
    // interleave
    // src = inch-outch
    // dst = 4a-inch/4a-outch
#if __aarch64__
    kernel_tm_pack4.create(8, inch/4, outch/8 + (outch%8)/4 + outch%4, (size_t)2u*4, 4);
#else
    kernel_tm_pack4.create(4, inch/4, outch/4 + outch%4, (size_t)2u*4, 4);
#endif

    int p = 0;
#if __aarch64__
    // 8 output channels at a time: 8 outch x 4 inch bf16 values per tile
    for (; p+7 < outch; p += 8)
    {
        const float* k0 = (const float*)kernel + (p+0)*inch;
        const float* k1 = (const float*)kernel + (p+1)*inch;
        const float* k2 = (const float*)kernel + (p+2)*inch;
        const float* k3 = (const float*)kernel + (p+3)*inch;
        const float* k4 = (const float*)kernel + (p+4)*inch;
        const float* k5 = (const float*)kernel + (p+5)*inch;
        const float* k6 = (const float*)kernel + (p+6)*inch;
        const float* k7 = (const float*)kernel + (p+7)*inch;

        unsigned short* ktmp = kernel_tm_pack4.channel(p/8);

        for (int q = 0; q+3 < inch; q += 4)
        {
            // 4 input channels x 8 output channels, outch-major interleave
            ktmp[0] = float32_to_bfloat16(k0[0]);
            ktmp[1] = float32_to_bfloat16(k1[0]);
            ktmp[2] = float32_to_bfloat16(k2[0]);
            ktmp[3] = float32_to_bfloat16(k3[0]);
            ktmp[4] = float32_to_bfloat16(k4[0]);
            ktmp[5] = float32_to_bfloat16(k5[0]);
            ktmp[6] = float32_to_bfloat16(k6[0]);
            ktmp[7] = float32_to_bfloat16(k7[0]);
            ktmp[8] = float32_to_bfloat16(k0[1]);
            ktmp[9] = float32_to_bfloat16(k1[1]);
            ktmp[10] = float32_to_bfloat16(k2[1]);
            ktmp[11] = float32_to_bfloat16(k3[1]);
            ktmp[12] = float32_to_bfloat16(k4[1]);
            ktmp[13] = float32_to_bfloat16(k5[1]);
            ktmp[14] = float32_to_bfloat16(k6[1]);
            ktmp[15] = float32_to_bfloat16(k7[1]);
            ktmp[16] = float32_to_bfloat16(k0[2]);
            ktmp[17] = float32_to_bfloat16(k1[2]);
            ktmp[18] = float32_to_bfloat16(k2[2]);
            ktmp[19] = float32_to_bfloat16(k3[2]);
            ktmp[20] = float32_to_bfloat16(k4[2]);
            ktmp[21] = float32_to_bfloat16(k5[2]);
            ktmp[22] = float32_to_bfloat16(k6[2]);
            ktmp[23] = float32_to_bfloat16(k7[2]);
            ktmp[24] = float32_to_bfloat16(k0[3]);
            ktmp[25] = float32_to_bfloat16(k1[3]);
            ktmp[26] = float32_to_bfloat16(k2[3]);
            ktmp[27] = float32_to_bfloat16(k3[3]);
            ktmp[28] = float32_to_bfloat16(k4[3]);
            ktmp[29] = float32_to_bfloat16(k5[3]);
            ktmp[30] = float32_to_bfloat16(k6[3]);
            ktmp[31] = float32_to_bfloat16(k7[3]);

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            k4 += 4;
            k5 += 4;
            k6 += 4;
            k7 += 4;
            ktmp += 32;
        }
    }
#endif
    // 4 output channels at a time
    for (; p+3 < outch; p += 4)
    {
        const float* k0 = (const float*)kernel + (p+0)*inch;
        const float* k1 = (const float*)kernel + (p+1)*inch;
        const float* k2 = (const float*)kernel + (p+2)*inch;
        const float* k3 = (const float*)kernel + (p+3)*inch;

#if __aarch64__
        unsigned short* ktmp = kernel_tm_pack4.channel(p/8 + (p%8)/4);
#else
        unsigned short* ktmp = kernel_tm_pack4.channel(p/4);
#endif

        for (int q = 0; q+3 < inch; q += 4)
        {
            ktmp[0] = float32_to_bfloat16(k0[0]);
            ktmp[1] = float32_to_bfloat16(k1[0]);
            ktmp[2] = float32_to_bfloat16(k2[0]);
            ktmp[3] = float32_to_bfloat16(k3[0]);
            ktmp[4] = float32_to_bfloat16(k0[1]);
            ktmp[5] = float32_to_bfloat16(k1[1]);
            ktmp[6] = float32_to_bfloat16(k2[1]);
            ktmp[7] = float32_to_bfloat16(k3[1]);
            ktmp[8] = float32_to_bfloat16(k0[2]);
            ktmp[9] = float32_to_bfloat16(k1[2]);
            ktmp[10] = float32_to_bfloat16(k2[2]);
            ktmp[11] = float32_to_bfloat16(k3[2]);
            ktmp[12] = float32_to_bfloat16(k0[3]);
            ktmp[13] = float32_to_bfloat16(k1[3]);
            ktmp[14] = float32_to_bfloat16(k2[3]);
            ktmp[15] = float32_to_bfloat16(k3[3]);

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            ktmp += 16;
        }
    }
    // remaining output channels, one at a time
    for (; p < outch; p++)
    {
        const float* k0 = (const float*)kernel + p*inch;

        // bf16 destination: must be unsigned short* (was float*, which
        // wrote 4-byte values into 2-byte slots)
#if __aarch64__
        unsigned short* ktmp = kernel_tm_pack4.channel(p/8 + (p%8)/4 + p%4);
#else
        unsigned short* ktmp = kernel_tm_pack4.channel(p/4 + p%4);
#endif

        for (int q = 0; q+3 < inch; q += 4)
        {
            ktmp[0] = float32_to_bfloat16(k0[0]);
            ktmp[1] = float32_to_bfloat16(k0[1]);
            ktmp[2] = float32_to_bfloat16(k0[2]);
            ktmp[3] = float32_to_bfloat16(k0[3]);

            k0 += 4;
            ktmp += 4;
        }
    }
}
static void conv1x1s1_sgemm_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp;
#if __aarch64__
if (size >= 12)
tmp.create(12, inch, size/12 + (size%12)/8 + (size%12%8)/4 + size%12%4, elemsize, elempack, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8, inch, size/8 + (size%8)/4 + size%4, elemsize, elempack, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4, inch, size/4 + size%4, elemsize, elempack, opt.workspace_allocator);
else // if (size >= 1)
tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
#else
if (size >= 8)
tmp.create(8, inch, size/8 + (size%8)/4 + size%4, elemsize, elempack, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4, inch, size/4 + size%4, elemsize, elempack, opt.workspace_allocator);
else // if (size >= 1)
tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
#endif
{
int nn_size;
int remain_size_start;
#if __aarch64__
nn_size = size / 12;
remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 12;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i*4;
unsigned short* tmpptr = tmp.channel(i/12);
for (int q=0; q<inch; q++)
{
// transpose 4x12
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.4h, v5.4h, v6.4h, v7.4h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
"st1 {v4.4h}, [%1], #8 \n"
"st1 {v1.8h}, [%1], #16 \n"
"st1 {v5.4h}, [%1], #8 \n"
"sub %0, %0, #64 \n"
"st1 {v2.8h}, [%1], #16 \n"
"st1 {v6.4h}, [%1], #8 \n"
"st1 {v3.8h}, [%1], #16 \n"
"st1 {v7.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
);
img0 += bottom_blob.cstep * 4;
}
}
#else
remain_size_start = 0;
#endif
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i*4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12+(i%12)/8);
#else
unsigned short* tmpptr = tmp.channel(i/8);
#endif
for (int q=0; q<inch; q++)
{
// transpose 4x8
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3"
);
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.u16 {d0-d3}, [%0]! \n"
"pld [%0, #256] \n"
"vld4.u16 {d4-d7}, [%0] \n"
"sub %0, %0, #32 \n"
"vst1.u16 {d0}, [%1 :64]! \n"
"vst1.u16 {d4}, [%1 :64]! \n"
"vst1.u16 {d1}, [%1 :64]! \n"
"vst1.u16 {d5}, [%1 :64]! \n"
"vst1.u16 {d2}, [%1 :64]! \n"
"vst1.u16 {d6}, [%1 :64]! \n"
"vst1.u16 {d3}, [%1 :64]! \n"
"vst1.u16 {d7}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3"
);
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i*4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4);
#else
unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4);
#endif
for (int q=0; q<inch; q++)
{
// transpose 4x4
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld4 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0] \n"
"st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1"
);
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.u16 {d0-d3}, [%0 :128] \n"
"vst1.u16 {d0-d3}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1"
);
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<size; i++)
{
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i*4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + i%12%4);
#else
unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
#endif
for (int q=0; q<inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.4h}, [%0] \n"
"st1 {v0.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0"
);
#else
asm volatile(
"pld [%0, #64] \n"
"vld1.u16 {d0}, [%0 :64] \n"
"vst1.u16 {d0}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0"
);
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
#if __aarch64__
nn_outch = outch >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 8;
unsigned short* outptr0 = top_blob.channel(p);
unsigned short* outptr1 = top_blob.channel(p+1);
unsigned short* outptr2 = top_blob.channel(p+2);
unsigned short* outptr3 = top_blob.channel(p+3);
unsigned short* outptr4 = top_blob.channel(p+4);
unsigned short* outptr5 = top_blob.channel(p+5);
unsigned short* outptr6 = top_blob.channel(p+6);
unsigned short* outptr7 = top_blob.channel(p+7);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i=0;
for (; i+11<size; i+=12)
{
unsigned short* tmpptr = tmp.channel(i/12);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v30.4s, v31.4s}, [%22] \n"
"dup v8.4s, v30.s[0] \n"
"dup v9.4s, v30.s[0] \n"
"dup v10.4s, v30.s[0] \n"
"dup v11.4s, v30.s[1] \n"
"dup v12.4s, v30.s[1] \n"
"dup v13.4s, v30.s[1] \n"
"dup v14.4s, v30.s[2] \n"
"dup v15.4s, v30.s[2] \n"
"dup v16.4s, v30.s[2] \n"
"dup v17.4s, v30.s[3] \n"
"dup v18.4s, v30.s[3] \n"
"dup v19.4s, v30.s[3] \n"
"dup v20.4s, v31.s[0] \n"
"dup v21.4s, v31.s[0] \n"
"dup v22.4s, v31.s[0] \n"
"dup v23.4s, v31.s[1] \n"
"dup v24.4s, v31.s[1] \n"
"dup v25.4s, v31.s[1] \n"
"dup v26.4s, v31.s[2] \n"
"dup v27.4s, v31.s[2] \n"
"dup v28.4s, v31.s[2] \n"
"dup v29.4s, v31.s[3] \n"
"dup v30.4s, v31.s[3] \n"
"dup v31.4s, v31.s[3] \n"
"0: \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v11.4s, v0.4s, v4.s[1] \n"
"fmla v14.4s, v0.4s, v4.s[2] \n"
"fmla v17.4s, v0.4s, v4.s[3] \n"
"fmla v20.4s, v0.4s, v5.s[0] \n"
"fmla v23.4s, v0.4s, v5.s[1] \n"
"fmla v26.4s, v0.4s, v5.s[2] \n"
"fmla v29.4s, v0.4s, v5.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v12.4s, v1.4s, v4.s[1] \n"
"fmla v15.4s, v1.4s, v4.s[2] \n"
"fmla v18.4s, v1.4s, v4.s[3] \n"
"fmla v21.4s, v1.4s, v5.s[0] \n"
"fmla v24.4s, v1.4s, v5.s[1] \n"
"fmla v27.4s, v1.4s, v5.s[2] \n"
"fmla v30.4s, v1.4s, v5.s[3] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"fmla v13.4s, v2.4s, v4.s[1] \n"
"fmla v16.4s, v2.4s, v4.s[2] \n"
"fmla v19.4s, v2.4s, v4.s[3] \n"
"fmla v22.4s, v2.4s, v5.s[0] \n"
"fmla v25.4s, v2.4s, v5.s[1] \n"
"fmla v28.4s, v2.4s, v5.s[2] \n"
"fmla v31.4s, v2.4s, v5.s[3] \n"
"fmla v8.4s, v3.4s, v6.s[0] \n"
"fmla v11.4s, v3.4s, v6.s[1] \n"
"fmla v14.4s, v3.4s, v6.s[2] \n"
"fmla v17.4s, v3.4s, v6.s[3] \n"
"fmla v20.4s, v3.4s, v7.s[0] \n"
"fmla v23.4s, v3.4s, v7.s[1] \n"
"fmla v26.4s, v3.4s, v7.s[2] \n"
"fmla v29.4s, v3.4s, v7.s[3] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v9.4s, v0.4s, v6.s[0] \n"
"fmla v12.4s, v0.4s, v6.s[1] \n"
"fmla v15.4s, v0.4s, v6.s[2] \n"
"fmla v18.4s, v0.4s, v6.s[3] \n"
"fmla v21.4s, v0.4s, v7.s[0] \n"
"fmla v24.4s, v0.4s, v7.s[1] \n"
"fmla v27.4s, v0.4s, v7.s[2] \n"
"fmla v30.4s, v0.4s, v7.s[3] \n"
"fmla v10.4s, v1.4s, v6.s[0] \n"
"fmla v13.4s, v1.4s, v6.s[1] \n"
"fmla v16.4s, v1.4s, v6.s[2] \n"
"fmla v19.4s, v1.4s, v6.s[3] \n"
"fmla v22.4s, v1.4s, v7.s[0] \n"
"fmla v25.4s, v1.4s, v7.s[1] \n"
"fmla v28.4s, v1.4s, v7.s[2] \n"
"fmla v31.4s, v1.4s, v7.s[3] \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v2.4s, v4.s[0] \n"
"fmla v11.4s, v2.4s, v4.s[1] \n"
"fmla v14.4s, v2.4s, v4.s[2] \n"
"fmla v17.4s, v2.4s, v4.s[3] \n"
"fmla v20.4s, v2.4s, v5.s[0] \n"
"fmla v23.4s, v2.4s, v5.s[1] \n"
"fmla v26.4s, v2.4s, v5.s[2] \n"
"fmla v29.4s, v2.4s, v5.s[3] \n"
"fmla v9.4s, v3.4s, v4.s[0] \n"
"fmla v12.4s, v3.4s, v4.s[1] \n"
"fmla v15.4s, v3.4s, v4.s[2] \n"
"fmla v18.4s, v3.4s, v4.s[3] \n"
"fmla v21.4s, v3.4s, v5.s[0] \n"
"fmla v24.4s, v3.4s, v5.s[1] \n"
"fmla v27.4s, v3.4s, v5.s[2] \n"
"fmla v30.4s, v3.4s, v5.s[3] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v10.4s, v0.4s, v4.s[0] \n"
"fmla v13.4s, v0.4s, v4.s[1] \n"
"fmla v16.4s, v0.4s, v4.s[2] \n"
"fmla v19.4s, v0.4s, v4.s[3] \n"
"fmla v22.4s, v0.4s, v5.s[0] \n"
"fmla v25.4s, v0.4s, v5.s[1] \n"
"fmla v28.4s, v0.4s, v5.s[2] \n"
"fmla v31.4s, v0.4s, v5.s[3] \n"
"fmla v8.4s, v1.4s, v6.s[0] \n"
"fmla v11.4s, v1.4s, v6.s[1] \n"
"fmla v14.4s, v1.4s, v6.s[2] \n"
"fmla v17.4s, v1.4s, v6.s[3] \n"
"fmla v20.4s, v1.4s, v7.s[0] \n"
"fmla v23.4s, v1.4s, v7.s[1] \n"
"fmla v26.4s, v1.4s, v7.s[2] \n"
"fmla v29.4s, v1.4s, v7.s[3] \n"
"fmla v9.4s, v2.4s, v6.s[0] \n"
"fmla v12.4s, v2.4s, v6.s[1] \n"
"fmla v15.4s, v2.4s, v6.s[2] \n"
"fmla v18.4s, v2.4s, v6.s[3] \n"
"fmla v21.4s, v2.4s, v7.s[0] \n"
"fmla v24.4s, v2.4s, v7.s[1] \n"
"fmla v27.4s, v2.4s, v7.s[2] \n"
"fmla v30.4s, v2.4s, v7.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v10.4s, v3.4s, v6.s[0] \n"
"fmla v13.4s, v3.4s, v6.s[1] \n"
"fmla v16.4s, v3.4s, v6.s[2] \n"
"fmla v19.4s, v3.4s, v6.s[3] \n"
"fmla v22.4s, v3.4s, v7.s[0] \n"
"fmla v25.4s, v3.4s, v7.s[1] \n"
"fmla v28.4s, v3.4s, v7.s[2] \n"
"fmla v31.4s, v3.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n"
"st1 {v11.4h, v12.4h, v13.4h}, [%2], #24 \n"
"st1 {v14.4h, v15.4h, v16.4h}, [%3], #24 \n"
"st1 {v17.4h, v18.4h, v19.4h}, [%4], #24 \n"
"st1 {v20.4h, v21.4h, v22.4h}, [%5], #24 \n"
"st1 {v23.4h, v24.4h, v25.4h}, [%6], #24 \n"
"st1 {v26.4h, v27.4h, v28.4h}, [%7], #24 \n"
"st1 {v29.4h, v30.4h, v31.4h}, [%8], #24 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i+7<size; i+=8)
{
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v30.4s, v31.4s}, [%22] \n"
"dup v16.4s, v30.s[0] \n"
"dup v17.4s, v30.s[0] \n"
"dup v18.4s, v30.s[1] \n"
"dup v19.4s, v30.s[1] \n"
"dup v20.4s, v30.s[2] \n"
"dup v21.4s, v30.s[2] \n"
"dup v22.4s, v30.s[3] \n"
"dup v23.4s, v30.s[3] \n"
"dup v24.4s, v31.s[0] \n"
"dup v25.4s, v31.s[0] \n"
"dup v26.4s, v31.s[1] \n"
"dup v27.4s, v31.s[1] \n"
"dup v28.4s, v31.s[2] \n"
"dup v29.4s, v31.s[2] \n"
"dup v30.4s, v31.s[3] \n"
"dup v31.4s, v31.s[3] \n"
"0: \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v16.4s, v0.4s, v4.s[0] \n"
"fmla v18.4s, v0.4s, v4.s[1] \n"
"fmla v20.4s, v0.4s, v4.s[2] \n"
"fmla v22.4s, v0.4s, v4.s[3] \n"
"fmla v24.4s, v0.4s, v5.s[0] \n"
"fmla v26.4s, v0.4s, v5.s[1] \n"
"fmla v28.4s, v0.4s, v5.s[2] \n"
"fmla v30.4s, v0.4s, v5.s[3] \n"
"fmla v17.4s, v1.4s, v4.s[0] \n"
"fmla v19.4s, v1.4s, v4.s[1] \n"
"fmla v21.4s, v1.4s, v4.s[2] \n"
"fmla v23.4s, v1.4s, v4.s[3] \n"
"fmla v25.4s, v1.4s, v5.s[0] \n"
"fmla v27.4s, v1.4s, v5.s[1] \n"
"fmla v29.4s, v1.4s, v5.s[2] \n"
"fmla v31.4s, v1.4s, v5.s[3] \n"
"fmla v16.4s, v2.4s, v6.s[0] \n"
"fmla v18.4s, v2.4s, v6.s[1] \n"
"fmla v20.4s, v2.4s, v6.s[2] \n"
"fmla v22.4s, v2.4s, v6.s[3] \n"
"fmla v24.4s, v2.4s, v7.s[0] \n"
"fmla v26.4s, v2.4s, v7.s[1] \n"
"fmla v28.4s, v2.4s, v7.s[2] \n"
"fmla v30.4s, v2.4s, v7.s[3] \n"
"fmla v17.4s, v3.4s, v6.s[0] \n"
"fmla v19.4s, v3.4s, v6.s[1] \n"
"fmla v21.4s, v3.4s, v6.s[2] \n"
"fmla v23.4s, v3.4s, v6.s[3] \n"
"fmla v25.4s, v3.4s, v7.s[0] \n"
"fmla v27.4s, v3.4s, v7.s[1] \n"
"fmla v29.4s, v3.4s, v7.s[2] \n"
"fmla v31.4s, v3.4s, v7.s[3] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%9], #32 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v12.4s, v8.s[0] \n"
"fmla v18.4s, v12.4s, v8.s[1] \n"
"fmla v20.4s, v12.4s, v8.s[2] \n"
"fmla v22.4s, v12.4s, v8.s[3] \n"
"fmla v24.4s, v12.4s, v9.s[0] \n"
"fmla v26.4s, v12.4s, v9.s[1] \n"
"fmla v28.4s, v12.4s, v9.s[2] \n"
"fmla v30.4s, v12.4s, v9.s[3] \n"
"fmla v17.4s, v13.4s, v8.s[0] \n"
"fmla v19.4s, v13.4s, v8.s[1] \n"
"fmla v21.4s, v13.4s, v8.s[2] \n"
"fmla v23.4s, v13.4s, v8.s[3] \n"
"fmla v25.4s, v13.4s, v9.s[0] \n"
"fmla v27.4s, v13.4s, v9.s[1] \n"
"fmla v29.4s, v13.4s, v9.s[2] \n"
"fmla v31.4s, v13.4s, v9.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v10.s[0] \n"
"fmla v18.4s, v14.4s, v10.s[1] \n"
"fmla v20.4s, v14.4s, v10.s[2] \n"
"fmla v22.4s, v14.4s, v10.s[3] \n"
"fmla v24.4s, v14.4s, v11.s[0] \n"
"fmla v26.4s, v14.4s, v11.s[1] \n"
"fmla v28.4s, v14.4s, v11.s[2] \n"
"fmla v30.4s, v14.4s, v11.s[3] \n"
"fmla v17.4s, v15.4s, v10.s[0] \n"
"fmla v19.4s, v15.4s, v10.s[1] \n"
"fmla v21.4s, v15.4s, v10.s[2] \n"
"fmla v23.4s, v15.4s, v10.s[3] \n"
"fmla v25.4s, v15.4s, v11.s[0] \n"
"fmla v27.4s, v15.4s, v11.s[1] \n"
"fmla v29.4s, v15.4s, v11.s[2] \n"
"fmla v31.4s, v15.4s, v11.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v16.4h, v17.4h}, [%1], #16 \n"
"st1 {v18.4h, v19.4h}, [%2], #16 \n"
"st1 {v20.4h, v21.4h}, [%3], #16 \n"
"st1 {v22.4h, v23.4h}, [%4], #16 \n"
"st1 {v24.4h, v25.4h}, [%5], #16 \n"
"st1 {v26.4h, v27.4h}, [%6], #16 \n"
"st1 {v28.4h, v29.4h}, [%7], #16 \n"
"st1 {v30.4h, v31.4h}, [%8], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i+3<size; i+=4)
{
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v22.4s, v23.4s}, [%22] \n"
"dup v16.4s, v22.s[0] \n"
"dup v17.4s, v22.s[1] \n"
"dup v18.4s, v22.s[2] \n"
"dup v19.4s, v22.s[3] \n"
"dup v20.4s, v23.s[0] \n"
"dup v21.4s, v23.s[1] \n"
"dup v22.4s, v23.s[2] \n"
"dup v23.4s, v23.s[3] \n"
"0: \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v16.4s, v0.4s, v4.s[0] \n"
"fmla v17.4s, v0.4s, v4.s[1] \n"
"fmla v18.4s, v0.4s, v4.s[2] \n"
"fmla v19.4s, v0.4s, v4.s[3] \n"
"fmla v20.4s, v0.4s, v5.s[0] \n"
"fmla v21.4s, v0.4s, v5.s[1] \n"
"fmla v22.4s, v0.4s, v5.s[2] \n"
"fmla v23.4s, v0.4s, v5.s[3] \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v1.4s, v6.s[0] \n"
"fmla v17.4s, v1.4s, v6.s[1] \n"
"fmla v18.4s, v1.4s, v6.s[2] \n"
"fmla v19.4s, v1.4s, v6.s[3] \n"
"fmla v20.4s, v1.4s, v7.s[0] \n"
"fmla v21.4s, v1.4s, v7.s[1] \n"
"fmla v22.4s, v1.4s, v7.s[2] \n"
"fmla v23.4s, v1.4s, v7.s[3] \n"
"fmla v16.4s, v2.4s, v8.s[0] \n"
"fmla v17.4s, v2.4s, v8.s[1] \n"
"fmla v18.4s, v2.4s, v8.s[2] \n"
"fmla v19.4s, v2.4s, v8.s[3] \n"
"fmla v20.4s, v2.4s, v9.s[0] \n"
"fmla v21.4s, v2.4s, v9.s[1] \n"
"fmla v22.4s, v2.4s, v9.s[2] \n"
"fmla v23.4s, v2.4s, v9.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v3.4s, v10.s[0] \n"
"fmla v17.4s, v3.4s, v10.s[1] \n"
"fmla v18.4s, v3.4s, v10.s[2] \n"
"fmla v19.4s, v3.4s, v10.s[3] \n"
"fmla v20.4s, v3.4s, v11.s[0] \n"
"fmla v21.4s, v3.4s, v11.s[1] \n"
"fmla v22.4s, v3.4s, v11.s[2] \n"
"fmla v23.4s, v3.4s, v11.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v16.4h}, [%1], #8 \n"
"st1 {v17.4h}, [%2], #8 \n"
"st1 {v18.4h}, [%3], #8 \n"
"st1 {v19.4h}, [%4], #8 \n"
"st1 {v20.4h}, [%5], #8 \n"
"st1 {v21.4h}, [%6], #8 \n"
"st1 {v22.4h}, [%7], #8 \n"
"st1 {v23.4h}, [%8], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
for (; i<size; i++)
{
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + i%12%4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v16.4s, v17.4s}, [%22] \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #64] \n"
"ld1 {v0.4h}, [%9], #8 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v5.4s, v0.s[0] \n"
"fmla v18.4s, v6.4s, v0.s[1] \n"
"fmla v19.4s, v7.4s, v0.s[1] \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[2] \n"
"fmla v17.4s, v9.4s, v0.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v18.4s, v10.4s, v0.s[3] \n"
"fmla v19.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"st1 {v16.h}[0], [%1], #2 \n"
"st1 {v16.h}[1], [%2], #2 \n"
"st1 {v16.h}[2], [%3], #2 \n"
"st1 {v16.h}[3], [%4], #2 \n"
"st1 {v17.h}[0], [%5], #2 \n"
"st1 {v17.h}[1], [%6], #2 \n"
"st1 {v17.h}[2], [%7], #2 \n"
"st1 {v17.h}[3], [%8], #2 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"
);
}
}
remain_outch_start += nn_outch << 3;
nn_outch = (outch - remain_outch_start) >> 2;
#else // __aarch64__
nn_outch = outch >> 2;
#endif // __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
unsigned short* outptr0 = top_blob.channel(p);
unsigned short* outptr1 = top_blob.channel(p+1);
unsigned short* outptr2 = top_blob.channel(p+2);
unsigned short* outptr3 = top_blob.channel(p+3);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i=0;
#if __aarch64__
for (; i+11<size; i+=12)
{
unsigned short* tmpptr = tmp.channel(i/12);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v19.4s}, [%14] \n"
"dup v8.4s, v19.s[0] \n"
"dup v9.4s, v19.s[0] \n"
"dup v10.4s, v19.s[0] \n"
"dup v11.4s, v19.s[1] \n"
"dup v12.4s, v19.s[1] \n"
"dup v13.4s, v19.s[1] \n"
"dup v14.4s, v19.s[2] \n"
"dup v15.4s, v19.s[2] \n"
"dup v16.4s, v19.s[2] \n"
"dup v17.4s, v19.s[3] \n"
"dup v18.4s, v19.s[3] \n"
"dup v19.4s, v19.s[3] \n"
"0: \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v11.4s, v0.4s, v4.s[1] \n"
"fmla v14.4s, v0.4s, v4.s[2] \n"
"fmla v17.4s, v0.4s, v4.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v12.4s, v1.4s, v4.s[1] \n"
"fmla v15.4s, v1.4s, v4.s[2] \n"
"fmla v18.4s, v1.4s, v4.s[3] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"fmla v13.4s, v2.4s, v4.s[1] \n"
"fmla v16.4s, v2.4s, v4.s[2] \n"
"fmla v19.4s, v2.4s, v4.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5], #32 \n"
"shll v20.4s, v20.4h, #16 \n"
"shll v21.4s, v21.4h, #16 \n"
"shll v22.4s, v22.4h, #16 \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v8.4s, v3.4s, v5.s[0] \n"
"fmla v11.4s, v3.4s, v5.s[1] \n"
"fmla v14.4s, v3.4s, v5.s[2] \n"
"fmla v17.4s, v3.4s, v5.s[3] \n"
"fmla v9.4s, v20.4s, v5.s[0] \n"
"fmla v12.4s, v20.4s, v5.s[1] \n"
"fmla v15.4s, v20.4s, v5.s[2] \n"
"fmla v18.4s, v20.4s, v5.s[3] \n"
"fmla v10.4s, v21.4s, v5.s[0] \n"
"fmla v13.4s, v21.4s, v5.s[1] \n"
"fmla v16.4s, v21.4s, v5.s[2] \n"
"fmla v19.4s, v21.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%5], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v8.4s, v22.4s, v6.s[0] \n"
"fmla v11.4s, v22.4s, v6.s[1] \n"
"fmla v14.4s, v22.4s, v6.s[2] \n"
"fmla v17.4s, v22.4s, v6.s[3] \n"
"fmla v9.4s, v23.4s, v6.s[0] \n"
"fmla v12.4s, v23.4s, v6.s[1] \n"
"fmla v15.4s, v23.4s, v6.s[2] \n"
"fmla v18.4s, v23.4s, v6.s[3] \n"
"fmla v10.4s, v24.4s, v6.s[0] \n"
"fmla v13.4s, v24.4s, v6.s[1] \n"
"fmla v16.4s, v24.4s, v6.s[2] \n"
"fmla v19.4s, v24.4s, v6.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v25.4s, v7.s[0] \n"
"fmla v11.4s, v25.4s, v7.s[1] \n"
"fmla v14.4s, v25.4s, v7.s[2] \n"
"fmla v17.4s, v25.4s, v7.s[3] \n"
"fmla v9.4s, v26.4s, v7.s[0] \n"
"fmla v12.4s, v26.4s, v7.s[1] \n"
"fmla v15.4s, v26.4s, v7.s[2] \n"
"fmla v18.4s, v26.4s, v7.s[3] \n"
"fmla v10.4s, v27.4s, v7.s[0] \n"
"fmla v13.4s, v27.4s, v7.s[1] \n"
"fmla v16.4s, v27.4s, v7.s[2] \n"
"fmla v19.4s, v27.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n"
"st1 {v11.4h, v12.4h, v13.4h}, [%2], #24 \n"
"st1 {v14.4h, v15.4h, v16.4h}, [%3], #24 \n"
"st1 {v17.4h, v18.4h, v19.4h}, [%4], #24 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
}
#endif // __aarch64__
for (; i+7<size; i+=8)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4);
#else
unsigned short* tmpptr = tmp.channel(i/8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4);
#endif
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v15.4s}, [%14] \n"
"dup v8.4s, v15.s[0] \n"
"dup v9.4s, v15.s[0] \n"
"dup v10.4s, v15.s[1] \n"
"dup v11.4s, v15.s[1] \n"
"dup v12.4s, v15.s[2] \n"
"dup v13.4s, v15.s[2] \n"
"dup v14.4s, v15.s[3] \n"
"dup v15.4s, v15.s[3] \n"
"0: \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v10.4s, v0.4s, v4.s[1] \n"
"fmla v12.4s, v0.4s, v4.s[2] \n"
"fmla v14.4s, v0.4s, v4.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v11.4s, v1.4s, v4.s[1] \n"
"fmla v13.4s, v1.4s, v4.s[2] \n"
"fmla v15.4s, v1.4s, v4.s[3] \n"
"fmla v8.4s, v2.4s, v5.s[0] \n"
"fmla v10.4s, v2.4s, v5.s[1] \n"
"fmla v12.4s, v2.4s, v5.s[2] \n"
"fmla v14.4s, v2.4s, v5.s[3] \n"
"fmla v9.4s, v3.4s, v5.s[0] \n"
"fmla v11.4s, v3.4s, v5.s[1] \n"
"fmla v13.4s, v3.4s, v5.s[2] \n"
"fmla v15.4s, v3.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v8.4s, v16.4s, v6.s[0] \n"
"fmla v10.4s, v16.4s, v6.s[1] \n"
"fmla v12.4s, v16.4s, v6.s[2] \n"
"fmla v14.4s, v16.4s, v6.s[3] \n"
"fmla v9.4s, v17.4s, v6.s[0] \n"
"fmla v11.4s, v17.4s, v6.s[1] \n"
"fmla v13.4s, v17.4s, v6.s[2] \n"
"fmla v15.4s, v17.4s, v6.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v18.4s, v7.s[0] \n"
"fmla v10.4s, v18.4s, v7.s[1] \n"
"fmla v12.4s, v18.4s, v7.s[2] \n"
"fmla v14.4s, v18.4s, v7.s[3] \n"
"fmla v9.4s, v19.4s, v7.s[0] \n"
"fmla v11.4s, v19.4s, v7.s[1] \n"
"fmla v13.4s, v19.4s, v7.s[2] \n"
"fmla v15.4s, v19.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"st1 {v8.4h, v9.4h}, [%1], #16 \n"
"st1 {v10.4h, v11.4h}, [%2], #16 \n"
"st1 {v12.4h, v13.4h}, [%3], #16 \n"
"st1 {v14.4h, v15.4h}, [%4], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
#else // __aarch64__
asm volatile(
"vld1.f32 {d30-d31}, [%14] \n"
"vdup.f32 q8, d30[0] \n"
"vdup.f32 q9, d30[0] \n"
"vdup.f32 q10, d30[1] \n"
"vdup.f32 q11, d30[1] \n"
"vdup.f32 q12, d31[0] \n"
"vdup.f32 q13, d31[0] \n"
"vdup.f32 q14, d31[1] \n"
"vdup.f32 q15, d31[1] \n"
"0: \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5]! \n"
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q10, q0, d8[1] \n"
"vmla.f32 q12, q0, d9[0] \n"
"vmla.f32 q14, q0, d9[1] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q11, q1, d8[1] \n"
"vmla.f32 q13, q1, d9[0] \n"
"vmla.f32 q15, q1, d9[1] \n"
"vmla.f32 q8, q2, d10[0] \n"
"vmla.f32 q10, q2, d10[1] \n"
"vmla.f32 q12, q2, d11[0] \n"
"vmla.f32 q14, q2, d11[1] \n"
"vmla.f32 q9, q3, d10[0] \n"
"vmla.f32 q11, q3, d10[1] \n"
"vmla.f32 q13, q3, d11[0] \n"
"vmla.f32 q15, q3, d11[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q8, q0, d12[0] \n"
"vmla.f32 q10, q0, d12[1] \n"
"vmla.f32 q12, q0, d13[0] \n"
"vmla.f32 q14, q0, d13[1] \n"
"vmla.f32 q9, q1, d12[0] \n"
"vmla.f32 q11, q1, d12[1] \n"
"vmla.f32 q13, q1, d13[0] \n"
"vmla.f32 q15, q1, d13[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q2, d14[0] \n"
"vmla.f32 q10, q2, d14[1] \n"
"vmla.f32 q12, q2, d15[0] \n"
"vmla.f32 q14, q2, d15[1] \n"
"vmla.f32 q9, q3, d14[0] \n"
"vmla.f32 q11, q3, d14[1] \n"
"vmla.f32 q13, q3, d15[0] \n"
"vmla.f32 q15, q3, d15[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vshrn.u32 d20, q10, #16 \n"
"vshrn.u32 d21, q11, #16 \n"
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vshrn.u32 d28, q14, #16 \n"
"vshrn.u32 d29, q15, #16 \n"
"vst1.u16 {d16-d17}, [%1 :64]! \n"
"vst1.u16 {d20-d21}, [%2 :64]! \n"
"vst1.u16 {d24-d25}, [%3 :64]! \n"
"vst1.u16 {d28-d29}, [%4 :64]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; i+3<size; i+=4)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4);
#else
unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4);
#endif
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v11.4s}, [%14] \n"
"dup v8.4s, v11.s[0] \n"
"dup v9.4s, v11.s[1] \n"
"dup v10.4s, v11.s[2] \n"
"dup v11.4s, v11.s[3] \n"
"0: \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v0.4s, v4.s[1] \n"
"fmla v10.4s, v0.4s, v4.s[2] \n"
"fmla v11.4s, v0.4s, v4.s[3] \n"
"fmla v8.4s, v1.4s, v5.s[0] \n"
"fmla v9.4s, v1.4s, v5.s[1] \n"
"fmla v10.4s, v1.4s, v5.s[2] \n"
"fmla v11.4s, v1.4s, v5.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v2.4s, v6.s[0] \n"
"fmla v9.4s, v2.4s, v6.s[1] \n"
"fmla v10.4s, v2.4s, v6.s[2] \n"
"fmla v11.4s, v2.4s, v6.s[3] \n"
"fmla v8.4s, v3.4s, v7.s[0] \n"
"fmla v9.4s, v3.4s, v7.s[1] \n"
"fmla v10.4s, v3.4s, v7.s[2] \n"
"fmla v11.4s, v3.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"st1 {v8.4h}, [%1], #8 \n"
"st1 {v9.4h}, [%2], #8 \n"
"st1 {v10.4h}, [%3], #8 \n"
"st1 {v11.4h}, [%4], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
);
#else // __aarch64__
asm volatile(
"vld1.f32 {d22-d23}, [%14] \n"
"vdup.f32 q8, d22[0] \n"
"vdup.f32 q9, d22[1] \n"
"vdup.f32 q10, d23[0] \n"
"vdup.f32 q11, d23[1] \n"
"0: \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5]! \n"
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q0, d8[1] \n"
"vmla.f32 q10, q0, d9[0] \n"
"vmla.f32 q11, q0, d9[1] \n"
"vmla.f32 q8, q1, d10[0] \n"
"vmla.f32 q9, q1, d10[1] \n"
"vmla.f32 q10, q1, d11[0] \n"
"vmla.f32 q11, q1, d11[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q2, d12[0] \n"
"vmla.f32 q9, q2, d12[1] \n"
"vmla.f32 q10, q2, d13[0] \n"
"vmla.f32 q11, q2, d13[1] \n"
"vmla.f32 q8, q3, d14[0] \n"
"vmla.f32 q9, q3, d14[1] \n"
"vmla.f32 q10, q3, d15[0] \n"
"vmla.f32 q11, q3, d15[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d18, q9, #16 \n"
"vshrn.u32 d20, q10, #16 \n"
"vshrn.u32 d22, q11, #16 \n"
"vst1.u16 {d16}, [%1 :64]! \n"
"vst1.u16 {d18}, [%2 :64]! \n"
"vst1.u16 {d20}, [%3 :64]! \n"
"vst1.u16 {d22}, [%4 :64]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif // __aarch64__
}
for (; i<size; i++)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + i%12%4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4);
#else
unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4);
#endif
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v8.4s}, [%14] \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5], #8 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v10.4s, v6.4s, v0.s[2] \n"
"fmla v11.4s, v7.4s, v0.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"st1 {v8.h}[0], [%1], #2 \n"
"st1 {v8.h}[1], [%2], #2 \n"
"st1 {v8.h}[2], [%3], #2 \n"
"st1 {v8.h}[3], [%4], #2 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
);
#else // __aarch64__
asm volatile(
"vld1.f32 {d16-d17}, [%14] \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%5, #64] \n"
"vld1.u16 {d1}, [%5]! \n"
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6]! \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q7, d1[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vshrn.u32 d16, q8, #16 \n"
"vst1.u16 {d16[0]}, [%1]! \n"
"vst1.u16 {d16[1]}, [%2]! \n"
"vst1.u16 {d16[2]}, [%3]! \n"
"vst1.u16 {d16[3]}, [%4]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif // __aarch64__
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
unsigned short* outptr0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
int i=0;
#if __aarch64__
for (; i+11<size; i+=12)
{
unsigned short* tmpptr = tmp.channel(i/12);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4 + p%4);
int nn = inch;// inch always > 0
asm volatile(
"dup v8.4s, %w8 \n"
"dup v9.4s, %w8 \n"
"dup v10.4s, %w8 \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%2], #32 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v5.4s, v3.4s, v4.s[1] \n"
"fmla v6.4s, v12.4s, v4.s[1] \n"
"fmla v7.4s, v13.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v8.4s, v14.4s, v4.s[2] \n"
"fmla v9.4s, v15.4s, v4.s[2] \n"
"fmla v10.4s, v16.4s, v4.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v5.4s, v17.4s, v4.s[3] \n"
"fmla v6.4s, v18.4s, v4.s[3] \n"
"fmla v7.4s, v19.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v5.4s \n"
"fadd v9.4s, v9.4s, v6.4s \n"
"fadd v10.4s, v10.4s, v7.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
}
#endif // __aarch64__
for (; i+7<size; i+=8)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4 + p%4);
#else
unsigned short* tmpptr = tmp.channel(i/8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4 + p%4);
#endif
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"dup v8.4s, %w8 \n"
"dup v9.4s, %w8 \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[1] \n"
"fmla v11.4s, v3.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%2], #32 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v8.4s, v12.4s, v4.s[2] \n"
"fmla v9.4s, v13.4s, v4.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v10.4s, v14.4s, v4.s[3] \n"
"fmla v11.4s, v15.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v9.4s, v9.4s, v11.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"st1 {v8.4h, v9.4h}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %8 \n"
"vdup.f32 q9, %8 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"pld [%3, #64] \n"
"vld1.u16 {d9}, [%3]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q10, q2, d8[1] \n"
"vmla.f32 q11, q3, d8[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d28-d31}, [%2]! \n"
"vshll.u16 q12, d28, #16 \n"
"vshll.u16 q13, d29, #16 \n"
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q8, q12, d9[0] \n"
"vmla.f32 q9, q13, d9[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q10, q14, d9[1] \n"
"vmla.f32 q11, q15, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q9, q9, q11 \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vst1.u16 {d16-d17}, [%1 :64]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; i+3<size; i+=4)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4 + p%4);
#else
unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4 + p%4);
#endif
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"dup v8.4s, %w8 \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"st1 {v8.4h}, [%1], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11"
);
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"pld [%3, #64] \n"
"vld1.u16 {d9}, [%3]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vshrn.u32 d16, q8, #16 \n"
"vst1.u16 {d16}, [%1]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11"
);
#endif // __aarch64__
}
for (; i<size; i++)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + i%12%4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4 + p%4);
#else
unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4 + p%4);
#endif
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (int q=0; q<inch; q++)
{
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(tmpptr), 16));
float32x4_t _k0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(kptr), 16));
_sum0 = vmlaq_f32(_sum0, _r0, _k0);
kptr += 4;
tmpptr += 4;
}
#if __aarch64__
float sum0 = vaddvq_f32(_sum0);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss2 = vpadd_f32(_ss, _ss);
float sum0 = vget_lane_f32(_ss2, 0);
#endif
outptr0[0] = float32_to_bfloat16(bias0 + sum0);
outptr0++;
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const float bias0 = bias ? bias[p] : 0.f;
//
// unsigned short* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// float sum = bias0;
//
// const unsigned short* kptr = _kernel.channel(p);
//
// for (int q=0; q<inch; q++)
// {
// const unsigned short* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
/* Stride-2 1x1 convolution, pack4 bf16 input -> pack1 bf16 output.
 *
 * A 1x1 kernel with stride 2 only ever reads every second input pixel in
 * both directions, so the input is first "shrunk" by gathering those pixels
 * into a dense stride-1 blob, and the existing stride-1 sgemm path does the
 * actual convolution.
 *
 * bottom_blob : input, elempack==4, bf16 (stored as unsigned short)
 * top_blob    : pre-sized output (outw/outh read from it)
 * kernel      : pre-packed weights forwarded to the sgemm path
 * _bias, opt  : forwarded unchanged
 */
static void conv1x1s2_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // unsigned shorts to skip after finishing a sampled row:
    // (w - 2*outw) finishes the current input row, +w skips the odd row
    // (rows advance by 2), *4 because each pixel carries 4 bf16 lanes (pack4)
    const int tailstep = (w - 2*outw + w) * 4;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<channels; p++)
    {
        const unsigned short* r0 = bottom_blob.channel(p);
        unsigned short* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            // gather 4 output pixels per iteration; input advances 8 pixels
            // (stride 2), each pixel being 4 u16 lanes -> r0 += 32
            for (; j+3 < outw; j+=4)
            {
                uint16x4_t _v0 = vld1_u16(r0);
                uint16x4_t _v1 = vld1_u16(r0+8);
                uint16x4_t _v2 = vld1_u16(r0+16);
                uint16x4_t _v3 = vld1_u16(r0+24);
                uint16x8_t _v01 = vcombine_u16(_v0, _v1);
                uint16x8_t _v23 = vcombine_u16(_v2, _v3);
                vst1q_u16(outptr, _v01);
                vst1q_u16(outptr+8, _v23);
                r0 += 32;
                outptr += 16;
            }
            // 2 pixels at a time
            for (; j+1 < outw; j+=2)
            {
                uint16x4_t _v0 = vld1_u16(r0);
                uint16x4_t _v1 = vld1_u16(r0+8);
                uint16x8_t _v = vcombine_u16(_v0, _v1);
                vst1q_u16(outptr, _v);
                r0 += 16;
                outptr += 8;
            }
            // scalar (one pack4 pixel) tail
            for (; j < outw; j++)
            {
                uint16x4_t _v = vld1_u16(r0);
                vst1_u16(outptr, _v);
                r0 += 8;
                outptr += 4;
            }

            r0 += tailstep;
        }
    }

    // shrunken blob is now a plain stride-1 problem
    conv1x1s1_sgemm_pack4to1_bf16s_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
nusPerfect.cpp.dapt.c | #pragma warning(disable : 4996)
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define _CRT_SECURE_NO_WARNINGS
#define min(x,y) ((x) < (y) ? (x) : (y))
#define max(x,y) ((x) > (y) ? (x) : (y))
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
#define S0(a, i, j, k) c[i][j] = c[i][k] + c[k][j]
//#define match(b1, b2) (((b1)+(b2)) == 3 ? 1 : 0)
#define sigma(i, j) (match(seq[i], seq[j]))
/* Return the larger of the two scores. */
int max_score(int s1, int s2)
{
    return (s1 < s2) ? s2 : s1;
}
/* Return the largest of three scores. */
int max_sc(int s1, int s2, int s3) {
    int best = s1;
    if (s2 > best)
        best = s2;
    if (s3 > best)
        best = s3;
    return best;
}
/* Return 1 if the two one-hot encoded bases can pair, else 0.
 *
 * Encoding: A=1, G=2, C=4, U=8 (one bit per base).
 * Within {1,2,4,8} each allowed pair has a unique sum:
 *   A+U = 9, G+C = 6, G+U = 10 (wobble pair).
 */
int match(const int e1, const int e2)
{
    switch (e1 + e2) {
    case 6:   /* G-C */
    case 9:   /* A-U */
    case 10:  /* G-U wobble */
        return 1;
    default:
        return 0;
    }
}
void printMatrix(int**, int, int);
int ** getFullCopy(int ** table, int N);
int** allocateMatrix(int);
void deallocateMatrix(int**, int);
void write_results_full(int , double , char );
void write_results(int , double );
/* Nussinov-style RNA folding DP, "perfect loop nest without guard ifs"
 * variant.  The skewed/tiled loop nest below was generated by a polyhedral
 * tool (note the floord/min/max affine bounds); the bounds encode the
 * legality of running the c1 loop in parallel -- do not hand-edit them.
 *
 * table : initial DP matrix (deep-copied, never modified)
 * n     : sequence length
 * seq   : one-hot encoded bases (see match())
 *
 * Side effects: prints and logs elapsed wall-clock time, writes the final
 * matrix to the file "nontiled1", frees the working copy.
 */
void computeDYN1PerfectNoIf(int** table, int n, int *seq) {
    int** S = getFullCopy(table, n);  /* work on a private copy */
    double start = omp_get_wtime();
    for (int c0 = 1; c0 <= floord(n - 2, 26) + 1; c0 += 1) {
        /* tiles along c1 are independent for a fixed c0 */
#pragma omp parallel for
        for (int c1 = max(max(-((n + 24) / 26), -c0 - (n + 28) / 30 + 1), -8 * c0 + c0 / 2 + 1); c1 <= -c0; c1 += 1) {
            for (int c3 = max(max(-n + 2, 30 * c0 + 30 * c1 - 29), 26 * c1 + 1); c3 <= 30 * c0 + 30 * c1; c3 += 1) {
                for (int c4 = max(-26 * c1 - 25, -c3 + 1); c4 <= min(n - 1, -26 * c1); c4 += 1) {
                    for (int c5 = -c3; c5 < c4; c5 += 1) {
                        /* cell (-c3, c4): best of split at c5, current value,
                         * and pairing the two ends */
                        S[-c3][c4] = max_sc(S[-c3][c5] + S[c5 + 1][c4], S[-c3][c4], max_score(S[-c3][c4], S[-c3 + 1][c4 - 1] + match(seq[-c3], seq[c4])));
                    }
                }
            }
        }
    }
    double execution_time = omp_get_wtime() - start;
    printf("PERNIF: %lf\n", execution_time);
    write_results_full(n, execution_time, '\n');
    printMatrix(S, n, 1);
    deallocateMatrix(S, n);
}
/* Same Nussinov-style DP as computeDYN1PerfectNoIf, but the generated
 * loop nest keeps an explicit guard `if` inside the innermost body
 * instead of folding it into the bounds.  As above, the affine bounds
 * were machine-generated and are order/legality critical -- do not
 * hand-edit.
 *
 * Side effects: prints and logs elapsed time (semicolon-terminated record
 * via write_results), writes the final matrix to the file "nontiled2".
 */
void computeDYN2PerfectIf(int** table, int n, int *seq) {
    int** S = getFullCopy(table, n);  /* work on a private copy */
    double start = omp_get_wtime();
    for (int c0 = 1; c0 < n - 1; c0 += 1)
    {
        for (int c0_0 = max(floord(-c0, 15) + 1, floord(-n, 30) + 1); c0_0 <= floord(-15 * c0 + 2 * n - 30, 390) + 2; c0_0 += 1) {
            /* c1 tiles are independent for fixed (c0, c0_0) */
#pragma omp parallel for
            for (int c1 = max(max(floord(c0 - n + 1, 26), -c0_0 + floord(-n, 30) + 1), -c0 - 8 * c0_0 + floord(c0 + c0_0, 2) + 1); c1 <= min(min(0, -c0_0 + floord(-c0 - 1, 30) + 1), -8 * c0_0 + floord(-c0 + 2 * c0_0 + 2, 4) + 13); c1 += 1) {
                for (int c3 = max(max(0, -26 * c1 - 25), -2 * c0 - 30 * c0_0 - 30 * c1 + 1); c3 <= min(min(-c0 + n - 1, -26 * c1), -c0 - 30 * c0_0 - 30 * c1 + 29); c3 += 1) {
                    for (int c4 = max(-30 * c0_0 - 30 * c1, c0 + c3); c4 <= min(min(n - 1, -30 * c0_0 - 30 * c1 + 29), 2 * c0 + c3 - 1); c4 += 1) {
                        /* guarded extra update for the interior split point */
                        if (2 * c0 + c3 >= c4 + 2) {
                            S[c3][c4] = max_sc(S[c3][-c0 + c4] + S[-c0 + c4 + 1][c4], S[c3][c4], S[c3 + 1][c4 - 1] + match(seq[c3], seq[c4]));
                        }
                        S[c3][c4] = max_sc(S[c3][c0 + c3 - 1] + S[c0 + c3][c4], S[c3][c4], S[c3 + 1][c4 - 1] + match(seq[c3], seq[c4]));
                    }
                }
            }
        }
    }
    double execution_time = omp_get_wtime() - start;
    printf("PERWIF: %lf\n", execution_time);
    write_results(n, execution_time);
    printMatrix(S, n, 2);
    deallocateMatrix(S, n);
}
/* Dump an N x N matrix to a text file named "nontiled<file_no>",
 * one space-separated row per line.  Silently returns (with a message
 * on stderr) if the file cannot be opened. */
void printMatrix(int** matrix, int N, int file_no) {
    /* "nontiled" (8) + up to 11 chars of a signed int + NUL: the original
     * 10-byte buffer overflowed for any file_no with more than one digit */
    char filename[32];
    snprintf(filename, sizeof(filename), "nontiled%d", file_no);
    FILE* f = fopen(filename, "wt");
    if (f == NULL) {
        fprintf(stderr, "printMatrix: cannot open %s\n", filename);
        return;
    }
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++)
            fprintf(f, "%d ", matrix[i][j]);
        fprintf(f, "\n");
    }
    fclose(f);
}
/* Return a freshly allocated N x N deep copy of table (caller owns it). */
int **getFullCopy(int ** table, int N)
{
    int **dup = allocateMatrix(N);
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            dup[row][col] = table[row][col];
        }
    }
    return dup;
}
/* Allocate an N x N matrix of int as an array of row pointers.
 * Entries are uninitialized.  Returns NULL (after releasing any rows
 * already allocated) if any allocation fails; the original version
 * dereferenced a NULL spine / leaked rows on failure. */
int** allocateMatrix(int N) {
    int** t = (int**)malloc(sizeof(int*) * N);
    if (t == NULL)
        return NULL;
    for (int i = 0; i < N; i++) {
        t[i] = (int*)malloc(sizeof(int) * N);
        if (t[i] == NULL) {
            /* roll back so a partial failure does not leak */
            while (i-- > 0)
                free(t[i]);
            free(t);
            return NULL;
        }
    }
    return t;
}
/* Allocate an uninitialized vector of N ints; the caller frees it. */
int* allocateVector(int N) {
    return (int*)malloc(sizeof(int) * N);
}
/* Release a matrix created by allocateMatrix: all N rows, then the spine. */
void deallocateMatrix(int **t, int N) {
    int row;
    for (row = N - 1; row >= 0; row--)
        free(t[row]);
    free(t);
}
/* Append the record "<n>:<execution_time><end_char>" to results.txt.
 * The file is opened in append mode so repeated runs accumulate.
 * Prints to stderr and returns if the file cannot be opened (the
 * original dereferenced the NULL FILE* and crashed). */
void write_results_full(int n, double execution_time, char end_char)
{
    FILE* f = fopen("results.txt", "at");
    if (f == NULL) {
        fprintf(stderr, "write_results_full: cannot open results.txt\n");
        return;
    }
    fprintf(f, "%d:%lf%c", n, execution_time, end_char);
    fclose(f);
}
/* Append "<n>:<time>;" to results.txt -- semicolon-terminated record,
 * i.e. no newline, so successive results stay on one line. */
void write_results(int n, double execution_time)
{
    write_results_full(n, execution_time, ';');
}
/* Map a base character to its one-hot code:
 * 'A'->1, 'G'->2, 'C'->4, 'U'->8; any other character -> 16 (invalid). */
int getValue(const char c)
{
    switch (c) {
    case 'A': return 1;
    case 'G': return 2;
    case 'C': return 4;
    case 'U': return 8;
    default:  return 16;
    }
}
#define PERFORMANCE_TEST 1
/* Driver: build a random (performance mode) or fixed (test mode) RNA
 * sequence, run the DP kernels defined in this file, release memory. */
int main(void) {
#if PERFORMANCE_TEST==1
    const int ZMAX = 1600;
#else
    const int ZMAX = 16;
#endif
    int** graph = allocateMatrix(ZMAX);
    int* seq = allocateVector(ZMAX);
    if (graph == NULL || seq == NULL) {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }
    /* DP matrix starts at all zeros */
    for (int i = 0; i < ZMAX; i++)
        for (int j = 0; j < ZMAX; j++)
            graph[i][j] = 0;

    const char* seqTest = "GCGUCCACGGCUAGCU";
#if PERFORMANCE_TEST==1
    /* random one-hot base codes: 1<<0 .. 1<<3 == {1,2,4,8} == {A,G,C,U}.
     * BUGFIX: the original "1 << (rand()%4+1)" produced {2,4,8,16} --
     * 16 is not a valid base code and 'A' (1) could never occur. */
    for (int i = 0; i < ZMAX; i++)
    {
        seq[i] = 1 << (rand() % 4);
    }
#else
    for (int i = 0; i < ZMAX; i++)
        seq[i] = getValue(seqTest[i]);
#endif
    (void)seqTest;  /* unused in performance mode */

    int N = ZMAX;
    /* BUGFIX: call the kernels that are actually defined in this file;
     * the previous calls (computeDYN1Imperfect, computeDYN2Perfect, ...)
     * referenced functions that do not exist here. */
    computeDYN1PerfectNoIf(graph, N, seq);
    computeDYN2PerfectIf(graph, N, seq);

    deallocateMatrix(graph, ZMAX);
    free(seq);
    return 0;
}
|
transpose.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <complex.h>
#include "np_helper.h"
/*
* matrix a[n,m]
*/
/*
 * Transpose the n x m real matrix a into at (at[i*n+j] = a[j*m+i]).
 * Source rows are walked in blocks of BLOCK_DIM so that both the reads
 * and the strided writes stay reasonably cache-friendly.
 */
void NPdtranspose(int n, int m, double *a, double *at)
{
        size_t col, row, blk, blk_end;
        for (blk = 0; blk < n; blk += BLOCK_DIM) {
                blk_end = MIN(blk + BLOCK_DIM, n);
                for (col = 0; col < m; col++) {
                        double *dst = at + col * n;
                        for (row = blk; row < blk_end; row++) {
                                dst[row] = a[row * m + col];
                        }
                }
        }
}
/*
 * Complex analogue of NPdtranspose: plain transpose (no conjugation)
 * of the n x m matrix a into at, blocked by BLOCK_DIM source rows.
 */
void NPztranspose(int n, int m, double complex *a, double complex *at)
{
        size_t col, row, blk, blk_end;
        for (blk = 0; blk < n; blk += BLOCK_DIM) {
                blk_end = MIN(blk + BLOCK_DIM, n);
                for (col = 0; col < m; col++) {
                        double complex *dst = at + col * n;
                        for (row = blk; row < blk_end; row++) {
                                dst[row] = a[row * m + col];
                        }
                }
        }
}
void NPdtranspose_021(int *shape, double *a, double *at)
{
#pragma omp parallel default(none) \
shared(shape, a, at)
{
int ic;
size_t nm = shape[1] * shape[2];
#pragma omp for schedule (static)
for (ic = 0; ic < shape[0]; ic++) {
NPdtranspose(shape[1], shape[2], a+ic*nm, at+ic*nm);
}
}
}
/*
 * Complex analogue of NPdtranspose_021: batched (0,2,1) transpose of
 * shape[0] slices, each [shape[1], shape[2]], parallelized over slices.
 */
void NPztranspose_021(int *shape, double complex *a, double complex *at)
{
#pragma omp parallel default(none) \
        shared(shape, a, at)
{
        int ic;
        /* BUGFIX: cast before multiplying to avoid int overflow for
         * slices >= 2^31 elements */
        size_t nm = (size_t)shape[1] * shape[2];
#pragma omp for schedule (static)
        for (ic = 0; ic < shape[0]; ic++) {
                NPztranspose(shape[1], shape[2], a+ic*nm, at+ic*nm);
        }
}
}
/*
 * Symmetrize an n x n real matrix: out = a + a^T when hermi is
 * HERMITIAN or SYMMETRIC, otherwise out = a - a^T (anti-symmetrize).
 * Only the upper triangle is visited; both mirror entries of out are
 * written, so out ends up fully populated.
 *
 * NOTE(review): j0/j1 look unused here but are presumably referenced
 * inside the TRIU_LOOP macro (blocked upper-triangle iteration) --
 * confirm against np_helper.h before removing them.
 */
void NPdsymm_sum(int n, double *a, double *out, int hermi)
{
        size_t i, j, j0, j1;
        double tmp;

        if (hermi == HERMITIAN || hermi == SYMMETRIC) {
                TRIU_LOOP(i, j) {
                        tmp = a[i*n+j] + a[j*n+i];
                        out[i*n+j] = tmp;
                        out[j*n+i] = tmp;
                }
        } else {
                /* anti-symmetric: out[j][i] = -out[i][j] */
                TRIU_LOOP(i, j) {
                        tmp = a[i*n+j] - a[j*n+i];
                        out[i*n+j] = tmp;
                        out[j*n+i] =-tmp;
                }
        }
}
/*
 * Complex analogue of NPdsymm_sum for an n x n matrix:
 *   HERMITIAN : out = a + a^H   (conjugate-transpose sum; out is Hermitian)
 *   SYMMETRIC : out = a + a^T   (plain transpose sum, no conjugation)
 *   otherwise : out = a - a^H   (anti-Hermitian part)
 * Only the upper triangle is visited; mirror entries are written too.
 *
 * NOTE(review): j0/j1 appear unused but are presumably bound inside the
 * TRIU_LOOP macro -- confirm against np_helper.h.
 */
void NPzhermi_sum(int n, double complex *a, double complex *out, int hermi)
{
        size_t i, j, j0, j1;
        double complex tmp;

        if (hermi == HERMITIAN) {
                TRIU_LOOP(i, j) {
                        tmp = a[i*n+j] + conj(a[j*n+i]);
                        out[i*n+j] = tmp;
                        out[j*n+i] = conj(tmp);
                }
        } else if (hermi == SYMMETRIC) {
                TRIU_LOOP(i, j) {
                        tmp = a[i*n+j] + a[j*n+i];
                        out[i*n+j] = tmp;
                        out[j*n+i] = tmp;
                }
        } else {
                /* anti-Hermitian: out[j][i] = -conj(out[i][j]) */
                TRIU_LOOP(i, j) {
                        tmp = a[i*n+j] - conj(a[j*n+i]);
                        out[i*n+j] = tmp;
                        out[j*n+i] =-conj(tmp);
                }
        }
}
void NPdsymm_021_sum(int *shape, double *a, double *out, int hermi)
{
#pragma omp parallel default(none) \
shared(shape, a, out, hermi)
{
int ic;
size_t nn = shape[1] * shape[1];
#pragma omp for schedule (static)
for (ic = 0; ic < shape[0]; ic++) {
NPdsymm_sum(shape[1], a+ic*nn, out+ic*nn, hermi);
}
}
}
/*
 * Batched NPzhermi_sum: Hermitian/symmetric/anti-Hermitian sum of each
 * of shape[0] independent [shape[1], shape[1]] complex slices,
 * parallelized over slices.
 */
void NPzhermi_021_sum(int *shape, double complex *a, double complex *out, int hermi)
{
#pragma omp parallel default(none) \
        shared(shape, a, out, hermi)
{
        int ic;
        /* BUGFIX: cast before multiplying to avoid int overflow for
         * slices >= 2^31 elements */
        size_t nn = (size_t)shape[1] * shape[1];
#pragma omp for schedule (static)
        for (ic = 0; ic < shape[0]; ic++) {
                NPzhermi_sum(shape[1], a+ic*nn, out+ic*nn, hermi);
        }
}
}
|
Euclid_apply.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_Euclid.h"
/* #include "Euclid_dh.h" */
/* #include "Mat_dh.h" */
/* #include "Factor_dh.h" */
/* #include "Parser_dh.h" */
/* #include "TimeLog_dh.h" */
/* #include "SubdomainGraph_dh.h" */
static void scale_rhs_private(Euclid_dh ctx, HYPRE_Real *rhs);
static void permute_vec_n2o_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT);
static void permute_vec_o2n_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT);
#undef __FUNC__
#define __FUNC__ "Euclid_dhApply"
/* Apply the Euclid preconditioner: lhs = M^{-1} * rhs.
 *
 * Steps: (1) pass-through copy when preconditioning is disabled;
 * (2) optional subdomain-graph permutation and row scaling of the rhs;
 * (3) the triangular solves -- sequential / MPI block-Jacobi path vs.
 * the parallel PILU path; (4) un-permutation of the solution.
 * Also accumulates solve timings and iteration counters on ctx.
 * The input rhs vector itself is never modified. */
void Euclid_dhApply(Euclid_dh ctx, HYPRE_Real *rhs, HYPRE_Real *lhs)
{
  START_FUNC_DH
  HYPRE_Real *rhs_, *lhs_;
  HYPRE_Real t1, t2;

  t1 = hypre_MPI_Wtime();

  /* default settings; for everything except PILU */
  ctx->from = 0;
  ctx->to = ctx->m;

  /* case 1: no preconditioning -- lhs is just a copy of rhs */
  if (! strcmp(ctx->algo_ilu, "none") || ! strcmp(ctx->algo_par, "none")) {
    HYPRE_Int i, m = ctx->m;
    for (i=0; i<m; ++i) lhs[i] = rhs[i];
    goto END_OF_FUNCTION;
  }

  /*----------------------------------------------------------------
   * permute and scale rhs vector
   *----------------------------------------------------------------*/

  /* permute rhs vector; lhs is reused as scratch for the permuted rhs,
     and ctx->work2 becomes the solve's output buffer */
  if (ctx->sg != NULL) {
    /* hypre_printf("@@@@@@@@@@@@@@@@@ permute_vec_n2o_private\n"); */
    permute_vec_n2o_private(ctx, rhs, lhs); CHECK_V_ERROR;
    rhs_ = lhs;
    lhs_ = ctx->work2;
  } else {
    rhs_ = rhs;
    lhs_ = lhs;
  }

  /* scale rhs vector (in place on the working copy) */
  if (ctx->isScaled) {
    /* hypre_printf("@@@@@@@@@@@@@@@@@ scale_rhs_private\n"); */
    scale_rhs_private(ctx, rhs_); CHECK_V_ERROR;
  }

  /* note: rhs_ is permuted, scaled; the input, "rhs" vector has
     not been disturbed.
  */

  /*----------------------------------------------------------------
   * big switch to choose the appropriate triangular solve
   *----------------------------------------------------------------*/

  /* sequential and mpi block jacobi cases */
  if (np_dh == 1 ||
      ! strcmp(ctx->algo_par, "bj") ) {
    Factor_dhSolveSeq(rhs_, lhs_, ctx); CHECK_V_ERROR;
  }

  /* pilu case */
  else {
    Factor_dhSolve(rhs_, lhs_, ctx); CHECK_V_ERROR;
  }

  /*----------------------------------------------------------------
   * unpermute lhs vector
   * (note: don't need to unscale, because we were clever)
   *----------------------------------------------------------------*/
  if (ctx->sg != NULL) {
    permute_vec_o2n_private(ctx, lhs_, lhs); CHECK_V_ERROR;
  }

END_OF_FUNCTION: ;

  t2 = hypre_MPI_Wtime();

  /* collective timing for triangular solves */
  ctx->timing[TRI_SOLVE_T] += (t2 - t1);

  /* collective timing for setup+krylov+triSolves
     (intent is to time linear solve, but this is
     at best probelematical!)
  */
  ctx->timing[TOTAL_SOLVE_TEMP_T] = t2 - ctx->timing[SOLVE_START_T];

  /* total triangular solve count */
  ctx->its += 1;
  ctx->itsTotal += 1;

  END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "scale_rhs_private"
/* Apply the diagonal row scaling stored on ctx (if any) to the rhs
 * vector, in place.  No-op when ctx->scale is NULL (matrix unscaled). */
void scale_rhs_private(Euclid_dh ctx, HYPRE_Real *rhs)
{
  START_FUNC_DH
  HYPRE_Int i, m = ctx->m;
  REAL_DH *scale = ctx->scale;

  /* if matrix was scaled, must scale the rhs */
  if (scale != NULL) {
#ifdef USING_OPENMP_DH
/* NOTE(review): orphaned "omp for" -- this only distributes the loop if
 * the caller is already inside a parallel region; otherwise it executes
 * sequentially.  Confirm the intended calling context. */
#pragma omp for schedule(static)
#endif
    for (i=0; i<m; ++i) { rhs[i] *= scale[i]; }
  }
  END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "permute_vec_o2n_private"
/* Gather xIN through the subdomain graph's old-to-new column ordering:
 * xOUT[i] = xIN[o2n_col[i]].  Requires ctx->sg != NULL (caller checks).
 * xIN and xOUT must not alias. */
void permute_vec_o2n_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT)
{
  START_FUNC_DH
  HYPRE_Int i, m = ctx->m;
  HYPRE_Int *o2n = ctx->sg->o2n_col;

  for (i=0; i<m; ++i) xOUT[i] = xIN[o2n[i]];
  END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "permute_vec_n2o_private"
/* Permute a vector from new (factorization) ordering back to old
 * (original) ordering: xOUT[k] = xIN[n2o_row[k]] for every local row k. */
void permute_vec_n2o_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT)
{
  START_FUNC_DH
  HYPRE_Int *perm = ctx->sg->n2o_row;  /* new-to-old row permutation */
  HYPRE_Int  len  = ctx->m;            /* local vector length */
  HYPRE_Int  k;

  /* gather entries of xIN through the permutation into xOUT */
  for (k = 0; k < len; ++k) {
    xOUT[k] = xIN[perm[k]];
  }
  END_FUNC_DH
}
|
nvector_openmpdev.c | /* -----------------------------------------------------------------
* Programmer(s): David J. Gardner and Shelby Lockhart @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This NVECTOR module is based on the NVECTOR
* Serial module by Scott D. Cohen, Alan C.
* Hindmarsh, Radu Serban, and Aaron Collier
* @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for an OpenMP DEV implementation
* of the NVECTOR module.
* -----------------------------------------------------------------*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <nvector/nvector_openmpdev.h>
#include <sundials/sundials_math.h>
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define ONEPT5 RCONST(1.5)
/* Private functions for special cases of vector operations */
static void VCopy_OpenMPDEV(N_Vector x, N_Vector z); /* z=x */
static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */
static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */
static void VNeg_OpenMPDEV(N_Vector x, N_Vector z); /* z=-x */
static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */
static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */
static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */
static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */
static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */
static void VScaleBy_OpenMPDEV(realtype a, N_Vector x); /* x <- ax */
/* Private functions for special cases of vector array operations */
static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */
static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */
static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */
static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */
static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */
static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */
static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */
/*
* -----------------------------------------------------------------
* exported functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------
* Returns vector type ID. Used to identify vector implementation
* from abstract N_Vector interface.
*/
N_Vector_ID N_VGetVectorID_OpenMPDEV(N_Vector v)
{
  /* Constant tag identifying this implementation; the argument is unused. */
  return SUNDIALS_NVEC_OPENMPDEV;
}
/* ----------------------------------------------------------------------------
* Function to create a new empty vector
*/
/* Create a vector of the given length with the full operations table
 * attached but NO data buffers allocated (host_data/dev_data are NULL and
 * own_data is SUNFALSE). Returns NULL on allocation failure. */
N_Vector N_VNewEmpty_OpenMPDEV(sunindextype length)
{
  N_Vector v;
  N_VectorContent_OpenMPDEV content;
  /* Create an empty vector object */
  v = NULL;
  v = N_VNewEmpty();
  if (v == NULL) return(NULL);
  /* Attach operations */
  /* constructors, destructors, and utility operations */
  v->ops->nvgetvectorid = N_VGetVectorID_OpenMPDEV;
  v->ops->nvclone = N_VClone_OpenMPDEV;
  v->ops->nvcloneempty = N_VCloneEmpty_OpenMPDEV;
  v->ops->nvdestroy = N_VDestroy_OpenMPDEV;
  v->ops->nvspace = N_VSpace_OpenMPDEV;
  v->ops->nvgetlength = N_VGetLength_OpenMPDEV;
  /* standard vector operations */
  v->ops->nvlinearsum = N_VLinearSum_OpenMPDEV;
  v->ops->nvconst = N_VConst_OpenMPDEV;
  v->ops->nvprod = N_VProd_OpenMPDEV;
  v->ops->nvdiv = N_VDiv_OpenMPDEV;
  v->ops->nvscale = N_VScale_OpenMPDEV;
  v->ops->nvabs = N_VAbs_OpenMPDEV;
  v->ops->nvinv = N_VInv_OpenMPDEV;
  v->ops->nvaddconst = N_VAddConst_OpenMPDEV;
  v->ops->nvdotprod = N_VDotProd_OpenMPDEV;
  v->ops->nvmaxnorm = N_VMaxNorm_OpenMPDEV;
  v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMPDEV;
  v->ops->nvwrmsnorm = N_VWrmsNorm_OpenMPDEV;
  v->ops->nvmin = N_VMin_OpenMPDEV;
  v->ops->nvwl2norm = N_VWL2Norm_OpenMPDEV;
  v->ops->nvl1norm = N_VL1Norm_OpenMPDEV;
  v->ops->nvcompare = N_VCompare_OpenMPDEV;
  v->ops->nvinvtest = N_VInvTest_OpenMPDEV;
  v->ops->nvconstrmask = N_VConstrMask_OpenMPDEV;
  v->ops->nvminquotient = N_VMinQuotient_OpenMPDEV;
  /* fused and vector array operations are disabled (NULL) by default */
  /* local reduction operations: this vector has no MPI decomposition, so
     the "local" versions are just the global ones */
  v->ops->nvdotprodlocal = N_VDotProd_OpenMPDEV;
  v->ops->nvmaxnormlocal = N_VMaxNorm_OpenMPDEV;
  v->ops->nvminlocal = N_VMin_OpenMPDEV;
  v->ops->nvl1normlocal = N_VL1Norm_OpenMPDEV;
  v->ops->nvinvtestlocal = N_VInvTest_OpenMPDEV;
  v->ops->nvconstrmasklocal = N_VConstrMask_OpenMPDEV;
  v->ops->nvminquotientlocal = N_VMinQuotient_OpenMPDEV;
  v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_OpenMPDEV;
  v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMPDEV;
  /* Create content */
  content = NULL;
  content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content);
  if (content == NULL) { N_VDestroy(v); return(NULL); }
  /* Attach content */
  v->content = content;
  /* Initialize content: no data owned or attached yet */
  content->length = length;
  content->own_data = SUNFALSE;
  content->host_data = NULL;
  content->dev_data = NULL;
  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a new vector
*/
/* Create a vector of the given length, allocating matching host and
 * device data buffers that the vector owns. Returns NULL on any
 * allocation failure (with all partial allocations released). */
N_Vector N_VNew_OpenMPDEV(sunindextype length)
{
  N_Vector v;
  realtype *data;
  realtype *dev_data;
  int dev;

  v = NULL;
  v = N_VNewEmpty_OpenMPDEV(length);
  if (v == NULL) return(NULL);

  /* Create data */
  if (length > 0) {

    /* Update ownership */
    NV_OWN_DATA_OMPDEV(v) = SUNTRUE;

    /* Allocate memory on host */
    data = (realtype *) malloc(length * sizeof(realtype));
    if (data == NULL) { N_VDestroy(v); return(NULL); }

    /* Attach the host buffer immediately so N_VDestroy reclaims it if the
       device allocation below fails (the previous code leaked it). */
    NV_DATA_HOST_OMPDEV(v) = data;

    /* Allocate memory on device */
    dev = omp_get_default_device();
    dev_data = omp_target_alloc(length * sizeof(realtype), dev);
    if (dev_data == NULL) { N_VDestroy(v); return(NULL); }

    /* Attach device data */
    NV_DATA_DEV_OMPDEV(v) = dev_data;
  }

  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a vector with user data component
*/
/* Create a vector wrapping user-supplied host and device buffers. The
 * vector does NOT take ownership: the caller remains responsible for
 * freeing h_vdata and d_vdata. Returns NULL if either pointer is NULL or
 * the empty-vector allocation fails. */
N_Vector N_VMake_OpenMPDEV(sunindextype length, realtype *h_vdata,
                           realtype *d_vdata)
{
  N_Vector v;

  if (h_vdata == NULL || d_vdata == NULL) return(NULL);

  v = NULL;
  v = N_VNewEmpty_OpenMPDEV(length);
  if (v == NULL) return(NULL);

  if (length > 0) {
    /* Attach data; the unused device/host identifier lookups in the
       original were dead code and have been removed. */
    NV_OWN_DATA_OMPDEV(v)  = SUNFALSE;
    NV_DATA_HOST_OMPDEV(v) = h_vdata;
    NV_DATA_DEV_OMPDEV(v)  = d_vdata;
  }

  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors.
*/
/* Create an array of 'count' clones of w (with data buffers). Returns
 * NULL if count <= 0 or any allocation fails; on failure all vectors
 * created so far are destroyed. */
N_Vector *N_VCloneVectorArray_OpenMPDEV(int count, N_Vector w)
{
  N_Vector *vs;
  int j;

  if (count <= 0) return(NULL);

  vs = NULL;
  vs = (N_Vector *) malloc(count * sizeof(N_Vector));
  if(vs == NULL) return(NULL);

  for (j = 0; j < count; j++) {
    vs[j] = NULL;
    vs[j] = N_VClone_OpenMPDEV(w);
    if (vs[j] == NULL) {
      /* vs[0] .. vs[j-1] were created successfully, so destroy all j of
         them (the original passed j-1, which leaked vs[j-1]) */
      N_VDestroyVectorArray_OpenMPDEV(vs, j);
      return(NULL);
    }
  }

  return(vs);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors with NULL data array.
*/
/* Create an array of 'count' empty clones of w (no data buffers).
 * Returns NULL if count <= 0 or any allocation fails; on failure all
 * vectors created so far are destroyed. */
N_Vector *N_VCloneVectorArrayEmpty_OpenMPDEV(int count, N_Vector w)
{
  N_Vector *vs;
  int j;

  if (count <= 0) return(NULL);

  vs = NULL;
  vs = (N_Vector *) malloc(count * sizeof(N_Vector));
  if(vs == NULL) return(NULL);

  for (j = 0; j < count; j++) {
    vs[j] = NULL;
    vs[j] = N_VCloneEmpty_OpenMPDEV(w);
    if (vs[j] == NULL) {
      /* vs[0] .. vs[j-1] were created successfully, so destroy all j of
         them (the original passed j-1, which leaked vs[j-1]) */
      N_VDestroyVectorArray_OpenMPDEV(vs, j);
      return(NULL);
    }
  }

  return(vs);
}
/* ----------------------------------------------------------------------------
* Function to free an array created with N_VCloneVectorArray_OpenMPDEV
*/
/* Destroy the first 'count' vectors of an array created by
 * N_VCloneVectorArray_OpenMPDEV, then release the handle array itself. */
void N_VDestroyVectorArray_OpenMPDEV(N_Vector *vs, int count)
{
  int k;

  /* destroy each vector; order is irrelevant as they are independent */
  for (k = count - 1; k >= 0; k--) {
    N_VDestroy_OpenMPDEV(vs[k]);
  }

  free(vs);
}
/* ----------------------------------------------------------------------------
* Function to return number of vector elements
*/
sunindextype N_VGetLength_OpenMPDEV(N_Vector v)
{
  /* Length stored in the vector's content structure. */
  return NV_LENGTH_OMPDEV(v);
}
/* ----------------------------------------------------------------------------
* Function to return a pointer to the data array on the host.
*/
realtype *N_VGetHostArrayPointer_OpenMPDEV(N_Vector v)
{
  /* Raw pointer to the host-side data buffer (may be NULL for an
     empty vector). */
  return((realtype *) NV_DATA_HOST_OMPDEV(v));
}
/* ----------------------------------------------------------------------------
* Function to return a pointer to the data array on the device.
*/
realtype *N_VGetDeviceArrayPointer_OpenMPDEV(N_Vector v)
{
  /* Raw device pointer; only dereferenceable inside a target region or
     via the omp_target_* API (may be NULL for an empty vector). */
  return((realtype *) NV_DATA_DEV_OMPDEV(v));
}
/* ----------------------------------------------------------------------------
* Function to print a vector to stdout
*/
void N_VPrint_OpenMPDEV(N_Vector x)
{
  /* Convenience wrapper: print to standard output. */
  N_VPrintFile_OpenMPDEV(x, stdout);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to outfile
*/
/* Print the HOST copy of the vector to outfile, one entry per line,
 * followed by a blank line. Note: only the host buffer is read here; if
 * the device copy is more recent, call N_VCopyFromDevice first. */
void N_VPrintFile_OpenMPDEV(N_Vector x, FILE *outfile)
{
  sunindextype k;
  sunindextype len  = NV_LENGTH_OMPDEV(x);
  realtype    *vals = NV_DATA_HOST_OMPDEV(x);

  for (k = 0; k < len; k++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
    STAN_SUNDIALS_FPRINTF(outfile, "%11.8Lg\n", vals[k]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
    STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", vals[k]);
#else
    STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", vals[k]);
#endif
  }

  STAN_SUNDIALS_FPRINTF(outfile, "\n");
}
/* ----------------------------------------------------------------------------
* Function to copy host array into device array
*/
/* Copy the entire host data buffer of x into its device buffer. */
void N_VCopyToDevice_OpenMPDEV(N_Vector x)
{
  /* destination device and source (host) device identifiers */
  int dev  = omp_get_default_device();
  int host = omp_get_initial_device();

  /* blocking host-to-device transfer of all N entries */
  omp_target_memcpy(NV_DATA_DEV_OMPDEV(x), NV_DATA_HOST_OMPDEV(x),
                    sizeof(realtype) * NV_LENGTH_OMPDEV(x),
                    0, 0, dev, host);
}
/* ----------------------------------------------------------------------------
* Function to copy device array into host array
*/
/* Copy the entire device data buffer of x back into its host buffer. */
void N_VCopyFromDevice_OpenMPDEV(N_Vector x)
{
  /* source device and destination (host) device identifiers */
  int dev  = omp_get_default_device();
  int host = omp_get_initial_device();

  /* blocking device-to-host transfer of all N entries */
  omp_target_memcpy(NV_DATA_HOST_OMPDEV(x), NV_DATA_DEV_OMPDEV(x),
                    sizeof(realtype) * NV_LENGTH_OMPDEV(x),
                    0, 0, host, dev);
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Create new vector from existing vector without attaching data
*/
/* Create a new vector with the same length and operations table as w but
 * with no data buffers attached. Returns NULL if w is NULL or any
 * allocation fails. */
N_Vector N_VCloneEmpty_OpenMPDEV(N_Vector w)
{
  N_Vector v;
  N_VectorContent_OpenMPDEV content;
  if (w == NULL) return(NULL);
  /* Create vector */
  v = NULL;
  v = N_VNewEmpty();
  if (v == NULL) return(NULL);
  /* Attach operations (copied from w so any ops enabled on w carry over) */
  if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); }
  /* Create content */
  content = NULL;
  content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content);
  if (content == NULL) { N_VDestroy(v); return(NULL); }
  /* Attach content */
  v->content = content;
  /* Initialize content: same length as w, but no data owned or attached */
  content->length = NV_LENGTH_OMPDEV(w);
  content->own_data = SUNFALSE;
  content->host_data = NULL;
  content->dev_data = NULL;
  return(v);
}
/* ----------------------------------------------------------------------------
* Create new vector from existing vector and attach data
*/
/* Create a new vector with the same length and operations as w, with
 * freshly allocated (uninitialized) host and device data buffers that the
 * clone owns. Returns NULL on any allocation failure (with all partial
 * allocations released). */
N_Vector N_VClone_OpenMPDEV(N_Vector w)
{
  N_Vector v;
  realtype *data;
  realtype *dev_data;
  sunindextype length;
  int dev;

  v = NULL;
  v = N_VCloneEmpty_OpenMPDEV(w);
  if (v == NULL) return(NULL);

  length = NV_LENGTH_OMPDEV(w);

  /* Create data */
  if (length > 0) {

    /* Update ownership flag */
    NV_OWN_DATA_OMPDEV(v) = SUNTRUE;

    /* Allocate memory on host */
    data = (realtype *) malloc(length * sizeof(realtype));
    if (data == NULL) { N_VDestroy(v); return(NULL); }

    /* Attach the host buffer immediately so N_VDestroy reclaims it if the
       device allocation below fails (the previous code leaked it). */
    NV_DATA_HOST_OMPDEV(v) = data;

    /* Allocate memory on device */
    dev = omp_get_default_device();
    dev_data = omp_target_alloc(length * sizeof(realtype), dev);
    if (dev_data == NULL) { N_VDestroy(v); return(NULL); }

    /* Attach device data */
    NV_DATA_DEV_OMPDEV(v) = dev_data;
  }

  return(v);
}
/* ----------------------------------------------------------------------------
* Destroy vector and free vector memory
*/
/* Destroy a vector: free the host and device data buffers (only if the
 * vector owns them), then the content, ops, and the vector itself.
 * Safe to call with v == NULL. */
void N_VDestroy_OpenMPDEV(N_Vector v)
{
  if (v == NULL) return;

  if (v->content != NULL) {
    /* buffers are released only when this vector owns them; wrapped
       user buffers (N_VMake) are left alone */
    if (NV_OWN_DATA_OMPDEV(v)) {
      if (NV_DATA_HOST_OMPDEV(v) != NULL) {
        free(NV_DATA_HOST_OMPDEV(v));
        NV_DATA_HOST_OMPDEV(v) = NULL;
      }
      if (NV_DATA_DEV_OMPDEV(v) != NULL) {
        omp_target_free(NV_DATA_DEV_OMPDEV(v), omp_get_default_device());
        NV_DATA_DEV_OMPDEV(v) = NULL;
      }
    }
    free(v->content);
    v->content = NULL;
  }

  if (v->ops != NULL) {
    free(v->ops);
    v->ops = NULL;
  }
  free(v);
}
/* ----------------------------------------------------------------------------
* Get storage requirement for N_Vector
*/
void N_VSpace_OpenMPDEV(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
  /* N realtype words and one integer (the stored length).
     NOTE(review): only one copy of the data is counted even though both
     host and device buffers exist — confirm this is intentional. */
  *lrw = NV_LENGTH_OMPDEV(v);
  *liw = 1;
  return;
}
/* ----------------------------------------------------------------------------
* Compute linear combination z[i] = a*x[i]+b*y[i]
*/
/* Compute z[i] = a*x[i] + b*y[i] on the device. Special coefficient
 * combinations and aliasing patterns are dispatched to cheaper private
 * kernels; only the fully general case runs the loop below. */
void N_VLinearSum_OpenMPDEV(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype c, *xd_dev, *yd_dev, *zd_dev;
  N_Vector v1, v2;
  booleantype test;
  int dev;
  xd_dev = yd_dev = zd_dev = NULL;
  if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */
    Vaxpy_OpenMPDEV(a,x,y);
    return;
  }
  if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */
    Vaxpy_OpenMPDEV(b,y,x);
    return;
  }
  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE)) {
    VSum_OpenMPDEV(x, y, z);
    return;
  }
  /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0
     (note the assignment inside the condition: 'test' records which
     case matched and selects the operand order below) */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    v1 = test ? y : x;
    v2 = test ? x : y;
    VDiff_OpenMPDEV(v2, v1, z);
    return;
  }
  /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin1_OpenMPDEV(c, v1, v2, z);
    return;
  }
  /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin2_OpenMPDEV(c, v1, v2, z);
    return;
  }
  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b) {
    VScaleSum_OpenMPDEV(a, x, y, z);
    return;
  }
  /* Case: a == -b */
  if (a == -b) {
    VScaleDiff_OpenMPDEV(a, x, y, z);
    return;
  }
  /* Do all cases not handled above:
     (1) a == other, b == 0.0 - user should have called N_VScale
     (2) a == 0.0, b == other - user should have called N_VScale
     (3) a,b == other, a !=b, a != -b */
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* general device kernel: device pointers are passed through
     is_device_ptr; scalars are mapped by value */
#pragma omp target map(to:N,a,b) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (a*xd_dev[i])+(b*yd_dev[i]);
  return;
}
/* ----------------------------------------------------------------------------
* Assigns constant value to all vector elements, z[i] = c
*/
/* Set every element of z to the constant c, computing on the device. */
void N_VConst_OpenMPDEV(realtype c, N_Vector z)
{
  sunindextype i, N;
  realtype *zd_dev;
  int dev;
  zd_dev = NULL;
  N = NV_LENGTH_OMPDEV(z);
  zd_dev = NV_DATA_DEV_OMPDEV(z);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* offloaded fill; zd_dev is already a device pointer */
#pragma omp target map(to:N,c) is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++) zd_dev[i] = c;
  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise product z[i] = x[i]*y[i]
*/
/* Elementwise product z[i] = x[i]*y[i], computed on the device. */
void N_VProd_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;
  xd_dev = yd_dev = zd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]*yd_dev[i];
  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise division z[i] = x[i]/y[i]
*/
/* Elementwise quotient z[i] = x[i]/y[i], computed on the device.
 * No guard against y[i] == 0; callers must ensure y is nonzero. */
void N_VDiv_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;
  xd_dev = yd_dev = zd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]/yd_dev[i];
  return;
}
/* ----------------------------------------------------------------------------
* Compute scaler multiplication z[i] = c*x[i]
*/
/* Scale a vector: z[i] = c*x[i], computed on the device. Dispatches to
 * cheaper private kernels for in-place scaling, copy (c == 1), and
 * negation (c == -1). */
void N_VScale_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;
  xd_dev = zd_dev = NULL;
  if (z == x) { /* BLAS usage: scale x <- cx */
    VScaleBy_OpenMPDEV(c, x);
    return;
  }
  if (c == ONE) {
    VCopy_OpenMPDEV(x, z);
  } else if (c == -ONE) {
    VNeg_OpenMPDEV(x, z);
  } else {
    /* general case */
    N = NV_LENGTH_OMPDEV(x);
    xd_dev = NV_DATA_DEV_OMPDEV(x);
    zd_dev = NV_DATA_DEV_OMPDEV(z);
    /* get default device identifier */
    dev = omp_get_default_device();
#pragma omp target map(to:N,c) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (i = 0; i < N; i++)
      zd_dev[i] = c*xd_dev[i];
  }
  return;
}
/* ----------------------------------------------------------------------------
* Compute absolute value of vector components z[i] = SUNRabs(x[i])
*/
/* Elementwise absolute value z[i] = |x[i]|, computed on the device. */
void N_VAbs_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;
  xd_dev = zd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = SUNRabs(xd_dev[i]);
  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = 1 / x[i]
*/
/* Elementwise reciprocal z[i] = 1/x[i], computed on the device.
 * No zero check; use N_VInvTest for the checked variant. */
void N_VInv_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;
  xd_dev = zd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = ONE/xd_dev[i];
  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise addition of a scaler to a vector z[i] = x[i] + b
*/
/* Add scalar b to every element: z[i] = x[i] + b, computed on the device. */
void N_VAddConst_OpenMPDEV(N_Vector x, realtype b, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;
  xd_dev = zd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N,b) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]+b;
  return;
}
/* ----------------------------------------------------------------------------
* Computes the dot product of two vectors, a = sum(x[i]*y[i])
*/
/* Dot product sum_i x[i]*y[i], reduced on the device and returned on
 * the host (sum is mapped tofrom so the reduced value comes back). */
realtype N_VDotProd_OpenMPDEV(N_Vector x, N_Vector y)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *yd_dev;
  int dev;
  xd_dev = yd_dev = NULL;
  sum = ZERO;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += xd_dev[i]*yd_dev[i];
  }
  return(sum);
}
/* ----------------------------------------------------------------------------
* Computes max norm of a vector
*/
/* Max norm max_i |x[i]|, computed via a device max-reduction.
 * Returns ZERO for an empty vector (initial value of 'max'). */
realtype N_VMaxNorm_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype max, *xd_dev;
  int dev;
  max = ZERO;
  xd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:max) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(max:max) schedule(static, 1)
  for (i = 0; i < N; i++) {
    max = SUNMAX(SUNRabs(xd_dev[i]), max);
  }
  return(max);
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a vector
*/
/* Weighted root-mean-square norm: sqrt( (1/N) * sum_i (x[i]*w[i])^2 ). */
realtype N_VWrmsNorm_OpenMPDEV(N_Vector x, N_Vector w)
{
  realtype sqr_sum = N_VWSqrSumLocal_OpenMPDEV(x, w);
  return(SUNRsqrt(sqr_sum / (NV_LENGTH_OMPDEV(x))));
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a masked vector
*/
/* Masked weighted RMS norm: like N_VWrmsNorm but only entries with
 * id[i] > 0 contribute to the sum (division is still by the full N). */
realtype N_VWrmsNormMask_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
  realtype sqr_sum = N_VWSqrSumMaskLocal_OpenMPDEV(x, w, id);
  return(SUNRsqrt(sqr_sum / (NV_LENGTH_OMPDEV(x))));
}
/* ----------------------------------------------------------------------------
* Computes weighted square sum of a vector
*/
/* Weighted squared sum sum_i (x[i]*w[i])^2, reduced on the device. */
realtype N_VWSqrSumLocal_OpenMPDEV(N_Vector x, N_Vector w)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev;
  int dev;
  sum = ZERO;
  xd_dev = wd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  wd_dev = NV_DATA_DEV_OMPDEV(w);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd_dev[i]*wd_dev[i]);
  }
  return(sum);
}
/* ----------------------------------------------------------------------------
* Computes weighted square sum of a masked vector
*/
/* Masked weighted squared sum: sum_i (x[i]*w[i])^2 over entries with
 * id[i] > 0, reduced on the device. */
realtype N_VWSqrSumMaskLocal_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev, *idd_dev;
  int dev;
  sum = ZERO;
  xd_dev = wd_dev = idd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  wd_dev = NV_DATA_DEV_OMPDEV(w);
  idd_dev = NV_DATA_DEV_OMPDEV(id);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, wd_dev, idd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    /* entries with nonpositive mask value are excluded */
    if (idd_dev[i] > ZERO) {
      sum += SUNSQR(xd_dev[i]*wd_dev[i]);
    }
  }
  return(sum);
}
/* ----------------------------------------------------------------------------
* Finds the minimun component of a vector
*/
/* Minimum element of x, computed on the device. A single team is used so
 * that 'min' can be seeded with xd_dev[0] on the device exactly once
 * before the min-reduction over the remaining entries. */
realtype N_VMin_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype min, *xd_dev;
  int dev;
  xd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(from:min) is_device_ptr(xd_dev) device(dev)
#pragma omp teams num_teams(1)
  {
    /* seed with the first element, then reduce over indices 1..N-1 */
    min = xd_dev[0];
#pragma omp distribute parallel for reduction(min:min) schedule(static, 1)
    for (i = 1; i < N; i++) {
      min = SUNMIN(xd_dev[i], min);
    }
  }
  return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted L2 norm of a vector
*/
/* Weighted Euclidean norm sqrt( sum_i (x[i]*w[i])^2 ); the sum is
 * reduced on the device, the square root is taken on the host. */
realtype N_VWL2Norm_OpenMPDEV(N_Vector x, N_Vector w)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev;
  int dev;
  sum = ZERO;
  xd_dev = wd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  wd_dev = NV_DATA_DEV_OMPDEV(w);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd_dev[i]*wd_dev[i]);
  }
  return(SUNRsqrt(sum));
}
/* ----------------------------------------------------------------------------
* Computes L1 norm of a vector
*/
/* L1 norm sum_i |x[i]|, reduced on the device. */
realtype N_VL1Norm_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype sum, *xd_dev;
  int dev;
  sum = ZERO;
  xd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i<N; i++)
    sum += SUNRabs(xd_dev[i]);
  return(sum);
}
/* ----------------------------------------------------------------------------
* Compare vector component values to a scaler
*/
/* Componentwise comparison against a threshold:
 * z[i] = 1 if |x[i]| >= c, else 0; computed on the device. */
void N_VCompare_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;
  xd_dev = zd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N,c) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (SUNRabs(xd_dev[i]) >= c) ? ONE : ZERO;
  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO
*/
/* Checked reciprocal: z[i] = 1/x[i] for nonzero entries. Returns SUNFALSE
 * if any x[i] == 0 (the max-reduction on 'val' flags it); note z[i] is
 * left unwritten at those positions. */
booleantype N_VInvTest_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev, val;
  int dev;
  xd_dev = zd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);
  /* get default device identifier */
  dev = omp_get_default_device();
  val = ZERO;
#pragma omp target map(to:N) map(tofrom:val) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(max:val) schedule(static, 1)
  for (i = 0; i < N; i++) {
    if (xd_dev[i] == ZERO)
      val = ONE;   /* record that a zero entry was seen */
    else
      zd_dev[i] = ONE/xd_dev[i];
  }
  if (val > ZERO)
    return (SUNFALSE);
  else
    return (SUNTRUE);
}
/* ----------------------------------------------------------------------------
* Compute constraint mask of a vector
*/
/* Constraint test on the device. For each i, c[i] encodes the constraint
 * on x[i]: |c[i]| > 1.5 requires x[i]*c[i] > 0 (strict sign match),
 * 0.5 < |c[i]| <= 1.5 requires x[i]*c[i] >= 0, and c[i] == 0 means
 * unconstrained. m[i] is set to 1 where the constraint fails, 0 where it
 * holds. Returns SUNTRUE iff all constraints hold (tracked via the
 * min-reduction on 'temp'). */
booleantype N_VConstrMask_OpenMPDEV(N_Vector c, N_Vector x, N_Vector m)
{
  sunindextype i, N;
  realtype temp;
  realtype *cd_dev, *xd_dev, *md_dev;
  int dev;
  cd_dev = xd_dev = md_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  cd_dev = NV_DATA_DEV_OMPDEV(c);
  md_dev = NV_DATA_DEV_OMPDEV(m);
  /* get default device identifier */
  dev = omp_get_default_device();
  temp = ONE;
#pragma omp target map(to:N) map(tofrom:temp) is_device_ptr(xd_dev, cd_dev, md_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:temp) schedule(static, 1)
  for (i = 0; i < N; i++) {
    md_dev[i] = ZERO;
    /* c[i] == 0: no constraint on this entry */
    if (cd_dev[i] == ZERO) continue;
    /* |c[i]| > 1.5: strict sign constraint */
    if (cd_dev[i] > ONEPT5 || cd_dev[i] < -ONEPT5) {
      if ( xd_dev[i]*cd_dev[i] <= ZERO) { temp = ZERO; md_dev[i] = ONE; }
      continue;
    }
    /* 0.5 < |c[i]| <= 1.5: non-strict sign constraint */
    if ( cd_dev[i] > HALF || cd_dev[i] < -HALF) {
      if (xd_dev[i]*cd_dev[i] < ZERO ) { temp = ZERO; md_dev[i] = ONE; }
    }
  }
  if (temp == ONE) return (SUNTRUE);
  else return(SUNFALSE);
}
/* ----------------------------------------------------------------------------
* Compute minimum componentwise quotient
*/
/* Minimum quotient min_i num[i]/denom[i] over entries where denom[i] != 0,
 * reduced on the device. Returns BIG_REAL if every denominator is zero. */
realtype N_VMinQuotient_OpenMPDEV(N_Vector num, N_Vector denom)
{
  sunindextype i, N;
  realtype *nd_dev, *dd_dev, min;
  int dev;
  nd_dev = dd_dev = NULL;
  N = NV_LENGTH_OMPDEV(num);
  nd_dev = NV_DATA_DEV_OMPDEV(num);
  dd_dev = NV_DATA_DEV_OMPDEV(denom);
  /* get default device identifier */
  dev = omp_get_default_device();
  min = BIG_REAL;
#pragma omp target map(to:N) map(tofrom:min) is_device_ptr(nd_dev, dd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:min) schedule(static, 1)
  for (i = 0; i < N; i++)
    if (dd_dev[i] != ZERO) min = SUNMIN(nd_dev[i]/dd_dev[i], min);
  return(min);
}
/*
* -----------------------------------------------------------------
* fused vector operations
* -----------------------------------------------------------------
*/
/* Fused operation z = sum_{i=0}^{nvec-1} c[i]*X[i], computed on the
 * device. Three aliasing cases are handled separately so that z can alias
 * X[0]. Returns 0 on success, -1 if nvec < 1.
 *
 * Fix vs. original: two of the target regions omitted device(dev) while
 * every sibling region specified it; they now all use device(dev)
 * (behavior-identical, since dev is the default device, but consistent). */
int N_VLinearCombination_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
  int i, dev;
  realtype to_add; /* temporary variable to hold sum being added in atomic operation */
  sunindextype j, N;
  realtype* zd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMPDEV(c[0], X[0], z);
    return(0);
  }

  /* should have called N_VLinearSum */
  if (nvec == 2) {
    N_VLinearSum_OpenMPDEV(c[0], X[0], c[1], X[1], z);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMPDEV(z);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store X dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);

  /*
   * X[0] += c[i]*X[i], i = 1,...,nvec-1
   */
  if ((X[0] == z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=1; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++) {
          /* atomic accumulate: teams may touch the same z entries */
          to_add = c[i] * xd_dev[j];
#pragma omp atomic
          zd_dev[j] += to_add;
        }
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /*
   * X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1
   */
  if (X[0] == z) {
    /* first scale z (== X[0]) in place, then accumulate the rest */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,zd_dev) device(dev)
    {
#pragma omp teams distribute parallel for schedule(static,1)
      for (j=0; j<N; j++)
        zd_dev[j] *= c[0];
    }
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=1; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++) {
          to_add = c[i] * xd_dev[j];
#pragma omp atomic
          zd_dev[j] += to_add;
        }
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /*
   * z = sum{ c[i] * X[i] }, i = 0,...,nvec-1
   */
  /* initialize z with the first term, then accumulate the rest */
  xd_dev = NV_DATA_DEV_OMPDEV(X[0]);
#pragma omp target map(to:N,c[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
  {
#pragma omp teams distribute parallel for schedule(static, 1)
    for (j=0; j<N; j++) {
      zd_dev[j] = c[0] * xd_dev[j];
    }
  }
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=1; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++) {
        to_add = c[i] * xd_dev[j];
#pragma omp atomic
        zd_dev[j] += to_add;
      }
    }
  }
  free(xd_dev_ptrs);
  return(0);
}
/* Computes Y[i] += a[i]*x in place when Y == Z, otherwise
 * Z[i] = a[i]*x + Y[i], for i = 0,...,nvec-1.  Vector data is assumed to
 * already reside on the OpenMP offload device; only the coefficient array
 * and the arrays of device pointers are mapped over.
 * Returns 0 on success, -1 if nvec < 1. */
int N_VScaleAddMulti_OpenMPDEV(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMPDEV(a[0], x, ONE, Y[0], Z[0]);
    return(0);
  }

  /* get vector length and device data array */
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);

  /*
   * Y[i][j] += a[i] * x[j]
   */
  if (Y == Z) {
    /* FIX: the distributed loop must immediately follow the teams
       distribute directive; wrapping it in a compound statement is
       non-conforming OpenMP and rejected by strict compilers */
    #pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec]) \
        is_device_ptr(xd_dev, yd_dev) device(dev)
    #pragma omp teams distribute
    for (i=0; i<nvec; i++) {
      yd_dev = yd_dev_ptrs[i];
      #pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        yd_dev[j] += a[i] * xd_dev[j];
    }
    free(yd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
  #pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
      is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
  #pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    yd_dev = yd_dev_ptrs[i];
    zd_dev = zd_dev_ptrs[i];
    #pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = a[i] * xd_dev[j] + yd_dev[j];
  }

  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
int N_VDotProdMulti_OpenMPDEV(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods)
{
/* Computes dotprods[i] = <x, Y[i]> for i = 0,...,nvec-1 on the OpenMP
 * offload device.  Vector data is assumed device-resident; dotprods is
 * mapped tofrom so the accumulated results come back to the host.
 * Returns 0 on success, -1 if nvec < 1. */
int i, dev;
sunindextype j, N;
realtype sum;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype** yd_dev_ptrs=NULL;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VDotProd */
if (nvec == 1) {
dotprods[0] = N_VDotProd_OpenMPDEV(x, Y[0]);
return(0);
}
/* get vector length and data array */
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
/* get default device identifier */
dev = omp_get_default_device();
/* initialize dot products */
for (i=0; i<nvec; i++) {
dotprods[i] = ZERO;
}
/* Allocate and store dev pointers to copy to device */
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
/* compute multiple dot products: the i-loop is distributed across teams;
 * each iteration re-points yd_dev, computes a per-vector reduction into
 * sum, and accumulates into the mapped dotprods array.
 * NOTE(review): sum and yd_dev are scalars shared across the teams region;
 * correctness here relies on implicit data-sharing of target scalars --
 * confirm against the OpenMP spec for the targeted compilers. */
#pragma omp target map(to:N,nvec,yd_dev_ptrs[:nvec]) map(tofrom:dotprods[:nvec]) \
is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
for (i=0; i<nvec; i++) {
yd_dev = yd_dev_ptrs[i];
sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
for (j=0; j<N; j++)
sum += xd_dev[j] * yd_dev[j];
dotprods[i] += sum;
}
free(yd_dev_ptrs);
return(0);
}
/*
* -----------------------------------------------------------------
* vector array operations
* -----------------------------------------------------------------
*/
/* Computes Z[i] = a*X[i] + b*Y[i] for i = 0,...,nvec-1, dispatching to
 * specialized kernels for common coefficient combinations (axpy, sum,
 * difference, single-scale forms) before falling back to the general case.
 * Returns 0 on success, -1 if nvec < 1. */
int N_VLinearSumVectorArray_OpenMPDEV(int nvec,
                                      realtype a, N_Vector* X,
                                      realtype b, N_Vector* Y,
                                      N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  N_Vector* V1;
  N_Vector* V2;
  booleantype test;
  realtype c;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMPDEV(a, X[0], b, Y[0], Z[0]);
    return(0);
  }

  /* BLAS usage: axpy y <- ax+y */
  if ((b == ONE) && (Z == Y))
    return(VaxpyVectorArray_OpenMPDEV(nvec, a, X, Y));

  /* BLAS usage: axpy x <- by+x */
  if ((a == ONE) && (Z == X))
    return(VaxpyVectorArray_OpenMPDEV(nvec, b, Y, X));

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE))
    return(VSumVectorArray_OpenMPDEV(nvec, X, Y, Z));

  /* Cases:                    */
  /*   (1) a == 1.0, b = -1.0, */
  /*   (2) a == -1.0, b == 1.0 */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VDiffVectorArray_OpenMPDEV(nvec, V2, V1, Z));
  }

  /* Cases:                                                  */
  /*   (1) a == 1.0, b == other or 0.0,                      */
  /*   (2) a == other or 0.0, b == 1.0                       */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c  = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin1VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
  }

  /* Cases:                     */
  /*   (1) a == -1.0, b != 1.0, */
  /*   (2) a != 1.0, b == -1.0  */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c  = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin2VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
  }

  /* Case: a == b                                                         */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b)
    return(VScaleSumVectorArray_OpenMPDEV(nvec, a, X, Y, Z));

  /* Case: a == -b */
  if (a == -b)
    return(VScaleDiffVectorArray_OpenMPDEV(nvec, a, X, Y, Z));

  /* Do all cases not handled above:                               */
  /*   (1) a == other, b == 0.0 - user should have called N_VScale */
  /*   (2) a == 0.0, b == other - user should have called N_VScale */
  /*   (3) a,b == other, a !=b, a != -b                            */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* compute linear sum for each vector pair in vector arrays.
     FIX: the distributed loop must immediately follow the teams distribute
     directive; the original compound statement here is non-conforming */
  #pragma omp target map(to:N,nvec,a,b,xd_dev_ptrs[:nvec], yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
      is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
  #pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
    yd_dev = yd_dev_ptrs[i];
    zd_dev = zd_dev_ptrs[i];
    #pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = a * xd_dev[j] + b * yd_dev[j];
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Scales each vector in a vector array: Z[i] = c[i] * X[i], done in place
 * when X == Z.  Elementwise work runs on the default OpenMP device.
 * Returns 0 on success, -1 if nvec < 1. */
int N_VScaleVectorArray_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMPDEV(c[0], X[0], Z[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++) {
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  }

  /*
   * X[i] *= c[i]
   */
  if (X == Z) {
    /* FIX: a loop construct must be followed directly by its for
       statement; the original compound block was non-conforming */
    #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
        is_device_ptr(xd_dev) device(dev)
    #pragma omp teams distribute
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      #pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        xd_dev[j] *= c[i];
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /*
   * Z[i] = c[i] * X[i]
   */
  #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
      is_device_ptr(xd_dev, zd_dev) device(dev)
  #pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
    zd_dev = zd_dev_ptrs[i];
    #pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = c[i] * xd_dev[j];
  }

  free(xd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Sets every element of every vector in the array to the constant c on the
 * default OpenMP device.  Returns 0 on success, -1 if nvec < 1. */
int N_VConstVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* zd_dev=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VConst */
  if (nvec == 1) {
    N_VConst_OpenMPDEV(c, Z[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get device */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* set each vector in the vector array to a constant.
     FIX: the distributed for must immediately follow the teams distribute
     directive; a compound statement here is non-conforming OpenMP */
  #pragma omp target map(to:N,nvec,zd_dev_ptrs[:nvec]) \
      is_device_ptr(zd_dev) device(dev)
  #pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    zd_dev = zd_dev_ptrs[i];
    #pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = c;
  }

  free(zd_dev_ptrs);
  return(0);
}
/* Computes nrm[i] = WRMS norm of X[i] with weight vector W[i] for each
 * vector in the array; results return to the host via the mapped nrm array.
 * Returns 0 on success, -1 if nvec < 1. */
int N_VWrmsNormVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W, realtype* nrm)
{
  int i, dev;
  sunindextype j, N;
  realtype sum;
  realtype* wd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** wd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNorm_OpenMPDEV(X[0], W[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* initialize norms */
  for (i=0; i<nvec; i++)
    nrm[i] = ZERO;

  /* Allocate and store dev pointers to copy to device */
  wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]);
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);

  /* compute the WRMS norm for each vector in the vector array.
     FIX: each loop directive (teams distribute, parallel for) must be
     followed directly by its for statement; the original compound blocks
     were non-conforming OpenMP */
  #pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \
      is_device_ptr(xd_dev, wd_dev) device(dev)
  #pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
    wd_dev = wd_dev_ptrs[i];
    sum = ZERO;
    #pragma omp parallel for reduction(+:sum) schedule(static, 1)
    for (j=0; j<N; j++)
      sum += SUNSQR(xd_dev[j] * wd_dev[j]);
    nrm[i] = SUNRsqrt(sum/N);
  }

  free(wd_dev_ptrs);
  free(xd_dev_ptrs);
  return(0);
}
/* Computes the masked WRMS norm of each X[i] with weights W[i]: only
 * elements where id[j] > 0 contribute.  Results return via the mapped nrm
 * array.  Returns 0 on success, -1 if nvec < 1. */
int N_VWrmsNormMaskVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W,
                                         N_Vector id, realtype* nrm)
{
  int i, dev;
  sunindextype j, N;
  realtype sum;
  realtype* wd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype* idd_dev=NULL;
  realtype** wd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNormMask_OpenMPDEV(X[0], W[0], id);
    return(0);
  }

  /* get vector length and mask data array */
  N       = NV_LENGTH_OMPDEV(X[0]);
  idd_dev = NV_DATA_DEV_OMPDEV(id);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* initialize norms */
  for (i=0; i<nvec; i++)
    nrm[i] = ZERO;

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]);

  /* compute the WRMS norm for each vector in the vector array.
     FIX: each loop directive (teams distribute, parallel for) must be
     followed directly by its for statement; the original compound blocks
     were non-conforming OpenMP */
  #pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \
      is_device_ptr(idd_dev,xd_dev,wd_dev) device(dev)
  #pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
    wd_dev = wd_dev_ptrs[i];
    sum = ZERO;
    #pragma omp parallel for reduction(+:sum) schedule(static, 1)
    for (j=0; j<N; j++) {
      if (idd_dev[j] > ZERO)
        sum += SUNSQR(xd_dev[j] * wd_dev[j]);
    }
    nrm[i] = SUNRsqrt(sum/N);
  }

  free(xd_dev_ptrs);
  free(wd_dev_ptrs);
  return(0);
}
/* For each vector i and sum term j computes
 *   Y[j][i] += a[j]*X[i]            when Y == Z,
 *   Z[j][i]  = Y[j][i] + a[j]*X[i]  otherwise,
 * dispatching to simpler kernels when nvec == 1 or nsum == 1.
 * Returns 0 on success, -1 if nvec < 1 or nsum < 1. */
int N_VScaleAddMultiVectorArray_OpenMPDEV(int nvec, int nsum, realtype* a,
                                          N_Vector* X, N_Vector** Y, N_Vector** Z)
{
  int i, j, dev;
  sunindextype k, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;
  int retval;
  N_Vector* YY;
  N_Vector* ZZ;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */
  if (nvec == 1) {
    /* should have called N_VLinearSum */
    if (nsum == 1) {
      N_VLinearSum_OpenMPDEV(a[0], X[0], ONE, Y[0][0], Z[0][0]);
      return(0);
    }
    /* should have called N_VScaleAddMulti */
    YY = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    ZZ = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    for (j=0; j<nsum; j++) {
      YY[j] = Y[j][0];
      ZZ[j] = Z[j][0];
    }
    retval = N_VScaleAddMulti_OpenMPDEV(nsum, a, X[0], YY, ZZ);
    free(YY);
    free(ZZ);
    return(retval);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */
  /* should have called N_VLinearSumVectorArray */
  if (nsum == 1) {
    retval = N_VLinearSumVectorArray_OpenMPDEV(nvec, a[0], X, ONE, Y[0], Z[0]);
    return(retval);
  }

  /* ----------------------------
   * Compute multiple linear sums
   * ---------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device; entry [i*nsum+j]
     holds the device data pointer of vector Y[j][i] */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++) {
    for (j=0; j<nsum; j++)
      yd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Y[j][i]);
  }

  /*
   * Y[i][j] += a[i] * x[j]
   */
  if (Y == Z) {
    /* FIX: the distributed loop must immediately follow the teams
       distribute directive; the original compound block was
       non-conforming OpenMP */
    #pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum]) \
        is_device_ptr(xd_dev, yd_dev) device(dev)
    #pragma omp teams distribute
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      for (j=0; j<nsum; j++) {
        yd_dev = yd_dev_ptrs[i*nsum+j];
        #pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          yd_dev[k] += a[j] * xd_dev[k];
      }
    }
    free(xd_dev_ptrs);
    free(yd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (i=0; i<nvec; i++) {
    for (j=0; j<nsum; j++)
      zd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Z[j][i]);
  }

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
  #pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec*nsum]) \
      is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
  #pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
    for (j=0; j<nsum; j++) {
      yd_dev = yd_dev_ptrs[i*nsum+j];
      zd_dev = zd_dev_ptrs[i*nsum+j];
      #pragma omp parallel for schedule(static, 1)
      for (k=0; k<N; k++)
        zd_dev[k] = a[j] * xd_dev[k] + yd_dev[k];
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Computes Z[j] = sum_{i=0}^{nsum-1} c[i] * X[i][j] for j = 0,...,nvec-1,
 * with in-place fast paths when Z aliases X[0].  Dispatches to simpler
 * kernels for small nvec/nsum.  Returns 0 on success, -1 on invalid args. */
int N_VLinearCombinationVectorArray_OpenMPDEV(int nvec, int nsum,
                                              realtype* c,
                                              N_Vector** X,
                                              N_Vector* Z)
{
  int i; /* vector arrays index in summation [0,nsum) */
  int j; /* vector index in vector array     [0,nvec) */
  sunindextype k; /* element index in vector  [0,N)   */
  sunindextype N;
  realtype* zd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** zd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;
  int dev;
  realtype* ctmp;
  N_Vector* Y;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */
  if (nvec == 1) {
    /* should have called N_VScale */
    if (nsum == 1) {
      N_VScale_OpenMPDEV(c[0], X[0][0], Z[0]);
      return(0);
    }
    /* should have called N_VLinearSum */
    if (nsum == 2) {
      N_VLinearSum_OpenMPDEV(c[0], X[0][0], c[1], X[1][0], Z[0]);
      return(0);
    }
    /* should have called N_VLinearCombination */
    Y = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    for (i=0; i<nsum; i++) {
      Y[i] = X[i][0];
    }
    N_VLinearCombination_OpenMPDEV(nsum, c, Y, Z[0]);
    free(Y);
    return(0);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */
  /* should have called N_VScaleVectorArray */
  if (nsum == 1) {
    ctmp = (realtype*) malloc(nvec * sizeof(realtype));
    for (j=0; j<nvec; j++) {
      ctmp[j] = c[0];
    }
    N_VScaleVectorArray_OpenMPDEV(nvec, ctmp, X[0], Z);
    free(ctmp);
    return(0);
  }
  /* should have called N_VLinearSumVectorArray */
  if (nsum == 2) {
    N_VLinearSumVectorArray_OpenMPDEV(nvec, c[0], X[0], c[1], X[1], Z);
    return(0);
  }

  /* --------------------------
   * Compute linear combination
   * -------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device; entry [j*nsum+i]
     holds the device data pointer of vector X[i][j] */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  xd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (j=0; j<nvec; j++)
    zd_dev_ptrs[j] = NV_DATA_DEV_OMPDEV(Z[j]);
  for (j=0; j<nvec; j++) {
    for (i=0; i<nsum; i++)
      xd_dev_ptrs[j * nsum + i] = NV_DATA_DEV_OMPDEV(X[i][j]);
  }

  /*
   * X[0][j] += c[i]*X[i][j], i = 1,...,nsum-1
   */
  if ((X[0] == Z) && (c[0] == ONE)) {
    /* FIX: the distributed loop must immediately follow the teams
       distribute directive; the original compound block was
       non-conforming OpenMP */
    #pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
        is_device_ptr(xd_dev, zd_dev) device(dev)
    #pragma omp teams distribute
    for (j=0; j<nvec; j++) {
      zd_dev = zd_dev_ptrs[j];
      for (i=1; i<nsum; i++) {
        xd_dev = xd_dev_ptrs[j*nsum+i];
        #pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] += c[i] * xd_dev[k];
      }
    }
    free(xd_dev_ptrs);
    free(zd_dev_ptrs);
    return(0);
  }

  /*
   * X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nsum-1
   */
  if (X[0] == Z) {
    /* FIX: xd_dev added to is_device_ptr for consistency with the
       region above (it is referenced inside this region as well) */
    #pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
        is_device_ptr(xd_dev, zd_dev) device(dev)
    #pragma omp teams distribute
    for (j=0; j<nvec; j++) {
      zd_dev = zd_dev_ptrs[j];
      #pragma omp parallel for schedule(static, 1)
      for (k=0; k<N; k++)
        zd_dev[k] *= c[0];
      for (i=1; i<nsum; i++) {
        xd_dev = xd_dev_ptrs[j*nsum+i];
        #pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] += c[i] * xd_dev[k];
      }
    }
    free(xd_dev_ptrs);
    free(zd_dev_ptrs);
    return(0);
  }

  /*
   * Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nsum-1
   */
  #pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
      is_device_ptr(xd_dev, zd_dev) device(dev)
  #pragma omp teams distribute
  for (j=0; j<nvec; j++) {
    /* scale first vector in the sum into the output vector */
    xd_dev = xd_dev_ptrs[j*nsum];
    zd_dev = zd_dev_ptrs[j];
    #pragma omp parallel for schedule(static, 1)
    for (k=0; k<N; k++)
      zd_dev[k] = c[0] * xd_dev[k];
    /* scale and sum remaining vectors into the output vector */
    for (i=1; i<nsum; i++) {
      xd_dev = xd_dev_ptrs[j*nsum+i];
      #pragma omp parallel for schedule(static, 1)
      for (k=0; k<N; k++)
        zd_dev[k] += c[i] * xd_dev[k];
    }
  }

  free(xd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/*
* -----------------------------------------------------------------
* private functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
 * Copy vector components into a second vector
 */

static void VCopy_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  /* vector length and device-resident data arrays */
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* run the copy on the default offload device */
  dev = omp_get_default_device();

#pragma omp target map(to:N) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = xd_dev[k];
}
/* ----------------------------------------------------------------------------
 * Compute vector sum
 */

static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  /* vector length and device-resident data arrays */
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* offload the elementwise sum to the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = xd_dev[k]+yd_dev[k];
}
/* ----------------------------------------------------------------------------
 * Compute vector difference
 */

static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  /* vector length and device-resident data arrays */
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* offload the elementwise difference to the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = xd_dev[k]-yd_dev[k];
}
/* ----------------------------------------------------------------------------
 * Compute the negative of a vector
 */

static void VNeg_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  /* vector length and device-resident data arrays */
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* offload the negation to the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:N) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = -xd_dev[k];
}
/* ----------------------------------------------------------------------------
 * Compute scaled vector sum: z[i] = c*(x[i]+y[i])
 */

static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  /* vector length and device-resident data arrays */
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* offload the scaled sum to the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:N,c) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = c*(xd_dev[k]+yd_dev[k]);
}
/* ----------------------------------------------------------------------------
 * Compute scaled vector difference: z[i] = c*(x[i]-y[i])
 */

static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  /* vector length and device-resident data arrays */
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* offload the scaled difference to the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:N,c) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = c*(xd_dev[k]-yd_dev[k]);
}
/* ----------------------------------------------------------------------------
 * Compute vector sum z[i] = a*x[i]+y[i]
 */

static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  /* vector length and device-resident data arrays */
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* offload the axpy-style update to the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:N,a) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = (a*xd_dev[k])+yd_dev[k];
}
/* ----------------------------------------------------------------------------
 * Compute vector difference z[i] = a*x[i]-y[i]
 */

static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  /* vector length and device-resident data arrays */
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* offload the scaled subtraction to the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:N,a) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = (a*xd_dev[k])-yd_dev[k];
}
/* ----------------------------------------------------------------------------
 * Compute special cases of linear sum: y[i] += a*x[i]
 */

static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y)
{
  sunindextype k, N;
  realtype *xd_dev, *yd_dev;
  int dev;

  /* vector length and device-resident data arrays */
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);

  /* run on the default offload device */
  dev = omp_get_default_device();

  if (a == ONE) {
    /* plain accumulation */
#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (k = 0; k < N; k++)
      yd_dev[k] += xd_dev[k];
  }
  else if (a == -ONE) {
    /* plain subtraction */
#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (k = 0; k < N; k++)
      yd_dev[k] -= xd_dev[k];
  }
  else {
    /* general coefficient */
#pragma omp target map(to:N,a) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (k = 0; k < N; k++)
      yd_dev[k] += a*xd_dev[k];
  }
}
/* ----------------------------------------------------------------------------
 * Compute scaled vector x[i] = a*x[i]
 */

static void VScaleBy_OpenMPDEV(realtype a, N_Vector x)
{
  sunindextype k, N;
  realtype *xd_dev;
  int dev;

  /* vector length and device-resident data array */
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* offload the in-place scaling to the default device */
  dev = omp_get_default_device();

#pragma omp target map(to:N,a) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    xd_dev[k] *= a;
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector array operations
* -----------------------------------------------------------------
*/
/* Computes Z[i] = X[i] + Y[i] for each vector in the arrays on the default
 * OpenMP device.  Always returns 0. */
static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* FIX: the distributed loop must immediately follow the teams distribute
     directive; the original compound block was non-conforming OpenMP */
  #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
      is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
  #pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
    yd_dev = yd_dev_ptrs[i];
    zd_dev = zd_dev_ptrs[i];
    #pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = xd_dev[j] + yd_dev[j];
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Computes Z[i] = X[i] - Y[i] for each vector in the arrays on the default
 * OpenMP device.  Always returns 0. */
static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* FIX: the distributed loop must immediately follow the teams distribute
     directive; the original compound block was non-conforming OpenMP */
  #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
      is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
  #pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
    yd_dev = yd_dev_ptrs[i];
    zd_dev = zd_dev_ptrs[i];
    #pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = xd_dev[j] - yd_dev[j];
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Computes Z[i] = c * (X[i] + Y[i]) for each vector in the arrays on the
 * default OpenMP device.  Always returns 0. */
static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* FIX: the distributed loop must immediately follow the teams distribute
     directive; the original compound block was non-conforming OpenMP */
  #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
      is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
  #pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
    yd_dev = yd_dev_ptrs[i];
    zd_dev = zd_dev_ptrs[i];
    #pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = c * (xd_dev[j] + yd_dev[j]);
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Computes Z[i] = c * (X[i] - Y[i]) for each vector in the arrays on the
 * default OpenMP device.  Always returns 0. */
static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* FIX: the distributed loop must immediately follow the teams distribute
     directive; the original compound block was non-conforming OpenMP */
  #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
      is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
  #pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
    yd_dev = yd_dev_ptrs[i];
    zd_dev = zd_dev_ptrs[i];
    #pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = c * (xd_dev[j] - yd_dev[j]);
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Computes Z[i] = a*X[i] + Y[i] for each vector in the arrays on the
 * default OpenMP device.  Always returns 0. */
static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* FIX: the distributed loop must immediately follow the teams distribute
     directive; the original compound block was non-conforming OpenMP */
  #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
      is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
  #pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
    yd_dev = yd_dev_ptrs[i];
    zd_dev = zd_dev_ptrs[i];
    #pragma omp parallel for schedule(static, 1)
    for (j=0; j<N; j++)
      zd_dev[j] = (a * xd_dev[j]) + yd_dev[j];
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Computes Z[i] = a * X[i] - Y[i] for each of the nvec vector triples
 * using OpenMP device offloading (one team per vector, team threads split
 * the entries).  Length is taken from X[0]; all vectors are assumed to
 * share it -- TODO confirm with callers.  Always returns 0. */
static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
/* Offload: pointer arrays are mapped; the data buffers are device
   pointers already resident on the target. */
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = (a * xd_dev[j]) - yd_dev[j];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/* Computes Y[i] += a * X[i] for each of the nvec vector pairs using OpenMP
 * device offloading, with specialized kernels for a == 1 and a == -1 that
 * avoid the multiplication.  Length is taken from X[0]; all vectors are
 * assumed to share it -- TODO confirm with callers.  Always returns 0. */
static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
/* Special case a == 1: Y[i] += X[i] (no multiply) */
if (a == ONE) {
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
yd_dev[j] += xd_dev[j];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
return(0);
}
/* Special case a == -1: Y[i] -= X[i] (no multiply) */
if (a == -ONE) {
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
yd_dev[j] -= xd_dev[j];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
return(0);
}
/* General case: Y[i] += a * X[i] */
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
yd_dev[j] += a * xd_dev[j];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
return(0);
}
/*
* -----------------------------------------------------------------
* Enable / Disable fused and vector array operations
* -----------------------------------------------------------------
*/
/* Enables (tf nonzero) or disables (tf zero) every fused and vector-array
 * operation on v by installing the OpenMPDEV implementation or NULL.
 * Returns 0 on success, -1 if v or its ops table is NULL. */
int N_VEnableFusedOps_OpenMPDEV(N_Vector v, booleantype tf)
{
/* require a valid vector with an ops table */
if (v == NULL || v->ops == NULL) return(-1);
/* fused vector operations */
v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMPDEV : NULL;
v->ops->nvscaleaddmulti     = tf ? N_VScaleAddMulti_OpenMPDEV     : NULL;
v->ops->nvdotprodmulti      = tf ? N_VDotProdMulti_OpenMPDEV      : NULL;
/* vector array operations */
v->ops->nvlinearsumvectorarray         = tf ? N_VLinearSumVectorArray_OpenMPDEV         : NULL;
v->ops->nvscalevectorarray             = tf ? N_VScaleVectorArray_OpenMPDEV             : NULL;
v->ops->nvconstvectorarray             = tf ? N_VConstVectorArray_OpenMPDEV             : NULL;
v->ops->nvwrmsnormvectorarray          = tf ? N_VWrmsNormVectorArray_OpenMPDEV          : NULL;
v->ops->nvwrmsnormmaskvectorarray      = tf ? N_VWrmsNormMaskVectorArray_OpenMPDEV      : NULL;
v->ops->nvscaleaddmultivectorarray     = tf ? N_VScaleAddMultiVectorArray_OpenMPDEV     : NULL;
v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMPDEV : NULL;
return(0);
}
/* Installs (tf nonzero) or removes (tf zero) the fused linear-combination
 * operation on v.  Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableLinearCombination_OpenMPDEV(N_Vector v, booleantype tf)
{
if (v == NULL || v->ops == NULL) return(-1);
v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMPDEV : NULL;
return(0);
}
/* Installs (tf nonzero) or removes (tf zero) the fused scale-add-multi
 * operation on v.  Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableScaleAddMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
if (v == NULL || v->ops == NULL) return(-1);
v->ops->nvscaleaddmulti = tf ? N_VScaleAddMulti_OpenMPDEV : NULL;
return(0);
}
/* Installs (tf nonzero) or removes (tf zero) the fused dot-product-multi
 * operation on v.  Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableDotProdMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
if (v == NULL || v->ops == NULL) return(-1);
v->ops->nvdotprodmulti = tf ? N_VDotProdMulti_OpenMPDEV : NULL;
return(0);
}
/* Installs (tf nonzero) or removes (tf zero) the linear-sum vector-array
 * operation on v.  Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableLinearSumVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
if (v == NULL || v->ops == NULL) return(-1);
v->ops->nvlinearsumvectorarray = tf ? N_VLinearSumVectorArray_OpenMPDEV : NULL;
return(0);
}
/* Installs (tf nonzero) or removes (tf zero) the scale vector-array
 * operation on v.  Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableScaleVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
if (v == NULL || v->ops == NULL) return(-1);
v->ops->nvscalevectorarray = tf ? N_VScaleVectorArray_OpenMPDEV : NULL;
return(0);
}
/* Installs (tf nonzero) or removes (tf zero) the const vector-array
 * operation on v.  Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableConstVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
if (v == NULL || v->ops == NULL) return(-1);
v->ops->nvconstvectorarray = tf ? N_VConstVectorArray_OpenMPDEV : NULL;
return(0);
}
/* Installs (tf nonzero) or removes (tf zero) the WRMS-norm vector-array
 * operation on v.  Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableWrmsNormVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
if (v == NULL || v->ops == NULL) return(-1);
v->ops->nvwrmsnormvectorarray = tf ? N_VWrmsNormVectorArray_OpenMPDEV : NULL;
return(0);
}
/* Installs (tf nonzero) or removes (tf zero) the masked WRMS-norm
 * vector-array operation on v.  Returns 0 on success, -1 if v or v->ops
 * is NULL. */
int N_VEnableWrmsNormMaskVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
if (v == NULL || v->ops == NULL) return(-1);
v->ops->nvwrmsnormmaskvectorarray = tf ? N_VWrmsNormMaskVectorArray_OpenMPDEV : NULL;
return(0);
}
/* Installs (tf nonzero) or removes (tf zero) the scale-add-multi
 * vector-array operation on v.  Returns 0 on success, -1 if v or v->ops
 * is NULL. */
int N_VEnableScaleAddMultiVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
if (v == NULL || v->ops == NULL) return(-1);
v->ops->nvscaleaddmultivectorarray = tf ? N_VScaleAddMultiVectorArray_OpenMPDEV : NULL;
return(0);
}
/* Installs (tf nonzero) or removes (tf zero) the linear-combination
 * vector-array operation on v.  Returns 0 on success, -1 if v or v->ops
 * is NULL. */
int N_VEnableLinearCombinationVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
if (v == NULL || v->ops == NULL) return(-1);
v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMPDEV : NULL;
return(0);
}
|
zero_length_array_section_exit.c | // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -fopenmp-version=51
// RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \
// RUN: | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -fopenmp-version=51
// RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -fopenmp-version=51
// RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu -fopenmp-version=51
// RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \
// RUN: | %fcheck-x86_64-pc-linux-gnu
#include <stdio.h>
// Regression test: a zero-length array section (arr[0:0]) on "target enter
// data" must not create a device mapping, so a later map with the 'present'
// modifier on the same address is required to fail.  The RUN/CHECK comment
// lines below are lit/FileCheck directives and must not be altered.
int main() {
int arr[5];
// CHECK: addr=0x[[#%x,HOST_ADDR:]]
fprintf(stderr, "addr=%p\n", arr);
// CHECK-NOT: Libomptarget
// Here the full-extent enter-data creates a mapping, so the zero-length
// 'present' section on exit-data finds it and succeeds.
#pragma omp target enter data map(alloc: arr[0:5])
#pragma omp target exit data map(present, release: arr[0:0])
// CHECK: arr is present
fprintf(stderr, "arr is present\n");
// arr[0:0] doesn't create an actual mapping in the first directive.
//
// CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] (0 bytes)
// CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory
#pragma omp target enter data map(alloc: arr[0:0])
#pragma omp target exit data map(present, release: arr[0:0])
// CHECK-NOT: arr is present
// Unreachable when the runtime aborts above, as CHECK-NOT requires.
fprintf(stderr, "arr is present\n");
return 0;
}
|
SparseDenseProduct.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEDENSEPRODUCT_H
#define EIGEN_SPARSEDENSEPRODUCT_H
namespace Eigen {
namespace internal {
// For outer products mixing sparse and dense operands the result keeps
// sparse storage (its nonzero pattern follows the sparse factor).
template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };
template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };
// Primary dispatch template for sparse * dense products.  Specializations
// are selected on (a) the storage order of the sparse lhs and (b) whether
// the dense rhs is consumed one column at a time (ColPerCol: col-major rhs
// or a compile-time single-column rhs).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
typename AlphaType,
int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl;
// res += alpha * lhs * rhs for a row-major sparse lhs, consuming the dense
// rhs one column at a time.  Each result coefficient is an independent dot
// product of a sparse row with a dense column, so the row loop can run in
// parallel (OpenMP branch below).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
typedef evaluator<Lhs> LhsEval;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
LhsEval lhsEval(lhs);
Index n = lhs.outerSize();
#ifdef EIGEN_HAS_OPENMP
Eigen::initParallel();
Index threads = Eigen::nbThreads();
#endif
for(Index c=0; c<rhs.cols(); ++c)
{
#ifdef EIGEN_HAS_OPENMP
// This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
// It basically represents the minimal amount of work to be done to be worth it.
if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
{
#pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
for(Index i=0; i<n; ++i)
processRow(lhsEval,rhs,res,alpha,i,c);
}
else
#endif
{
// Sequential fallback: small problems or single-threaded builds.
for(Index i=0; i<n; ++i)
processRow(lhsEval,rhs,res,alpha,i,c);
}
}
}
// res(i,col) += alpha * dot(lhs.row(i), rhs.col(col))
static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
{
typename Res::Scalar tmp(0);
for(LhsInnerIterator it(lhsEval,i); it ;++it)
tmp += it.value() * rhs.coeff(it.index(),col);
res.coeffRef(i,col) += alpha * tmp;
}
};
// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
// NOTE(review): appears to enable mixed-scalar products where the rhs is a
// Ref<>; the ReturnType mirrors the scalar_multiple2_op functor.  Confirm
// whether this path is still reachable before changing it.
template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
struct scalar_product_traits<T1, Ref<T2/*, _Options, _StrideType*/> >
{
enum {
Defined = 1
};
typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index c=0; c<rhs.cols(); ++c)
{
for(Index j=0; j<lhs.outerSize(); ++j)
{
// typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
typename internal::scalar_product_traits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.coeffRef(it.index(),c) += it.value() * rhs_j;
}
}
}
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index j=0; j<lhs.outerSize(); ++j)
{
typename Res::RowXpr res_j(res.row(j));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res_j += (alpha*it.value()) * rhs.row(it.index());
}
}
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index j=0; j<lhs.outerSize(); ++j)
{
typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.row(it.index()) += (alpha*it.value()) * rhs_j;
}
}
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
}
} // end namespace internal
namespace internal {
// Product evaluation for sparse(lhs) * dense(rhs): nest-evaluate both
// operands, then forward to sparse_time_dense_product.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
// dst += alpha * lhs * rhs
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
// Nesting choices mirror the ColPerCol dispatch above: the rhs may be
// traversed several times when processed column-by-column.
typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? 1 : Rhs::ColsAtCompileTime>::type LhsNested;
typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==0) ? 1 : Dynamic>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhs);
internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);
}
};
// A sparse-triangular lhs is handled like a plain sparse lhs.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>
: generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
{};
// Product evaluation for dense(lhs) * sparse(rhs): computed as the
// transposed sparse * dense product, dst^T += alpha * rhs^T * lhs^T.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
// dst += alpha * lhs * rhs
template<typename Dst>
static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? Dynamic : 1>::type LhsNested;
typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==RowMajorBit) ? 1 : Lhs::RowsAtCompileTime>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhs);
// transpose everything
Transpose<Dst> dstT(dst);
internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
}
};
// A sparse-triangular rhs is handled like a plain sparse rhs.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>
: generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
{};
// Evaluator for the outer product of a sparse factor with a dense factor,
// in either order (NeedToTranspose swaps the roles so "Lhs1" is always the
// factor whose nonzeros drive iteration).
template<typename LhsT, typename RhsT, bool NeedToTranspose>
struct sparse_dense_outer_product_evaluator
{
protected:
typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;
// if the actual left-hand side is a dense vector,
// then build a sparse-view so that we can seamlessly iterate over it.
typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
Lhs1, SparseView<Lhs1> >::type ActualLhs;
typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
Lhs1 const&, SparseView<Lhs1> >::type LhsArg;
typedef evaluator<ActualLhs> LhsEval;
typedef evaluator<ActualRhs> RhsEval;
typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
typedef typename ProdXprType::Scalar Scalar;
public:
enum {
Flags = NeedToTranspose ? RowMajorBit : 0,
CoeffReadCost = HugeCost
};
// Iterates over one outer index of the product: the lhs nonzeros, each
// scaled by the single rhs coefficient for this outer index (m_factor).
class InnerIterator : public LhsIterator
{
public:
InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
: LhsIterator(xprEval.m_lhsXprImpl, 0),
m_outer(outer),
m_empty(false),
m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
{}
EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; }
EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
// The iterator is exhausted immediately when the rhs coefficient for this
// outer index is structurally zero (m_empty).
EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }
protected:
// Tag-dispatched fetch of the rhs coefficient: dense rhs reads directly.
Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
{
return rhs.coeff(outer);
}
// Sparse rhs: the coefficient exists only if the inner vector's single
// candidate entry sits at index 0 and is nonzero; otherwise mark empty.
Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
{
typename RhsEval::InnerIterator it(rhs, outer);
if (it && it.index()==0 && it.value()!=Scalar(0))
return it.value();
m_empty = true;
return Scalar(0);
}
Index m_outer;
bool m_empty;
Scalar m_factor;
};
sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)
: m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
// transpose case
sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)
: m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
protected:
// m_lhs keeps the (possibly sparse-viewed) lhs alive for m_lhsXprImpl.
const LhsArg m_lhs;
evaluator<ActualLhs> m_lhsXprImpl;
evaluator<ActualRhs> m_rhsXprImpl;
};
// sparse * dense outer product
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape>
: sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
{
// Transposition is needed when the sparse lhs is row-major so that its
// nonzeros can drive the inner iteration.
typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;
typedef Product<Lhs, Rhs> XprType;
typedef typename XprType::PlainObject PlainObject;
explicit product_evaluator(const XprType& xpr)
: Base(xpr.lhs(), xpr.rhs())
{}
};
// dense * sparse outer product: the sparse rhs drives iteration, with the
// roles swapped when it is row-major.
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape>
: sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
{
typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;
typedef Product<Lhs, Rhs> XprType;
typedef typename XprType::PlainObject PlainObject;
explicit product_evaluator(const XprType& xpr)
: Base(xpr.lhs(), xpr.rhs())
{}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSEDENSEPRODUCT_H
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.