broadcast_reduce-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2015-2017 by Contributors
 * \file broadcast_reduce-inl.h
 * \brief Function definitions of broadcast and reduce operators
 */
#ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_

#include <mxnet/operator_util.h>
#include <algorithm>
#include <vector>
#include <string>
#include <utility>
#include "../mshadow_op.h"

namespace mxnet {
namespace op {
namespace broadcast {
using namespace mshadow;

const int MAX_DIM = 5;

template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

template<int ndim>
MSHADOW_XINLINE void unravel_dot(const int idx, const Shape<ndim>& shape,
  const Shape<ndim>& stridej, const Shape<ndim>& stridek, int* j, int* k) {
  *j = 0;
  *k = 0;
  #pragma unroll
  for (int i = ndim-1, idx_t = idx; i >= 0; --i) {
    const int tmp = idx_t / shape[i];
    const int coord = idx_t - tmp*shape[i];
    *j += coord*stridej[i];
    *k += coord*stridek[i];
    idx_t = tmp;
  }
}

template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  for (int i = ndim-1, j = idx; i >= 0; --i) {
    int tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];
    j = tmp;
  }
  return ret;
}

template<int ndim>
MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  int ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret = ret * shape[i] + (shape[i] > 1) * coord[i];
  }
  return ret;
}

template<int ndim>
MSHADOW_XINLINE int diff(const Shape<ndim>& small, const Shape<ndim>& big,
  Shape<ndim>* dims, Shape<ndim>* stride) {
  int mdim = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    mdim += small[i] != big[i];
    (*dims)[i] = (*stride)[i] = 1;
  }
  #pragma unroll
  for (int i = ndim-1, j = mdim, s = 1; i >= 0; --i) {
    if (small[i] != big[i]) {
      --j;
      (*stride)[j] = s;
      (*dims)[j] = big[i];
    }
    s *= big[i];
  }
  return mdim;
}

template<int ndim>
MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  int ret = 0;
  #pragma unroll
  for (int i = ndim-1, j = idx; i >= 0; --i) {
    int tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];
    j = tmp;
  }
  return ret;
}

template<int ndim>
MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  int ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i)
    ret += coord[i] * stride[i];
  return ret;
}

template<typename DType>
MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) {
  if (addto) {
    *dst += src;
  } else {
    *dst = src;
  }
}

template<int ndim, typename DType, typename OP>
MSHADOW_XINLINE void binary_broadcast_assign(const int idx, const bool addto,
  const DType* __restrict lhs, const DType* __restrict rhs, DType* out,
  const Shape<ndim>& lshape, const Shape<ndim>& rshape, const Shape<ndim>& oshape) {
  const Shape<ndim> coord = unravel(idx, oshape);
  const int j = ravel(coord, lshape);
  const int k = ravel(coord, rshape);
  assign(&out[idx], addto, OP::Map(lhs[j], rhs[k]));
}

template<typename Reducer, int ndim, typename DType, typename OP>
MSHADOW_XINLINE void seq_reduce_assign(const int idx, const int M, const bool addto,
  const DType* __restrict big, DType *small, const Shape<ndim>& bshape,
  const Shape<ndim>& sshape, const Shape<ndim>& rshape, const Shape<ndim>& rstride) {
  Shape<ndim> coord = unravel(idx, sshape);
  int j = ravel(coord, bshape);
  DType val, residual;
  Reducer::SetInitValue(val, residual);
  for (int k = 0; k < M; ++k) {
    coord = unravel(k, rshape);
    Reducer::Reduce(val, OP::Map(big[j + dot(coord, rstride)]), residual);
  }
  assign(&small[idx], addto, val);
}

#ifdef __CUDACC__
#include "broadcast_reduce-inl.cuh"
#else

template<int ndim, typename DType, typename OP>
void binary_broadcast_compute(const int N, const bool addto, const DType *lhs,
  const DType *rhs, DType *out, const Shape<ndim> lshape, const Shape<ndim> rshape,
  const Shape<ndim> oshape) {
  for (int idx = 0; idx < N; ++idx) {
    binary_broadcast_assign<ndim, DType, OP>(idx, addto, lhs, rhs, out, lshape,
                                             rshape, oshape);
  }
}

template<int ndim, typename DType, typename OP>
void BinaryBroadcastComputeImpl(Stream<cpu> *s, const OpReqType req,
                                const TBlob& lhs, const TBlob& rhs, const TBlob& out) {
  if (req == kNullOp) return;
  int N = out.shape_.Size();
  binary_broadcast_compute<ndim, DType, OP>(N, req == kAddTo, lhs.dptr<DType>(),
    rhs.dptr<DType>(), out.dptr<DType>(), lhs.shape_.get<ndim>(),
    rhs.shape_.get<ndim>(), out.shape_.get<ndim>());
}

template<typename Reducer, int ndim, typename DType, typename OP>
void seq_reduce_compute(const int N, const int M, const bool addto,
  const DType *big, DType *small, const Shape<ndim> bshape, const Shape<ndim> sshape,
  const Shape<ndim> rshape, const Shape<ndim> rstride) {
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (int idx = 0; idx < N; ++idx) {
    seq_reduce_assign<Reducer, ndim, DType, OP>(idx, M, addto, big, small,
      bshape, sshape, rshape, rstride);
  }
}

template<typename Reducer, int ndim, typename DType, typename OP>
void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req,
            const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  int N = small.shape_.Size(), M = rshape.Size();
  seq_reduce_compute<Reducer, ndim, DType, OP>(
    N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(),
    big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
}

template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const TShape& small, const OpReqType req,
                           const TShape& big) {
  return 0;
}

template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const TShape& small, const OpReqType req,
                           const TShape& big, const TShape& lhs, const TShape& rhs) {
  return 0;
}

template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
MSHADOW_XINLINE void seq_reduce_assign(const int idx, const int M, const bool addto,
  const DType* __restrict big, const DType* __restrict lhs, const DType* __restrict rhs,
  DType *small, const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0,
  const Shape<ndim>& rhs_shape0, const Shape<ndim>& small_shape,
  const Shape<ndim>& rshape, const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape,
  const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride, const Shape<ndim>& rhs_stride) {
  Shape<ndim> coord = unravel(idx, small_shape);
  const int idx_big0 = ravel(coord, big_shape);
  const int idx_lhs0 = ravel(coord, lhs_shape0);
  const int idx_rhs0 = ravel(coord, rhs_shape0);
  DType val, residual;
  Reducer::SetInitValue(val, residual);
  for (int k = 0; k < M; ++k) {
    Shape<ndim> coord_big = unravel(k, rshape);
    int idx_big = idx_big0 + dot(coord_big, rstride);
    Shape<ndim> coord_lhs = unravel(k, lhs_shape);
    int idx_lhs = idx_lhs0 + dot(coord_lhs, lhs_stride);
    Shape<ndim> coord_rhs = unravel(k, rhs_shape);
    int idx_rhs = idx_rhs0 + dot(coord_rhs, rhs_stride);
    Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])),
                    residual);
  }
  assign(&small[idx], addto, val);
}

template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void seq_reduce_compute(const int N, const int M, const bool addto,
  const DType *big, const DType *lhs, const DType *rhs, DType *small,
  const Shape<ndim> big_shape, const Shape<ndim> small_shape,
  const Shape<ndim> rshape, const Shape<ndim> rstride,
  const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride,
  const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride,
  const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) {
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (int idx = 0; idx < N; ++idx) {
    seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs,
      small, big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape,
      rhs_shape, rstride, lhs_stride, rhs_stride);
  }
}

template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req,
            const Tensor<cpu, 1, char>& workspace, const TBlob& big,
            const TBlob& lhs, const TBlob& rhs) {
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  int N = small.shape_.Size();
  int M = rshape.Size();
  Shape<ndim> lhs_shape, lhs_stride;
  diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride);
  Shape<ndim> rhs_shape, rhs_stride;
  diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride);
  seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>(
    N, M, req == kAddTo, big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(),
    small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(),
    rshape, rstride, lhs_shape, lhs_stride, rhs_shape, rhs_stride,
    lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>());
}

#endif
}  // namespace broadcast
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
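The index helpers above are the crux of the broadcasting scheme: unravel turns a flat row-major offset into coordinates, and ravel maps coordinates back to a flat offset while giving size-1 (broadcast) axes no weight, via the (shape[i] > 1) factor. A small plain-C illustration, fixed to 3 dimensions and not part of the MXNet header:

#include <stdio.h>

int main(void) {
    int shape[3]  = {2, 3, 4};   /* "big" shape */
    int bshape[3] = {2, 1, 4};   /* same shape with a broadcast middle axis */
    int idx = 17;                /* flat index into the 2x3x4 array */
    int coord[3];
    /* unravel: peel off the fastest-varying axis first */
    int rem = idx;
    for (int i = 2; i >= 0; --i) {
        coord[i] = rem % shape[i];
        rem /= shape[i];
    }
    /* ravel against bshape: the size-1 axis contributes nothing */
    int flat = 0;
    for (int i = 0; i < 3; ++i)
        flat = flat * bshape[i] + (bshape[i] > 1) * coord[i];
    /* prints: coord = (1,1,1), broadcast flat index = 5 */
    printf("coord = (%d,%d,%d), broadcast flat index = %d\n",
           coord[0], coord[1], coord[2], flat);
    return 0;
}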
cholesky.c
/*****************************************************
 * Site: https://rosettacode.org/wiki/Cholesky_decomposition
 *****************************************************/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <omp.h>
#include <sys/time.h>

void cholesky(double** A, int n, int nt) {
    /* Row i depends on all earlier rows, and A[i][j] depends on A[i][k]
     * for k < j, so neither loop of the original row-oriented nest can be
     * parallelized as-is (the original pragma on the outer loop raced on
     * partially written rows). Processing column by column instead makes
     * the updates below the diagonal independent of each other. */
    for (int j = 0; j < n; j++) {
        double s = A[j][j];
        for (int k = 0; k < j; k++)
            s -= A[j][k] * A[j][k];
        A[j][j] = sqrt(s);
#pragma omp parallel for num_threads(nt)
        for (int i = j + 1; i < n; i++) {
            double t = A[i][j];
            for (int k = 0; k < j; k++)
                t -= A[i][k] * A[j][k];
            A[i][j] = t / A[j][j];
        }
    }
}

void show_matrix(double** A, int n) {
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++)
            printf("%2.5f ", A[i][j]);
        printf("\n");
    }
}

void mat_zero(double** x, int n) {
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            x[i][j] = 0.0;
}

double** mat_new(int n) {
    double** x = malloc(sizeof(double*) * n);
    assert(x != NULL);
    for (int i = 0; i < n; i++) {
        x[i] = malloc(sizeof(double) * n);
        assert(x[i] != NULL);
    }
    mat_zero(x, n);
    return x;
}

/* Fill with a symmetric, strongly diagonally dominant (hence positive
 * definite) test matrix. */
void rand_fill(double** x, int n) {
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            x[i][j] = (double)(i + 1) * (j + 1);
    for (int i = 0; i < n; i++)
        x[i][i] *= 100;
}

/* Read an n x n matrix from stdin (unused by the benchmark below). */
void mat_gen(double** s, int n) {
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            scanf("%lf", &s[i][j]);
}

void mat_del(double** x, int n) {
    /* Each row was allocated separately, so each row must be freed;
     * the original freed only x[0] and leaked the rest. */
    for (int i = 0; i < n; i++)
        free(x[i]);
    free(x);
}

double wtime() {
    struct timeval t;
    gettimeofday(&t, NULL);
    return t.tv_sec + t.tv_usec / 1000000.0;
}

int main() {
    int l_threads[17] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17};
    int n_threads = 17;
    double matT[10], total, media, desviopadrao, somatorio, mul;
    // test with n = 5000
    int n = 2000;
    // fscanf(stdin, "%d", &n);

    printf("n_threads\tmedia\t\tdesvio\n");
    for (int j = 0; j < n_threads; j++) {
        total = 0;
        somatorio = 0;
        int q_exec = 10;
        printf("%i\t", l_threads[j]);
        for (int i = 0; i < q_exec; i++) {
            double** A = mat_new(n);
            rand_fill(A, n);
            /* Time only the factorization, not allocation and teardown. */
            double start_time = wtime();
            cholesky(A, n, l_threads[j]);
            double end_time = wtime();
            // show_matrix(A, n);
            mat_del(A, n);
            matT[i] = end_time - start_time;
            total += matT[i];
        }
        media = total / q_exec;              /* mean */
        for (int i = 0; i < q_exec; i++) {
            mul = matT[i] - media;
            somatorio += pow(mul, 2);
        }
        somatorio = somatorio / q_exec;
        desviopadrao = sqrt(somatorio);      /* standard deviation */
        printf("\t%f\t%f\n", media, desviopadrao);
    }
    return 0;
}
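A quick way to validate the factorization above is to recompute A from the lower-triangular factor and take the worst entry-wise error. The helper below is hypothetical, not part of the benchmark, and assumes a copy of the original matrix was kept before cholesky() overwrote it in place:

#include <math.h>

/* Returns max |(L*L^T)[i][j] - A_orig[i][j]|; L is the lower triangle
 * left in A by cholesky(), so only entries with k <= min(i,j) are read. */
double check_cholesky(double** L, double** A_orig, int n) {
    double max_err = 0.0;
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++) {
            double s = 0.0;
            int m = (i < j) ? i : j;
            for (int k = 0; k <= m; k++)
                s += L[i][k] * L[j][k];
            double err = fabs(s - A_orig[i][j]);
            if (err > max_err) max_err = err;
        }
    return max_err;
}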
open-mp.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <sched.h>
#include <sys/time.h>

#define MAXWORK 5

int work[MAXWORK];   // bounded work queue
int nitems = 0;      // number of items in the queue
int nextput = 0;     // producer will place the next number at work[nextput]
int nextget = -1;    // consumer will obtain the next number at work[nextget]
int done = 0;        // value 1 signals the producer has generated all items
int pwork;           // work done by the producer
int *cwork;          // work done by the consumers
int nthreads;        // number of threads
int dim;
double* roots;
static int idx = 0;

void next(int *m) {
    (*m)++;
    if (*m >= MAXWORK) {
        *m = 0;
    }
}

void putwork(int k) {
    int put = 0;
    while (!put) {
        // only this producer adds items, so the pre-check cannot go stale
        if (nitems < MAXWORK) {
            #pragma omp critical
            {
                work[nextput] = k;
                if (nitems == 0) {
                    nextget = nextput;
                }
                next(&nextput);
                nitems++;
                put = 1;
            }
        } else {
            sched_yield();
        }
    }
}

int getwork() {
    int k = -1, get = 0;
    while (!get) {
        // leave if producer is finished and the queue has been drained
        if (done && nitems == 0) {
            return -1;
        }
        if (nitems > 0) {
            #pragma omp critical
            {
                if (nitems > 0) {   // re-check: another consumer may have won
                    k = work[nextget];
                    next(&nextget);
                    nitems--;
                    if (nitems == 0) {
                        nextget = -1;
                    }
                    get = 1;
                }
            }
        } else {
            sched_yield();
        }
    }
    return k;
}

void dowork() {
    #pragma omp parallel
    {
        int current_thread = omp_get_thread_num();
        int num;
        #pragma omp single
        {
            nthreads = omp_get_num_threads();
            // calloc zeroes every counter, including the unused producer slot
            cwork = (int*) calloc(nthreads, sizeof(int));
        }
        #pragma omp barrier
        if (current_thread == 0) {
            // Producer; the scheme needs at least two threads, or the
            // producer blocks once the queue fills with nobody to drain it
            pwork = 0;
            while (1) {
                num = idx++;
                putwork(num);
                pwork++;
                if (idx == dim) {
                    done = 1;
                    break;
                }
            }
        } else {
            // Consumer
            while (1) {
                num = getwork();
                if (num == -1) {
                    break;
                }
                cwork[current_thread]++;
                // each index is produced and consumed exactly once, so a
                // plain store suffices; the original "+=" (with an atomic)
                // accumulated into uninitialized malloc'd memory
                roots[num] = sqrt(num);
            }
        }
    }
}

int main(int argc, char **argv) {
    struct timeval startwtime, endwtime;
    double arr_time;
    if (argc != 2) {
        printf("Usage: %s <dim>\n", argv[0]);
        exit(-1);
    }
    dim = atoi(argv[1]);
    roots = (double*) malloc(dim * sizeof(double));
    int i;
    gettimeofday(&startwtime, NULL);
    dowork();
    gettimeofday(&endwtime, NULL);
    arr_time = (double)((endwtime.tv_usec - startwtime.tv_usec) / 1.0e6
                        + endwtime.tv_sec - startwtime.tv_sec);
    printf("Time taken = %f\n", arr_time);
    FILE* out = fopen("data.out", "w");
    for (i = 0; i < dim; i++) {
        fprintf(out, "sqrt(%d): %lf\n", i, roots[i]);
    }
    fprintf(out, "work done by producer: %d\n", pwork);
    fprintf(out, "work done by consumers:\n");
    for (i = 1; i < nthreads; i++) {
        fprintf(out, "%d\n", cwork[i]);
    }
    fclose(out);
    free(roots);
    free(cwork);
    return 0;
}
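Since every index in this program is independent, the bounded queue is purely a synchronization exercise; the same table can be produced with one worksharing loop. A minimal sketch for contrast (compute_roots is illustrative, not part of the original program):

#include <math.h>
#include <omp.h>

/* Data-parallel equivalent of the producer/consumer pipeline above. */
void compute_roots(double *roots, int dim) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < dim; i++)
        roots[i] = sqrt((double) i);
}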
bridge.h
// This file is a bridge connecting the "lib interface" gbbs exports and the
// interface that the current pbbslib exports. We would like to support both
// C++11 users, and the current (C++17) implementation of the lib. Using this
// bridge will hopefully simplify having two separate implementations of the
// lib interface.
#pragma once

#include <cstring>
#include <iostream>
#include <limits>
#include <type_traits>
#include <utility>

#include "parlay/internal/binary_search.h"
#include "parlay/primitives.h"
#include "parlay/monoid.h"
#include "parlay/parallel.h"
#include "parlay/io.h"
#include "parlay/random.h"
#include "parlay/delayed_sequence.h"
#include "parlay/sequence.h"
#include "parlay/slice.h"
#include "parlay/range.h"
#include "parlay/utilities.h"

#include "get_time.h"

namespace gbbs {
// ================== parallel primitives ===================
using parlay::parallel_for;
using parlay::par_do;
using parlay::num_workers;
using parlay::worker_id;

template <typename A, typename Af, typename Df, typename F>
static void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
                               long end, F f, long granularity = 0,
                               bool conservative = false);

template <class T>
using slice = parlay::slice<T*, T*>;

// TODO: check
template <typename T>
using range = slice<T>;

template <typename F>
static void par_for(size_t start, size_t end, size_t granularity, F f,
                    bool parallel = true) {
  if (!parallel) {
    for (size_t i = start; i < end; i++) {
      f(i);
    }
  } else {
    parallel_for(start, end, f, granularity);
  }
}

template <typename F>
static void par_for(size_t start, size_t end, F f, bool parallel = true,
                    size_t granularity = std::numeric_limits<size_t>::max()) {
  if (!parallel) {
    for (size_t i = start; i < end; i++) {
      f(i);
    }
  } else {
    if (granularity == std::numeric_limits<size_t>::max()) {
      parallel_for(start, end, f);
    } else {
      parallel_for(start, end, f, granularity);
    }
  }
}

#ifdef CILK
// TODO try parallel_for_1
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
                               long end, F f, long granularity,
                               bool conservative) {
  alloc_holder<A> alloc;
  parallel_for_1(start, end,
                 [&](size_t i) {
                   init_alloc(&alloc.imp_.view());
                   f(i, &(alloc.imp_.view()));
                   // finish_alloc(&(alloc.imp_.view()));
                 },
                 granularity, conservative);
}
#else
#ifdef OPENMP
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
                               long end, F f, long granularity,
                               bool conservative) {
  A* alloc = nullptr;
#pragma omp parallel private(alloc)
  {
    alloc = new A();
    init_alloc(alloc);
    parallel_for_1(start, end, [&](size_t i) { f(i, alloc); }, granularity,
                   conservative);
    //#pragma omp for schedule(dynamic, 1) nowait
    // for(long i=start; i<end; i++) f(i, alloc);
    finish_alloc(alloc);
  }
}
#else
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
                               long end, F f, long granularity,
                               bool conservative) {
  parallel_for(start, end,
               [&](long i) {
                 static thread_local A* alloc = new A();
                 init_alloc(alloc);
                 f(i, alloc);
               },
               granularity, conservative);
  // finish_alloc(alloc);
}
#endif
#endif

template <class E>
E* new_array_no_init(size_t n) {
#ifndef PARLAY_USE_STD_ALLOC
  auto allocator = parlay::allocator<E>();
#else
  auto allocator = std::allocator<E>();
#endif
  return allocator.allocate(n);
}

// Initializes in parallel
template <typename E>
E* new_array(size_t n) {
  E* r = new_array_no_init<E>(n);
  if (!std::is_trivially_default_constructible<E>::value) {
    // if (!std::is_default_constructible<E>::value) {
    if (n > 2048) {
      auto f = [&](size_t i) { new ((void*)(r + i)) E; };
      parallel_for(0, n, f);
    } else {
      for (size_t i = 0; i < n; i++) new ((void*)(r + i)) E;
    }
  }
  return r;
}

template <class E>
void free_array(E* e, size_t n) {
#ifndef PARLAY_USE_STD_ALLOC
  auto allocator = parlay::allocator<E>();
#else
  auto allocator = std::allocator<E>();
#endif
  allocator.deallocate(e, n);
}

// Alias template so that sequence is exposed w/o namespacing
template <typename T>
using sequence = parlay::sequence<T>;

template <typename Seq>
auto make_slice(const Seq& S) {
  return parlay::make_slice(S.begin(), S.end());
}

template <class E>
slice<E> make_slice(E* start, E* end) {
  return parlay::make_slice((E*)start, (E*)end);
}

// Create a slice from an explicit iterator range
template <typename It, typename S>
parlay::slice<It, S> make_slice(It it, S s) {
  return parlay::make_slice<It, S>(it, s);
}

// // Create a slice from an explicit iterator range
// template<typename It, typename S>
// auto make_slice(It it, S s) {
//   return parlay::slice<It, S>(it, s);
// }

// struct containing no data (used in conjunction with empty-base optimization)
struct empty {};

}  // namespace gbbs

// Bridge to pbbslib (c++17)
namespace pbbslib {

// ====================== utilities =======================
using empty = gbbs::empty;

using flags = parlay::flags;
const flags no_flag = parlay::no_flag;
const flags fl_sequential = parlay::fl_sequential;
const flags fl_debug = parlay::fl_debug;
const flags fl_time = parlay::fl_time;
const flags fl_conservative = parlay::fl_conservative;
const flags fl_inplace = parlay::fl_inplace;

using parlay::parallel_for;
using parlay::par_do;
// using parlay::parallel_for_alloc;  // TODO
using parlay::num_workers;
using parlay::worker_id;

using gbbs::free_array;
using gbbs::new_array_no_init;
using gbbs::new_array;

using parlay::hash32;
using parlay::hash32_2;
using parlay::hash32_3;
using parlay::hash64;
using parlay::hash64_2;

template <class T>
size_t log2_up(T i) {
  size_t a = 0;
  T b = i - 1;
  while (b > 0) {
    b = b >> 1;
    a++;
  }
  return a;
}

// Alias template so that sequence is exposed w/o namespacing
template <typename T>
using sequence = parlay::sequence<T>;

template <typename T>
using range = gbbs::range<T>;

template <typename T>
using slice = gbbs::slice<T>;

template <typename T>
inline void assign_uninitialized(T& a, const T& b) {
  new (static_cast<void*>(std::addressof(a))) T(b);
}

template <typename T>
inline void move_uninitialized(T& a, const T& b) {
  new (static_cast<void*>(std::addressof(a))) T(std::move(b));
}

// Currently unused, but may be useful in the future; including commented out.
// template <class ET>
// inline bool CAS128(ET* a, ET b, ET c) {
//   return __sync_bool_compare_and_swap_16((__int128*)a, *((__int128*)&b),
//                                          *((__int128*)&c));
// }

template <typename ET>
inline bool atomic_compare_and_swap(ET* a, ET oldval, ET newval) {
  if constexpr (sizeof(ET) == 1) {
    uint8_t r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap(reinterpret_cast<uint8_t*>(a), r_oval, r_nval);
  } else if constexpr (sizeof(ET) == 4) {
    uint32_t r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap(reinterpret_cast<uint32_t*>(a), r_oval, r_nval);
  } else if constexpr (sizeof(ET) == 8) {
    uint64_t r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap(reinterpret_cast<uint64_t*>(a), r_oval, r_nval);
  } else if constexpr (sizeof(ET) == 16) {
    __int128 r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap_16(reinterpret_cast<__int128*>(a), r_oval, r_nval);
  } else {
    std::cout << "Bad CAS Length" << sizeof(ET) << std::endl;
    exit(0);
  }
}

template <typename ET>
inline bool atomic_compare_and_swap(volatile ET* a, ET oldval, ET newval) {
  if (sizeof(ET) == 1) {
    uint8_t r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap(reinterpret_cast<volatile uint8_t*>(a), r_oval, r_nval);
  } else if (sizeof(ET) == 4) {
    uint32_t r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap(reinterpret_cast<volatile uint32_t*>(a), r_oval, r_nval);
  } else if (sizeof(ET) == 8) {
    uint64_t r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap(reinterpret_cast<volatile uint64_t*>(a), r_oval, r_nval);
  } else if (sizeof(ET) == 16) {
    __int128 r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap_16(
        reinterpret_cast<volatile __int128*>(a), r_oval, r_nval);
  } else {
    std::cout << "Bad CAS Length" << sizeof(ET) << std::endl;
    exit(0);
  }
}

template <typename E, typename EV>
inline E fetch_and_add(E* a, EV b) {
  volatile E newV, oldV;
  do {
    oldV = *a;
    newV = oldV + b;
  } while (!atomic_compare_and_swap(a, oldV, newV));
  return oldV;
}

template <typename E, typename EV>
inline void write_add(E* a, EV b) {
  // volatile E newV, oldV;
  E newV, oldV;
  do {
    oldV = *a;
    newV = oldV + b;
  } while (!atomic_compare_and_swap(a, oldV, newV));
}

template <typename E, typename EV>
inline void write_add(std::atomic<E>* a, EV b) {
  // volatile E newV, oldV;
  E newV, oldV;
  do {
    oldV = a->load();
    newV = oldV + b;
  } while (!std::atomic_compare_exchange_strong(a, &oldV, newV));
}

template <typename E, typename EV>
inline void write_minus(E* a, EV b) {
  // volatile E newV, oldV;
  E newV, oldV;
  do {
    oldV = *a;
    newV = oldV - b;
  } while (!atomic_compare_and_swap(a, oldV, newV));
}

template <typename ET, typename F>
inline bool write_min(ET* a, ET b, F less) {
  ET c;
  bool r = 0;
  do c = *a;
  while (less(b, c) && !(r = atomic_compare_and_swap(a, c, b)));
  return r;
}

template <typename ET, typename F>
inline bool write_min(volatile ET* a, ET b, F less) {
  ET c;
  bool r = 0;
  do c = *a;
  while (less(b, c) && !(r = atomic_compare_and_swap(a, c, b)));
  return r;
}

template <typename ET, typename F>
inline bool write_min(std::atomic<ET>* a, ET b, F less) {
  ET c;
  bool r = 0;
  do c = a->load();
  while (less(b, c) && !(r = std::atomic_compare_exchange_strong(a, &c, b)));
  return r;
}

template <typename ET, typename F>
inline bool write_max(ET* a, ET b, F less) {
  ET c;
  bool r = 0;
  do c = *a;
  while (less(c, b) && !(r = atomic_compare_and_swap(a, c, b)));
  return r;
}

template <typename ET, typename F>
inline bool write_max(volatile ET* a, ET b, F less) {
  ET c;
  bool r = 0;
  do c = *a;
  while (less(c, b) && !(r = atomic_compare_and_swap(a, c, b)));
  return r;
}

template <typename ET, typename F>
inline bool write_max(std::atomic<ET>* a, ET b, F less) {
  ET c;
  bool r = 0;
  do c = a->load();
  while (less(c, b) && !(r = std::atomic_compare_exchange_strong(a, &c, b)));
  return r;
}

template <typename ET>
inline bool CAS(ET* ptr, const ET oldv, const ET newv) {
  return atomic_compare_and_swap(ptr, oldv, newv);
}

inline long xaddl(long* variable, long value) {
  // Note: the original used "xaddl %%eax", a 32-bit add, on a 64-bit long;
  // on LP64 targets that silently dropped the upper half of the operand.
  asm volatile("lock; xaddq %%rax, %2;"
               : "=a"(value)                   // Output
               : "a"(value), "m"(*variable)    // Input
               : "memory");
  return value;
}

inline int xaddi(int* variable, int value) {
  asm volatile("lock; xadd %%eax, %2;"
               : "=a"(value)                   // Output
               : "a"(value), "m"(*variable)    // Input
               : "memory");
  return value;
}

// The conditional should be removed by the compiler
// this should work with pointer types, or pairs of integers
template <class ET>
inline ET xadd(ET* variable, ET value) {
  if (sizeof(ET) == 8) {
    return xaddl((long*)variable, (long)value);
  } else if (sizeof(ET) == 4) {
    return xaddi((int*)variable, (int)value);
  } else {
    std::cout << "xadd bad length" << "\n";
    abort();
  }
}

template <typename ET>
inline bool write_min(ET* a, ET b) {
  return write_min<ET>(a, b, std::less<ET>());
}

template <typename ET>
inline bool write_max(ET* a, ET b) {
  return write_max<ET>(a, b, std::less<ET>());
}

// Combines two hash values.
inline uint64_t hash_combine(uint64_t hash_value_1, uint64_t hash_value_2) {
  // This is the same as boost's 32-bit `hash_combine` implementation, but with
  // 2 ^ 64 / (golden ratio) chosen as an arbitrary 64-bit additive magic number
  // rather than 2 ^ 32 / (golden ratio).
  return hash_value_1 ^ (hash_value_2 + 0x9e3779b97f4a7c15 +
                         (hash_value_1 << 6) + (hash_value_1 >> 2));
}

// ========================= monoid ==========================
using parlay::make_monoid;

template <class T>
using minm = parlay::minm<T>;

template <class T>
using maxm = parlay::maxm<T>;

template <class T>
using addm = parlay::addm<T>;

template <class T>
using xorm = parlay::xorm<T>;

// ====================== sequence ops =======================
using parlay::scan;
using parlay::scan_inclusive;
using parlay::scan_inplace;
using parlay::scan_inclusive_inplace;
using parlay::reduce;
using parlay::pack;
using parlay::pack_index;
using parlay::internal::pack_out;
using parlay::map;
using parlay::filter;
using parlay::internal::filter_out;
using parlay::internal::split_two;
using parlay::internal::sliced_for;
using parlay::internal::pack_serial_at;
// TODO: filter_index

// TODO all below
using parlay::tokens;
using parlay::chars_to_file;
using parlay::chars_from_file;
using parlay::internal::chars_to_int_t;
using parlay::remove_duplicates_ordered;
using parlay::internal::get_counts;
// using pbbs::map_with_index;

constexpr const size_t _log_block_size = 10;
constexpr const size_t _block_size = (1 << _log_block_size);

inline size_t granularity(size_t n) {
  return (n > 100) ? ceil(pow(n, 0.5)) : 100;
}

inline size_t num_blocks(size_t n, size_t block_size) {
  if (n == 0)
    return 0;
  else
    return (1 + ((n)-1) / (block_size));
}

// used so second template argument can be inferred
template <class T, class F>
inline parlay::delayed_sequence<T, F> make_delayed(size_t n, F f) {
  return parlay::delayed_sequence<T, F>(n, f);
}

template <class T>
auto make_delayed(T* A, size_t n) {
  // capture the pointer by value; the original by-reference capture of the
  // local parameter dangled once the function returned
  return make_delayed<T>(n, [A](size_t i) { return A[i]; });
}

template <class T>
inline range<T> make_range(T* A, size_t n) {
  return range<T>(A, A + n);
}

template <class T>
inline range<T> make_range(T* start, T* end) {
  return range<T>(start, end);
}

template <class Seq>
inline auto reduce_add(Seq const& I) -> typename Seq::value_type {
  using T = typename Seq::value_type;
  return reduce(make_slice(I), addm<T>());
}

template <class Seq>
inline auto reduce_max(Seq const& I) -> typename Seq::value_type {
  using T = typename Seq::value_type;
  return reduce(make_slice(I), maxm<T>());
}

template <class Seq>
inline auto reduce_min(Seq const& I) -> typename Seq::value_type {
  using T = typename Seq::value_type;
  return reduce(make_slice(I), minm<T>());
}

template <class Seq>
inline auto reduce_xor(Seq const& I) -> typename Seq::value_type {
  using T = typename Seq::value_type;
  return reduce(make_slice(I), xorm<T>());
}

// Writes the list of indices `i` where `Fl[i] == true` to range `Out`.
template <class Bool_Seq, class Out_Seq>
size_t pack_index_out(Bool_Seq const& Fl, Out_Seq&& Out, flags fl = no_flag) {
  using Idx_Type = typename std::remove_reference<Out_Seq>::type::value_type;
  auto identity = [](size_t i) { return (Idx_Type)i; };
  return pack_out(make_delayed<Idx_Type>(Fl.size(), identity), Fl,
                  std::forward<Out_Seq>(Out), fl);
}

// ====================== binary search =======================
using parlay::internal::binary_search;

// ====================== sample sort =======================
using parlay::internal::sample_sort;
using parlay::internal::sample_sort_inplace;
using parlay::stable_sort;
using parlay::stable_sort_inplace;

// ====================== integer sort =======================
using parlay::integer_sort_inplace;
using parlay::integer_sort;
using parlay::internal::count_sort;

// ====================== random shuffle =======================
using random = parlay::random;
using parlay::random_permutation;
using parlay::random_shuffle;

}

// Other extensions to pbbs used by the graph benchmarks.
namespace pbbslib {

constexpr size_t _F_BSIZE = 2000;

// Transforms input sequence `[a_0, a_1, ..., a_{n-1}]` to sequence `[f(0, a_0),
// f(1, a_1), ..., f(n-1, a_{n-1})]` using input function `f`.
//
// Arguments:
//   A: sequence-like object with elements of type `T`
//     Input array.
//   f: (size_t, T) -> OT
//     Function to apply to input array.
//
// Returns:
//   sequence<OT>
//     Result of applying `f` to each element of `A` along with the index of
//     that element in `A`.
template <class OT, class Seq, class Func>
auto map_with_index(Seq const& A, Func&& f, flags fl = no_flag) -> sequence<OT> {
  return sequence<OT>::from_function(A.size(),
                                     [&](size_t i) { return f(i, A[i]); });
}

template <class OT, class Seq, class UnaryFunc>
auto map(Seq const& A, UnaryFunc f, flags fl = no_flag) -> sequence<OT> {
  return sequence<OT>::from_function(A.size(),
                                     [&](size_t i) { return f(A[i]); });
}

template <class In_Seq, class F>
auto filter_index(In_Seq const& In, F f, flags fl = no_flag)
    -> sequence<typename In_Seq::value_type> {
  using T = typename In_Seq::value_type;
  size_t n = In.size();
  size_t l = num_blocks(n, _block_size);
  sequence<size_t> Sums(l);
  sequence<bool> Fl(n);
  sliced_for(n, _block_size, [&](size_t i, size_t s, size_t e) {
    size_t r = 0;
    for (size_t j = s; j < e; j++) r += (Fl[j] = f(In[j], j));
    Sums[i] = r;
  });
  size_t m = scan_inplace(make_slice(Sums));
  sequence<T> Out = sequence<T>::uninitialized(m);
  sliced_for(n, _block_size, [&](size_t i, size_t s, size_t e) {
    pack_serial_at(make_slice(In).cut(s, e), make_slice(Fl).cut(s, e),
                   make_slice(Out).cut(Sums[i], (i == l - 1) ? m : Sums[i + 1]));
  });
  return Out;
}

template <class Idx_Type, class D, class F>
inline sequence<std::tuple<Idx_Type, D> > pack_index_and_data(F& f, size_t size) {
  auto id_seq = pbbslib::make_delayed<std::tuple<Idx_Type, D> >(size, [&](size_t i) {
    return std::make_tuple((Idx_Type)i, std::get<1>(f[i]));
  });
  auto flgs_seq =
      pbbslib::make_delayed<bool>(size, [&](size_t i) { return std::get<0>(f[i]); });
  return pbbslib::pack(id_seq, flgs_seq);
}

template <class Seq, class Compare>
typename Seq::value_type kth_smallest(Seq const& s, size_t k, Compare less,
                                      random r = random()) {
  using T = typename Seq::value_type;
  size_t n = s.size();
  T pivot = s[r[0] % n];
  sequence<T> smaller = filter(s, [&](T a) { return less(a, pivot); });
  if (k < smaller.size())
    return kth_smallest(smaller, k, less, r.next());
  else {
    sequence<T> larger = filter(s, [&](T a) { return less(pivot, a); });
    if (k >= n - larger.size())
      return kth_smallest(larger, k - n + larger.size(), less, r.next());
    else
      return pivot;
  }
}

template <class Seq, class Compare>
typename Seq::value_type approximate_kth_smallest(Seq const& S, size_t k,
                                                  Compare less,
                                                  random r = random()) {
  // raise exception if empty sequence?
  using T = typename Seq::value_type;
  size_t n = S.size();
  size_t num_samples = n / sqrt(n);
  sequence<T> samples = sequence<T>::from_function(
      num_samples, [&](size_t i) -> T { return S[r[i] % n]; });
  return sample_sort(make_slice(samples), less)[k * num_samples / n];
  // kth_smallest(samples, k * num_samples / n, less);
}

template <class T, class Pred>
inline size_t filter_seq(T* in, T* out, size_t n, Pred p) {
  size_t k = 0;
  for (size_t i = 0; i < n; i++)
    if (p(in[i])) out[k++] = in[i];
  return k;
}

// Faster for a small number in output (about 40% or less)
// Destroys the input. Does not need a bool array.
template <class T, class PRED>
inline size_t filterf(T* In, T* Out, size_t n, PRED p) {
  size_t b = _F_BSIZE;
  if (n < b) return filter_seq(In, Out, n, p);
  size_t l = num_blocks(n, b);
  auto Sums = sequence<size_t>::uninitialized(l + 1);
  parallel_for(0, l, [&](size_t i) {
    size_t s = i * b;
    size_t e = std::min(s + b, n);
    size_t k = s;
    for (size_t j = s; j < e; j++) {
      if (p(In[j])) In[k++] = In[j];
    }
    Sums[i] = k - s;
  }, 1);
  Sums[l] = 0;
  size_t m = scan_inplace(make_slice(Sums));
  Sums[l] = m;
  parallel_for(0, l, [&](size_t i) {
    T* I = In + i * b;
    T* O = Out + Sums[i];
    for (size_t j = 0; j < Sums[i + 1] - Sums[i]; j++) {
      O[j] = I[j];
    }
  }, 1);
  return m;
}

// Faster for a small number in output (about 40% or less)
// Destroys the input. Does not need a bool array.
template <class T, class PRED, class OUT>
inline size_t filterf(T* In, size_t n, PRED p, OUT out, size_t out_off) {
  size_t b = _F_BSIZE;
  if (n < b) {
    size_t k = out_off;
    for (size_t i = 0; i < n; i++) {
      if (p(In[i])) out(k++, In[i]);
    }
    return k - out_off;
  }
  size_t l = num_blocks(n, b);
  auto Sums = sequence<size_t>::uninitialized(l + 1);
  parallel_for(0, l, [&](size_t i) {
    size_t s = i * b;
    size_t e = std::min(s + b, n);
    size_t k = s;
    for (size_t j = s; j < e; j++) {
      if (p(In[j])) In[k++] = In[j];
    }
    Sums[i] = k - s;
  }, 1);
  Sums[l] = 0;
  size_t m = scan_inplace(make_slice(Sums));
  Sums[l] = m;
  parallel_for(0, l, [&](size_t i) {
    T* I = In + i * b;
    size_t si = out_off + Sums[i];
    for (size_t j = 0; j < Sums[i + 1] - Sums[i]; j++) {
      out(si + j, I[j]);
    }
  }, 1);
  return m;
}

template <class T, class PRED>
inline size_t filterf_and_clear(T* In, T* Out, size_t n, PRED p, T& empty) {
  size_t b = _F_BSIZE;
  if (n < b) {
    size_t ret = filter_seq(In, Out, n, p);
    for (size_t i = 0; i < n; i++) {
      if (p(In[i])) {
        In[i] = empty;
      }
    }
    return ret;
  }
  size_t l = num_blocks(n, b);
  b = num_blocks(n, l);
  auto Sums = sequence<size_t>::uninitialized(l + 1);
  parallel_for(0, l, [&](size_t i) {
    size_t s = i * b;
    size_t e = std::min(s + b, n);
    size_t k = s;
    for (size_t j = s; j < e; j++) {
      if (p(In[j])) {
        In[k] = In[j];
        if (k != j) {
          In[j] = empty;
        }
        k++;
      }
    }
    Sums[i] = k - s;
  }, 1);
  Sums[l] = 0;
  size_t m = scan_inplace(make_slice(Sums));
  Sums[l] = m;
  parallel_for(0, l, [&](size_t i) {
    T* I = In + (i * b);
    size_t i_off = Sums[i];
    size_t num_i = Sums[i + 1] - i_off;
    T* O = Out + i_off;
    for (size_t j = 0; j < num_i; j++) {
      O[j] = I[j];
      I[j] = empty;
    }
  }, 1);
  return m;
}

template <class E, class I, class P>
struct filter_iter {
  I& iter;
  P& pred;
  E cur_val;

  filter_iter(I& _it, P& _pr) : iter(_it), pred(_pr) {
    cur_val = iter.cur();
    while (!pred(cur_val) && iter.has_next()) {
      cur_val = iter.next();
    }
  }

  E cur() { return cur_val; }

  E next() {
    while (iter.has_next()) {
      cur_val = iter.next();
      if (pred(cur_val)) {
        break;
      }
    }
    return cur_val;
  }

  // has_next
};

template <class E, class I, class P>
inline filter_iter<E, I, P> make_filter_iter(I& _it, P& _pr) {
  return filter_iter<E, I, P>(_it, _pr);
}

int t_to_stringlen(long a);
void type_to_string(char* s, long a);
int t_to_stringlen(unsigned long a);
void type_to_string(char* s, unsigned long a);
uint t_to_stringlen(uint a);
void type_to_string(char* s, uint a);
int t_to_stringlen(int a);
void type_to_string(char* s, int a);
int t_to_stringlen(double a);
int t_to_stringlen(char* a);
void type_to_string(char* s, char* a);
void type_to_string(char* s, double a);

template <class A, class B>
inline int t_to_stringlen(std::pair<A, B> a) {
  return t_to_stringlen(a.first) + t_to_stringlen(a.second) + 1;
}

template <class A, class B>
inline int t_to_stringlen(std::tuple<A, B> a) {
  return t_to_stringlen(std::get<0>(a)) + t_to_stringlen(std::get<1>(a)) + 1;
}

template <class A, class B, class C>
inline int t_to_stringlen(std::tuple<A, B, C> a) {
  return t_to_stringlen(std::get<0>(a)) + t_to_stringlen(std::get<1>(a)) +
         t_to_stringlen(std::get<2>(a)) + 2;
}

template <class A, class B>
inline void type_to_string(char* s, std::pair<A, B> a) {
  int l = t_to_stringlen(a.first);
  type_to_string(s, a.first);
  s[l] = ' ';
  type_to_string(s + l + 1, a.second);
}

template <class A, class B>
inline void type_to_string(char* s, std::tuple<A, B> a) {
  int l = t_to_stringlen(std::get<0>(a));
  type_to_string(s, std::get<0>(a));
  s[l] = ' ';
  type_to_string(s + l + 1, std::get<1>(a));
}

template <class A, class B, class C>
inline void type_to_string(char* s, std::tuple<A, B, C> a) {
  int l = t_to_stringlen(std::get<0>(a));
  type_to_string(s, std::get<0>(a));
  s[l] = ' ';
  int l1 = t_to_stringlen(std::get<1>(a));
  type_to_string(s + l + 1, std::get<1>(a));
  s[l + l1 + 1] = ' ';
  type_to_string(s + l + l1 + 2, std::get<2>(a));
}

template <class TSeq>
sequence<char> sequence_to_string(TSeq const& T) {
  size_t n = T.size();
  auto S = sequence<size_t>::from_function(n, [&](size_t i) {
    return t_to_stringlen(T[i]) + 1;  // +1 for \n
  });
  size_t m = pbbslib::scan_inplace(make_slice(S), addm<size_t>());

  auto C = sequence<char>::from_function(m, [&](size_t i) { return (char)0; });
  parallel_for(0, n - 1, [&](size_t i) {
    type_to_string(C.begin() + S[i], T[i]);
    C[S[i + 1] - 1] = '\n';
  });
  type_to_string(C.begin() + S[n - 1], T[n - 1]);
  C[m - 1] = '\n';

  return pbbslib::filter(C, [&](char A) { return A > 0; });
}

}
target-4.c
#include <omp.h>
#include <stdlib.h>

int main ()
{
  omp_set_dynamic (0);
  #pragma omp parallel num_threads (4)
  #pragma omp target if (0)
  #pragma omp single
  if (omp_get_num_threads () != 1)
    abort ();
  return 0;
}
simd.c
/*
 * OpenMP implementation of dot product calculation.
 * This program is used as the driving example in demos in the module
 * Heterogeneous Programming with OpenMP
 *
 * @author Apan Qasem
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>

#define REPS 10000

double t0;

double mysecond() {
  struct timeval tp;
  struct timezone tzp;
  gettimeofday(&tp, &tzp);
  return ((double) tp.tv_sec + (double) tp.tv_usec * 1.e-6);
}

int main(int argc, char *argv[]) {
  if (argc < 3) {
    fprintf(stderr, "usage: %s <vector size> <num threads>\n", argv[0]);
    return 1;
  }
  int M = atoi(argv[1]);   // size of vectors
  int N = atoi(argv[2]);   // number of OpenMP threads

  float *a, *b;
  a = (float*) malloc(sizeof(float) * M);
  b = (float*) malloc(sizeof(float) * M);

  int i, j;
  for (i = 0; i < M; i++) {
    a[i] = i;
    b[i] = i + 3;
  }

  omp_set_num_threads(N);

  float sum = 0;
  t0 = mysecond();
  for (j = 0; j < REPS; j++) {
    /* the original loop started at an uninitialized k; it must start at 0 */
    #pragma omp parallel for simd reduction(+:sum)
    for (i = 0; i < M; i++)
      sum += a[i] * b[i];
  }
  t0 = (mysecond() - t0) * 1.e3;

  fprintf(stdout, "result = %1.3e\n", sum);
  fprintf(stdout, "parallel loop = %3.2f ms\n", t0);
  return 0;
}
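Because a[i] = i and b[i] = i + 3, the dot product has a closed form, sum_{i=0}^{M-1} i*(i+3) = (M-1)*M*(M+4)/3, accumulated REPS times, so a reference value can catch indexing mistakes. The float accumulator in the benchmark will drift from this double value for large M (expected_sum is illustrative, not part of the original):

/* Closed-form reference for the benchmark's dot product. */
double expected_sum(long M, long reps) {
    return (double) reps * ((double)(M - 1) * M * (M + 4) / 3.0);
}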
9953.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization. */
static void init_array(int ni, int nj,
                       DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj))
{
  // printf("Initializing Array\n");
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      A[i][j] = ((DATA_TYPE) (i + j) / nj);
    }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static void print_array(int ni, int nj,
                        DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj))
{
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
static void kernel_conv2d(int ni, int nj,
                          DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
                          DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj))
{
  int i, j;
#pragma scop
#pragma omp parallel for private(j) collapse(2) schedule(static, 1) num_threads(2)
  for (i = 1; i < _PB_NI - 1; ++i) {
    for (j = 1; j < _PB_NJ - 1; ++j) {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
  // printf("Kernel computation complete !!\n");
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array(ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d(ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
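For reference, the same stencil without the Polybench scaffolding: a 3x3 convolution with fixed weights applied to the interior points. The plain-C sketch below assumes row-major ni x nj buffers (conv2d_plain is illustrative, not part of the suite):

/* Weights match the kernel above: row di = -1, 0, +1; column dj = -1, 0, +1. */
void conv2d_plain(int ni, int nj, const double *A, double *B) {
    static const double w[3][3] = {{ 0.2, 0.5, -0.8},
                                   {-0.3, 0.6, -0.9},
                                   { 0.4, 0.7,  0.1}};
    for (int i = 1; i < ni - 1; ++i)
        for (int j = 1; j < nj - 1; ++j) {
            double acc = 0.0;
            for (int di = -1; di <= 1; ++di)
                for (int dj = -1; dj <= 1; ++dj)
                    acc += w[di + 1][dj + 1] * A[(i + di) * nj + (j + dj)];
            B[i * nj + j] = acc;
        }
}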
update_ops_named_Z.c
#include "constant.h" #include "update_ops.h" #include "utility.h" #ifdef _OPENMP #include <omp.h> #endif #ifdef _USE_SIMD #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif #endif //void Z_gate_old_single(UINT target_qubit_index, CTYPE *state, ITYPE dim); //void Z_gate_old_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim); //void Z_gate_single(UINT target_qubit_index, CTYPE *state, ITYPE dim); //void Z_gate_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim); void Z_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) { //Z_gate_old_single(target_qubit_index, state, dim); //Z_gate_old_parallel(target_qubit_index, state, dim); //Z_gate_single(target_qubit_index, state, dim); //Z_gate_single_simd(target_qubit_index, state, dim); //Z_gate_single_unroll(target_qubit_index, state, dim); //Z_gate_parallel(target_qubit_index, state, dim); //return; #ifdef _USE_SIMD #ifdef _OPENMP UINT threshold = 13; if (dim < (((ITYPE)1) << threshold)) { Z_gate_single_simd(target_qubit_index, state, dim); } else { Z_gate_parallel_simd(target_qubit_index, state, dim); } #else Z_gate_single_simd(target_qubit_index, state, dim); #endif #else #ifdef _OPENMP UINT threshold = 13; if (dim < (((ITYPE)1) << threshold)) { Z_gate_single_unroll(target_qubit_index, state, dim); } else { Z_gate_parallel_unroll(target_qubit_index, state, dim); } #else Z_gate_single_unroll(target_qubit_index, state, dim); #endif #endif } void Z_gate_single_unroll(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; if (target_qubit_index == 0) { for (state_index = 1; state_index < dim; state_index += 2) { state[state_index] *= -1; } } else { for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_index = (state_index&mask_low) + ((state_index&mask_high) << 1) + mask; state[basis_index] *= -1; state[basis_index+1] *= -1; } } } #ifdef _OPENMP void Z_gate_parallel_unroll(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; if (target_qubit_index == 0) { #pragma omp parallel for for (state_index = 1; state_index < dim; state_index += 2) { state[state_index] *= -1; } } else { #pragma omp parallel for for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_index = (state_index&mask_low) + ((state_index&mask_high) << 1) + mask; state[basis_index] *= -1; state[basis_index + 1] *= -1; } } } #endif #ifdef _USE_SIMD void Z_gate_single_simd(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; __m256d minus_one = _mm256_set_pd(-1,-1,-1,-1); if (target_qubit_index == 0) { for (state_index = 1; state_index < dim; state_index += 2) { state[state_index] *= -1; } } else { for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_index = (state_index&mask_low) + ((state_index&mask_high) << 1) + mask; double* ptr0 = (double*)(state + basis_index); __m256d data0 = _mm256_loadu_pd(ptr0); data0 = _mm256_mul_pd(data0, minus_one); _mm256_storeu_pd(ptr0, data0); } } } #ifdef _OPENMP void Z_gate_parallel_simd(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const 
ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; __m256d minus_one = _mm256_set_pd(-1, -1, -1, -1); if (target_qubit_index == 0) { #pragma omp parallel for for (state_index = 1; state_index < dim; state_index += 2) { state[state_index] *= -1; } } else { #pragma omp parallel for for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_index = (state_index&mask_low) + ((state_index&mask_high) << 1) + mask; double* ptr0 = (double*)(state + basis_index); __m256d data0 = _mm256_loadu_pd(ptr0); data0 = _mm256_mul_pd(data0, minus_one); _mm256_storeu_pd(ptr0, data0); } } } #endif #endif /* void Z_gate_old_single(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; ITYPE state_index; ITYPE mask = (1ULL << target_qubit_index); for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE temp_index = insert_zero_to_basis_index(state_index, mask, target_qubit_index) ^ mask; state[temp_index] *= -1; } } void Z_gate_old_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; ITYPE state_index; ITYPE mask = (1ULL << target_qubit_index); #ifdef _OPENMP #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE temp_index = insert_zero_to_basis_index(state_index, mask, target_qubit_index) ^ mask; state[temp_index] *= -1; } } void Z_gate_single(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index = (state_index&mask_low) + ((state_index&mask_high) << 1) + mask; state[basis_index] *= -1; } } #ifdef _OPENMP void Z_gate_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index = (state_index&mask_low) + ((state_index&mask_high) << 1) + mask; state[basis_index] *= -1; } } #endif */
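The expression (s & mask_low) + ((s & mask_high) << 1) + mask used throughout the file inserts a 1 bit at the target qubit position of the loop counter, so the loop enumerates exactly the basis states whose target bit is set (the amplitudes Z negates). A tiny standalone demonstration, not part of the original file:

#include <stdio.h>

int main(void) {
    unsigned long long target = 1;            /* target qubit index */
    unsigned long long mask = 1ULL << target;
    unsigned long long mask_low = mask - 1;
    unsigned long long mask_high = ~mask_low;
    /* prints 0 -> 2, 1 -> 3, 2 -> 6, 3 -> 7: all indices with bit 1 set */
    for (unsigned long long s = 0; s < 4; ++s) {
        unsigned long long basis = (s & mask_low) + ((s & mask_high) << 1) + mask;
        printf("%llu -> %llu\n", s, basis);
    }
    return 0;
}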
_implicit.c
/* Generated by Cython 0.24.1 */ /* BEGIN: Cython Metadata { "distutils": { "depends": [], "extra_compile_args": [ "-Wno-unused-function", "-O3", "-fopenmp", "-ffast-math", "-march=native" ], "extra_link_args": [ "-fopenmp" ], "language": "c" }, "module_name": "implicit._implicit" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000) #error Cython requires Python 2.6+ or Python 3.2+. #else #define CYTHON_ABI "0_24_1" #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000 #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? 
PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 
&& CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__implicit___implicit #define __PYX_HAVE_API__implicit___implicit #include "string.h" #include "stdlib.h" #include "pythread.h" #include "stdio.h" #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif typedef 
struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) && defined (_M_X64) #define __Pyx_sst_abs(value) _abs64(value) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? 
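/* [sketch] __Pyx_NewRef above converts a borrowed reference into an owned
   one inside a single expression, which is why helpers such as
   __Pyx_Owned_Py_None need no statement body. The function below is a
   hypothetical illustration, not generated code. */
static CYTHON_INLINE PyObject *__pyx_demo_owned_none(void) {
    return __Pyx_NewRef(Py_None); /* Py_INCREF(Py_None), then yield the pointer */
}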
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); /* +1: room for the terminating NUL copied by strcpy */ if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2
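/* [sketch] The encoding probes above are meant to run exactly once during
   module import; a failing probe must abort initialisation with the Python
   error it already set. Hypothetical call site: */
static int __pyx_demo_encoding_setup(void) {
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
    if (__Pyx_init_sys_getdefaultencoding_params() < 0)
        return -1; /* exception already raised by the probe */
#endif
    return 0;
}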
|| (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; /* None.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "implicit/_implicit.pyx", "stringsource", }; /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* Atomics.proto */ #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define 
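/* [sketch] Element addressing with the __Pyx_memviewslice declared above:
   base pointer plus per-dimension byte strides. Valid for direct slices
   (suboffsets[d] < 0); indirect dimensions need an extra dereference.
   Demo helper, not generated code. */
static CYTHON_INLINE char *__pyx_demo_addr_2d(const __Pyx_memviewslice *s,
                                              Py_ssize_t i, Py_ssize_t j) {
    return s->data + i * s->strides[0] + j * s->strides[1];
}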
__pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* "scipy/linalg/cython_lapack.pxd":15 * # The original libraries should be linked directly. * * ctypedef float s # <<<<<<<<<<<<<< * ctypedef double d * ctypedef float complex c */ typedef float __pyx_t_5scipy_6linalg_13cython_lapack_s; /* "scipy/linalg/cython_lapack.pxd":16 * * ctypedef float s * ctypedef double d # <<<<<<<<<<<<<< * ctypedef float complex c * ctypedef double complex z */ typedef double __pyx_t_5scipy_6linalg_13cython_lapack_d; /* "scipy/linalg/cython_blas.pxd":15 * # The original libraries should be linked directly. * * ctypedef float s # <<<<<<<<<<<<<< * ctypedef double d * ctypedef float complex c */ typedef float __pyx_t_5scipy_6linalg_11cython_blas_s; /* "scipy/linalg/cython_blas.pxd":16 * * ctypedef float s * ctypedef double d # <<<<<<<<<<<<<< * ctypedef float complex c * ctypedef double complex z */ typedef double __pyx_t_5scipy_6linalg_11cython_blas_d; /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "scipy/linalg/cython_lapack.pxd":22 * # Function pointer type declarations for * # gees and gges families of functions. * ctypedef bint cselect1(c*) # <<<<<<<<<<<<<< * ctypedef bint cselect2(c*, c*) * ctypedef bint dselect2(d*, d*) */ typedef int __pyx_t_5scipy_6linalg_13cython_lapack_cselect1(__pyx_t_float_complex *); /* "scipy/linalg/cython_lapack.pxd":23 * # gees and gges families of functions. 
* ctypedef bint cselect1(c*) * ctypedef bint cselect2(c*, c*) # <<<<<<<<<<<<<< * ctypedef bint dselect2(d*, d*) * ctypedef bint dselect3(d*, d*, d*) */ typedef int __pyx_t_5scipy_6linalg_13cython_lapack_cselect2(__pyx_t_float_complex *, __pyx_t_float_complex *); /* "scipy/linalg/cython_lapack.pxd":24 * ctypedef bint cselect1(c*) * ctypedef bint cselect2(c*, c*) * ctypedef bint dselect2(d*, d*) # <<<<<<<<<<<<<< * ctypedef bint dselect3(d*, d*, d*) * ctypedef bint sselect2(s*, s*) */ typedef int __pyx_t_5scipy_6linalg_13cython_lapack_dselect2(__pyx_t_5scipy_6linalg_13cython_lapack_d *, __pyx_t_5scipy_6linalg_13cython_lapack_d *); /* "scipy/linalg/cython_lapack.pxd":25 * ctypedef bint cselect2(c*, c*) * ctypedef bint dselect2(d*, d*) * ctypedef bint dselect3(d*, d*, d*) # <<<<<<<<<<<<<< * ctypedef bint sselect2(s*, s*) * ctypedef bint sselect3(s*, s*, s*) */ typedef int __pyx_t_5scipy_6linalg_13cython_lapack_dselect3(__pyx_t_5scipy_6linalg_13cython_lapack_d *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, __pyx_t_5scipy_6linalg_13cython_lapack_d *); /* "scipy/linalg/cython_lapack.pxd":26 * ctypedef bint dselect2(d*, d*) * ctypedef bint dselect3(d*, d*, d*) * ctypedef bint sselect2(s*, s*) # <<<<<<<<<<<<<< * ctypedef bint sselect3(s*, s*, s*) * ctypedef bint zselect1(z*) */ typedef int __pyx_t_5scipy_6linalg_13cython_lapack_sselect2(__pyx_t_5scipy_6linalg_13cython_lapack_s *, __pyx_t_5scipy_6linalg_13cython_lapack_s *); /* "scipy/linalg/cython_lapack.pxd":27 * ctypedef bint dselect3(d*, d*, d*) * ctypedef bint sselect2(s*, s*) * ctypedef bint sselect3(s*, s*, s*) # <<<<<<<<<<<<<< * ctypedef bint zselect1(z*) * ctypedef bint zselect2(z*, z*) */ typedef int __pyx_t_5scipy_6linalg_13cython_lapack_sselect3(__pyx_t_5scipy_6linalg_13cython_lapack_s *, __pyx_t_5scipy_6linalg_13cython_lapack_s *, __pyx_t_5scipy_6linalg_13cython_lapack_s *); /* "scipy/linalg/cython_lapack.pxd":28 * ctypedef bint sselect2(s*, s*) * ctypedef bint sselect3(s*, s*, s*) * ctypedef bint zselect1(z*) # <<<<<<<<<<<<<< * ctypedef bint zselect2(z*, z*) * */ typedef int __pyx_t_5scipy_6linalg_13cython_lapack_zselect1(__pyx_t_double_complex *); /* "scipy/linalg/cython_lapack.pxd":29 * ctypedef bint sselect3(s*, s*, s*) * ctypedef bint zselect1(z*) * ctypedef bint zselect2(z*, z*) # <<<<<<<<<<<<<< * * cdef void cbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, c *vt, int *ldvt, c *u, int *ldu, c *c, int *ldc, s *rwork, int *info) nogil */ typedef int __pyx_t_5scipy_6linalg_13cython_lapack_zselect2(__pyx_t_double_complex *, __pyx_t_double_complex *); /* "View.MemoryView":103 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":275 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":326 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; 
__pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":951 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":103 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":326 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":951 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define 
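/* [sketch] How generated function bodies use the RefNanny hooks declared
   above when CYTHON_REFNANNY is enabled; with it disabled the macros
   compile away to plain Py_INCREF and no-ops. Hypothetical traced
   function: */
static PyObject *__pyx_demo_traced(PyObject *obj) {
    __Pyx_RefNannyDeclarations
    __Pyx_RefNannySetupContext("__pyx_demo_traced", 0);
    __Pyx_INCREF(obj);               /* recorded by the nanny */
    __Pyx_RefNannyFinishContext();   /* flags unbalanced references */
    return obj;
}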
__Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* PyThreadStateGet.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET(); #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #endif /* SaveResetException.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) 
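/* [sketch] The __Pyx_XDECREF_SET pattern above publishes the new value
   before dropping the old reference, so a __del__ triggered by the DECREF
   can never observe a dangling pointer in the slot. Spelled out without
   the macro: */
static void __pyx_demo_set_slot(PyObject **slot, PyObject *value) {
    PyObject *tmp = *slot;
    Py_INCREF(value);
    *slot = value;    /* slot is consistent before any callback can run */
    Py_XDECREF(tmp);  /* may re-enter arbitrary Python code */
}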
PyErr_ExceptionMatches(err) #endif /* GetException.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* PyDictContains.proto */ static CYTHON_INLINE int __Pyx_PyDict_ContainsTF(PyObject* item, PyObject* dict, int eq) { int result = PyDict_Contains(dict, item); return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); } /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) PyErr_SetObject(PyExc_KeyError, args); Py_XDECREF(args); } return NULL; } Py_INCREF(value); return value; } #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyErrFetchRestore.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* UnicodeAsUCS4.proto */ static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject*); /* object_ord.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyObject_Ord(c)\ (likely(PyUnicode_Check(c)) ? (long)__Pyx_PyUnicode_AsPy_UCS4(c) : __Pyx__PyObject_Ord(c)) #else #define __Pyx_PyObject_Ord(c) __Pyx__PyObject_Ord(c) #endif static long __Pyx__PyObject_Ord(PyObject* c); /* SetItemInt.proto */ #define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\ (is_list ? 
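/* [sketch] __Pyx_PyDict_ContainsTF above folds `in` and `not in` into one
   helper: pass Py_EQ for `in` and Py_NE for `not in`; a negative return
   propagates an error from PyDict_Contains. Hypothetical wrapper: */
static CYTHON_INLINE int __pyx_demo_key_in(PyObject *d, PyObject *k) {
    return __Pyx_PyDict_ContainsTF(k, d, Py_EQ); /* 1, 0, or -1 on error */
}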
(PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\ __Pyx_SetItemInt_Generic(o, to_py_func(i), v))) static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v); static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, int wraparound, int boundscheck); /* IterFinish.proto */ static CYTHON_INLINE int __Pyx_IterFinish(void); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallNoArg.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* PyObjectCallMethod0.proto */ static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* UnpackItemEndCheck.proto */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* UnpackTupleError.proto */ static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /* UnpackTuple2.proto */ static CYTHON_INLINE int __Pyx_unpack_tuple2(PyObject* tuple, PyObject** value1, PyObject** value2, int is_tuple, int has_known_size, int decref_tuple); /* dict_iter.proto */ static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* dict, int is_dict, PyObject* method_name, Py_ssize_t* p_orig_length, int* p_is_dict); static CYTHON_INLINE int __Pyx_dict_iter_next(PyObject* dict_or_iter, Py_ssize_t orig_length, Py_ssize_t* ppos, PyObject** pkey, PyObject** pvalue, PyObject** pitem, int is_dict); /* ListAppend.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* SwapException.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* BufferFormatCheck.proto */ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void 
__Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /*proto*/ /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS 8 /* matches the 8-element shape/stride arrays in __Pyx_memviewslice */ #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* ArgTypeTest.proto */ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* None.proto */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ?
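/* [sketch] Pairing of the slice refcount macros declared above: every
   copied slice bumps the acquisition count once and releases it exactly
   once; __Pyx_XDEC_MEMVIEW also clears the slice it is given. Demo only: */
static void __pyx_demo_borrow_slice(__Pyx_memviewslice src) {
    __Pyx_memviewslice copy = src;
    __PYX_INC_MEMVIEW(&copy, 1);  /* have_gil = 1 */
    /* ... read through copy.data ... */
    __PYX_XDEC_MEMVIEW(&copy, 1); /* leaves copy.memview == NULL */
}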
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\ (inplace ? 
PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* None.proto */ static CYTHON_INLINE long __Pyx_div_long(long, long); /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* FetchCommonType.proto */ static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); /* CythonFunction.proto */ #define __Pyx_CyFunction_USED 1 #include <structmember.h> #define __Pyx_CYFUNCTION_STATICMETHOD 0x01 #define __Pyx_CYFUNCTION_CLASSMETHOD 0x02 #define __Pyx_CYFUNCTION_CCLASS 0x04 #define __Pyx_CyFunction_GetClosure(f)\ (((__pyx_CyFunctionObject *) (f))->func_closure) #define __Pyx_CyFunction_GetClassObj(f)\ (((__pyx_CyFunctionObject *) (f))->func_classobj) #define __Pyx_CyFunction_Defaults(type, f)\ ((type *)(((__pyx_CyFunctionObject *) (f))->defaults)) #define __Pyx_CyFunction_SetDefaultsGetter(f, g)\ ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g) typedef struct { PyCFunctionObject func; #if PY_VERSION_HEX < 0x030500A0 PyObject *func_weakreflist; #endif PyObject *func_dict; PyObject *func_name; PyObject *func_qualname; PyObject *func_doc; PyObject *func_globals; PyObject *func_code; PyObject *func_closure; PyObject *func_classobj; void *defaults; int defaults_pyobjects; int flags; PyObject *defaults_tuple; PyObject *defaults_kwdict; PyObject *(*defaults_getter)(PyObject *); PyObject *func_annotations; } __pyx_CyFunctionObject; static PyTypeObject *__pyx_CyFunctionType = 0; #define __Pyx_CyFunction_NewEx(ml, flags, qualname, self, module, globals, code)\ __Pyx_CyFunction_New(__pyx_CyFunctionType, ml, flags, qualname, self, module, globals, code) static PyObject *__Pyx_CyFunction_New(PyTypeObject *, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *self, PyObject *module, PyObject *globals, PyObject* code); static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, size_t size, int pyobjects); static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, PyObject *tuple); static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, PyObject *dict); static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, PyObject *dict); static int __pyx_CyFunction_init(void); /* FusedFunction.proto */ typedef struct { __pyx_CyFunctionObject func; PyObject *__signatures__; PyObject *type; PyObject *self; } __pyx_FusedFunctionObject; #define __pyx_FusedFunction_NewEx(ml, flags, qualname, self, module, globals, code)\ __pyx_FusedFunction_New(__pyx_FusedFunctionType, ml, flags, qualname, self, module, globals, code) static PyObject *__pyx_FusedFunction_New(PyTypeObject *type, PyMethodDef *ml, int flags, PyObject *qualname, PyObject *self, PyObject *module, PyObject *globals, PyObject *code); static int __pyx_FusedFunction_clear(__pyx_FusedFunctionObject *self); static PyTypeObject *__pyx_FusedFunctionType = NULL; static int __pyx_FusedFunction_init(void); #define __Pyx_FusedFunction_USED /* CodeObjectCache.proto */ typedef struct { PyCodeObject* 
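/* [sketch] Why a dedicated __Pyx_div_long exists at all: C integer
   division truncates toward zero while Python's // floors, so the helper
   must adjust the quotient whenever the remainder and divisor disagree in
   sign. An equivalent reimplementation (assumes b != 0): */
static CYTHON_INLINE long __pyx_demo_floor_div(long a, long b) {
    long q = a / b;
    long r = a - q * b;
    if (r != 0 && ((r < 0) != (b < 0)))
        q -= 1; /* round toward negative infinity, as Python does */
    return q;
}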
code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* None.proto */ static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_float(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* BytesContains.proto */ static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_float(const char *itemp); static CYTHON_INLINE int __pyx_memview_set_float(const char *itemp, PyObject *obj); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp); static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj); /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) 
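/* [sketch] How __Pyx_AddTraceback above typically consults the code-object
   cache declared next to it: look up by C line, create an empty code
   object on a miss and insert it for reuse. The funcname "demo" is
   illustrative only. */
static PyCodeObject *__pyx_demo_code_for_line(int c_line) {
    PyCodeObject *co = __pyx_find_code_object(c_line);
    if (!co) {
        co = PyCode_NewEmpty(__pyx_cfilenm, "demo", c_line);
        if (co) __pyx_insert_code_object(c_line, co);
    }
    return co;
}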
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* None.proto */ static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* None.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* None.proto */ static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /* None.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t 
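/* [sketch] The __Pyx_c_* macros above give one spelling for complex
   arithmetic across C99 _Complex, C++ std::complex and the plain-struct
   fallback; generated expressions compose them like this demo (a*x + b): */
static CYTHON_INLINE __pyx_t_double_complex __pyx_demo_axpb(
        __pyx_t_double_complex a, __pyx_t_double_complex x,
        __pyx_t_double_complex b) {
    return __Pyx_c_sum(__Pyx_c_prod(a, x), b);
}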
sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_float(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* FunctionImport.proto */ static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cython.view' */ /* Module declarations from 'cython' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'scipy.linalg.cython_lapack' */ static void (*__pyx_f_5scipy_6linalg_13cython_lapack_dgesv)(int *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, int *); /*proto*/ static void (*__pyx_f_5scipy_6linalg_13cython_lapack_dposv)(char *, int *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, int *); /*proto*/ static void (*__pyx_f_5scipy_6linalg_13cython_lapack_sgesv)(int *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_s *, int *, int *, 
__pyx_t_5scipy_6linalg_13cython_lapack_s *, int *, int *); /*proto*/ static void (*__pyx_f_5scipy_6linalg_13cython_lapack_sposv)(char *, int *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_s *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_s *, int *, int *); /*proto*/ /* Module declarations from 'scipy.linalg.cython_blas' */ static void (*__pyx_f_5scipy_6linalg_11cython_blas_daxpy)(int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *); /*proto*/ static void (*__pyx_f_5scipy_6linalg_11cython_blas_saxpy)(int *, __pyx_t_5scipy_6linalg_11cython_blas_s *, __pyx_t_5scipy_6linalg_11cython_blas_s *, int *, __pyx_t_5scipy_6linalg_11cython_blas_s *, int *); /*proto*/ /* Module declarations from 'implicit._implicit' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static CYTHON_INLINE void __pyx_fuse_0__pyx_f_8implicit_9_implicit_axpy(int *, float *, float *, int *, float *, int *); /*proto*/ static CYTHON_INLINE void __pyx_fuse_1__pyx_f_8implicit_9_implicit_axpy(int *, double *, double *, int *, double *, int *); /*proto*/ static CYTHON_INLINE void __pyx_fuse_0__pyx_f_8implicit_9_implicit_posv(char *, int *, int *, float *, int *, float *, int *, int *); /*proto*/ static CYTHON_INLINE void __pyx_fuse_1__pyx_f_8implicit_9_implicit_posv(char *, int *, int *, double *, int *, double *, int *, int *); /*proto*/ static CYTHON_INLINE void __pyx_fuse_0__pyx_f_8implicit_9_implicit_gesv(int *, int *, float *, int *, int *, float *, int *, int *); /*proto*/ static CYTHON_INLINE void __pyx_fuse_1__pyx_f_8implicit_9_implicit_gesv(int *, int *, double *, int *, int *, double *, int *, int *); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject 
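/* [sketch] The LAPACK entry points above are function *pointers* filled in
   at import time from scipy.linalg.cython_lapack; they follow Fortran
   calling conventions, so every argument, scalars included, is passed by
   address. Hypothetical call site solving a 2x2 SPD system A x = b in
   place via dposv: */
static int __pyx_demo_solve_spd2(double *A, double *b) {
    char uplo = 'U';
    int n = 2, nrhs = 1, lda = 2, ldb = 2, info = 0;
    __pyx_f_5scipy_6linalg_13cython_lapack_dposv(&uplo, &n, &nrhs,
                                                 A, &lda, b, &ldb, &info);
    return info; /* 0 on success, > 0 if A is not positive definite */
}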
*__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; #define __Pyx_MODULE_NAME "implicit._implicit" int __pyx_module_is_main_implicit___implicit = 0; /* Implementation of 'implicit._implicit' */ static PyObject *__pyx_builtin_ImportError; static PyObject *__pyx_builtin_AttributeError; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_zip; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_[] = "()"; static const char __pyx_k_A[] = "A"; static const char __pyx_k_O[] = "O"; static const char __pyx_k_X[] = "X"; static const char __pyx_k_Y[] = "Y"; static const char __pyx_k_b[] = "b"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_i[] = "i"; static const char __pyx_k_j[] = "j"; static const char __pyx_k_u[] = "u"; static const char __pyx_k__3[] = "|"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_Cui[] = "Cui"; static const char __pyx_k_YtY[] = "YtY"; static const char __pyx_k_dot[] = "dot"; static const char __pyx_k_err[] = "err"; static const char __pyx_k_eye[] = "eye"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_one[] = "one"; static const char __pyx_k_zip[] = "zip"; static const char __pyx_k_args[] = "args"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_data[] = "data"; static const char __pyx_k_kind[] = "kind"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_temp[] = "temp"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_float[] = "float"; static const char __pyx_k_index[] = "index"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_pivot[] = "pivot"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_split[] = "split"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_strip[] = "strip"; static const char __pyx_k_users[] = "users"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_double[] = "double"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_indptr[] = "indptr"; static const char __pyx_k_kwargs[] = "kwargs"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_factors[] = "factors"; static const char __pyx_k_float32[] = "float32"; static const char __pyx_k_float64[] = "float64"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_indices[] = "indices"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_ndarray[] = "ndarray"; static const char __pyx_k_Ellipsis[] = 
"Ellipsis"; static const char __pyx_k_defaults[] = "defaults"; static const char __pyx_k_initialA[] = "initialA"; static const char __pyx_k_initialB[] = "initialB"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_transpose[] = "transpose"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_confidence[] = "confidence"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_signatures[] = "signatures"; static const char __pyx_k_ImportError[] = "ImportError"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_num_threads[] = "num_threads"; static const char __pyx_k_least_squares[] = "least_squares"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_AttributeError[] = "AttributeError"; static const char __pyx_k_regularization[] = "regularization"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_implicit__implicit[] = "implicit._implicit"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_No_matching_signature_found[] = "No matching signature found"; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_Expected_at_least_d_arguments[] = "Expected at least %d arguments"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_Singular_matrix_err_i_on_row_i[] = "Singular matrix (err=%i) on row %i"; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_home_escherba_dev_vevo_tyrion_e[] = "/home/escherba/dev/vevo/tyrion/env/src/implicit/implicit/_implicit.pyx"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Function_call_with_ambiguous_arg[] = "Function call with ambiguous argument types"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got 
differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_kp_s_; static PyObject *__pyx_n_s_A; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_n_s_AttributeError; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Cui; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_s_Expected_at_least_d_arguments; static PyObject *__pyx_kp_s_Function_call_with_ambiguous_arg; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_kp_s_No_matching_signature_found; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_kp_s_Singular_matrix_err_i_on_row_i; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_X; static PyObject *__pyx_n_s_Y; static PyObject *__pyx_n_s_YtY; static PyObject *__pyx_kp_s__3; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_args; static PyObject *__pyx_n_s_b; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_confidence; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_data; static PyObject *__pyx_n_s_defaults; static PyObject *__pyx_n_s_dot; static PyObject *__pyx_n_s_double; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_err; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_eye; static PyObject *__pyx_n_s_factors; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_float; static PyObject *__pyx_n_s_float32; static PyObject *__pyx_n_s_float64; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_kp_s_home_escherba_dev_vevo_tyrion_e; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_implicit__implicit; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_index; static PyObject *__pyx_n_s_indices; static PyObject *__pyx_n_s_indptr; static PyObject *__pyx_n_s_initialA; static PyObject *__pyx_n_s_initialB; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_kind; static PyObject *__pyx_n_s_kwargs; static PyObject *__pyx_n_s_least_squares; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndarray; static PyObject 
*__pyx_n_s_ndim; static PyObject *__pyx_n_s_num_threads; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_one; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pivot; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_regularization; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_signatures; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_split; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_n_s_strip; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_temp; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_transpose; static PyObject *__pyx_n_s_u; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_users; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_n_s_zip; static PyObject *__pyx_pf_8implicit_9_implicit_least_squares(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_signatures, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs, CYTHON_UNUSED PyObject *__pyx_v_defaults); /* proto */ static PyObject *__pyx_pf_8implicit_9_implicit_2least_squares(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_Cui, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_Y, double __pyx_v_regularization, int __pyx_v_num_threads); /* proto */ static PyObject *__pyx_pf_8implicit_9_implicit_4least_squares(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_Cui, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_Y, double __pyx_v_regularization, int __pyx_v_num_threads); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, 
PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); 
/*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__16; static PyObject *__pyx_slice__17; static PyObject *__pyx_slice__18; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static PyObject *__pyx_codeobj__21; /* "implicit/_implicit.pyx":13 * * # lapack/blas wrappers for cython fused types * cdef inline void axpy(int * n, floating * da, floating * dx, int * incx, floating * dy, int * incy) nogil: # <<<<<<<<<<<<<< * if floating is double: * cython_blas.daxpy(n, da, dx, incx, dy, incy) */ static CYTHON_INLINE void __pyx_fuse_0__pyx_f_8implicit_9_implicit_axpy(int *__pyx_v_n, float *__pyx_v_da, float *__pyx_v_dx, int *__pyx_v_incx, float *__pyx_v_dy, int *__pyx_v_incy) { /* "implicit/_implicit.pyx":17 * cython_blas.daxpy(n, da, dx, incx, dy, incy) * else: * cython_blas.saxpy(n, da, dx, incx, dy, incy) # <<<<<<<<<<<<<< * * cdef inline void posv(char * u, int * n, int * nrhs, floating * a, int * lda, floating * b, int * ldb, int * info) nogil: */ __pyx_f_5scipy_6linalg_11cython_blas_saxpy(__pyx_v_n, __pyx_v_da, __pyx_v_dx, __pyx_v_incx, __pyx_v_dy, __pyx_v_incy); /* "implicit/_implicit.pyx":13 * * # lapack/blas wrappers for cython fused types * cdef inline void axpy(int * n, floating * da, floating * dx, int * incx, floating * dy, int * incy) nogil: # <<<<<<<<<<<<<< * if floating is double: * cython_blas.daxpy(n, da, dx, incx, dy, incy) */ /* function exit code */ } static CYTHON_INLINE void __pyx_fuse_1__pyx_f_8implicit_9_implicit_axpy(int *__pyx_v_n, double *__pyx_v_da, double *__pyx_v_dx, int *__pyx_v_incx, double *__pyx_v_dy, int *__pyx_v_incy) { /* "implicit/_implicit.pyx":15 * cdef inline void axpy(int * n, floating * da, floating * dx, int * incx, floating * dy, int * incy) nogil: * if floating is double: * cython_blas.daxpy(n, da, dx, incx, dy, incy) # <<<<<<<<<<<<<< * else: * cython_blas.saxpy(n, da, dx, incx, dy, incy) */ __pyx_f_5scipy_6linalg_11cython_blas_daxpy(__pyx_v_n, __pyx_v_da, __pyx_v_dx, __pyx_v_incx, __pyx_v_dy, __pyx_v_incy); /* "implicit/_implicit.pyx":13 * * # lapack/blas wrappers for cython fused types * cdef inline void axpy(int * n, floating * da, floating * dx, int * incx, floating * dy, int * incy) nogil: # <<<<<<<<<<<<<< * if floating is double: * cython_blas.daxpy(n, da, dx, incx, dy, incy) */ /* function exit code */ } /* "implicit/_implicit.pyx":19 * cython_blas.saxpy(n, da, dx, incx, dy, incy) * * cdef inline void posv(char * u, int * n, int * nrhs, floating * a, int * lda, floating * b, int * ldb, int * info) nogil: # <<<<<<<<<<<<<< * if floating is double: * cython_lapack.dposv(u, n, nrhs, a, lda, b, ldb, info) */ static CYTHON_INLINE void __pyx_fuse_0__pyx_f_8implicit_9_implicit_posv(char *__pyx_v_u, int *__pyx_v_n, int *__pyx_v_nrhs, float *__pyx_v_a, int 
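/* The __pyx_fuse_0/__pyx_fuse_1 pairs in this region are Cython's
 * float/double monomorphizations of three fused-type wrappers from the
 * quoted .pyx source: axpy (y := a*x + y, BLAS), posv (solve A x = b for a
 * symmetric positive-definite A via Cholesky, LAPACK) and gesv (general
 * solve via LU with partial pivoting, LAPACK). A minimal SciPy sketch of the
 * same dtype dispatch, illustrative only and not part of this generated
 * module:
 *
 *     import numpy as np
 *     from scipy.linalg.blas import saxpy, daxpy
 *     from scipy.linalg.lapack import sposv, dposv, sgesv, dgesv
 *
 *     def axpy(y, x, a=1.0):
 *         # mirrors the `if floating is double` branch in the .pyx source
 *         fn = daxpy if x.dtype == np.float64 else saxpy
 *         return fn(x, y, a=a)
 *
 *     A = np.array([[4.0, 1.0], [1.0, 3.0]])
 *     b = np.array([1.0, 2.0])
 *     c, x, info = dposv(A, b)            # info == 0 on success
 *     lu, piv, x2, info2 = dgesv(A, b)    # general LU fallback
 */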
*__pyx_v_lda, float *__pyx_v_b, int *__pyx_v_ldb, int *__pyx_v_info) { /* "implicit/_implicit.pyx":23 * cython_lapack.dposv(u, n, nrhs, a, lda, b, ldb, info) * else: * cython_lapack.sposv(u, n, nrhs, a, lda, b, ldb, info) # <<<<<<<<<<<<<< * * cdef inline void gesv(int * n, int * nrhs, floating * a, int * lda, int * piv, floating * b, int * ldb, int * info) nogil: */ __pyx_f_5scipy_6linalg_13cython_lapack_sposv(__pyx_v_u, __pyx_v_n, __pyx_v_nrhs, __pyx_v_a, __pyx_v_lda, __pyx_v_b, __pyx_v_ldb, __pyx_v_info); /* "implicit/_implicit.pyx":19 * cython_blas.saxpy(n, da, dx, incx, dy, incy) * * cdef inline void posv(char * u, int * n, int * nrhs, floating * a, int * lda, floating * b, int * ldb, int * info) nogil: # <<<<<<<<<<<<<< * if floating is double: * cython_lapack.dposv(u, n, nrhs, a, lda, b, ldb, info) */ /* function exit code */ } static CYTHON_INLINE void __pyx_fuse_1__pyx_f_8implicit_9_implicit_posv(char *__pyx_v_u, int *__pyx_v_n, int *__pyx_v_nrhs, double *__pyx_v_a, int *__pyx_v_lda, double *__pyx_v_b, int *__pyx_v_ldb, int *__pyx_v_info) { /* "implicit/_implicit.pyx":21 * cdef inline void posv(char * u, int * n, int * nrhs, floating * a, int * lda, floating * b, int * ldb, int * info) nogil: * if floating is double: * cython_lapack.dposv(u, n, nrhs, a, lda, b, ldb, info) # <<<<<<<<<<<<<< * else: * cython_lapack.sposv(u, n, nrhs, a, lda, b, ldb, info) */ __pyx_f_5scipy_6linalg_13cython_lapack_dposv(__pyx_v_u, __pyx_v_n, __pyx_v_nrhs, __pyx_v_a, __pyx_v_lda, __pyx_v_b, __pyx_v_ldb, __pyx_v_info); /* "implicit/_implicit.pyx":19 * cython_blas.saxpy(n, da, dx, incx, dy, incy) * * cdef inline void posv(char * u, int * n, int * nrhs, floating * a, int * lda, floating * b, int * ldb, int * info) nogil: # <<<<<<<<<<<<<< * if floating is double: * cython_lapack.dposv(u, n, nrhs, a, lda, b, ldb, info) */ /* function exit code */ } /* "implicit/_implicit.pyx":25 * cython_lapack.sposv(u, n, nrhs, a, lda, b, ldb, info) * * cdef inline void gesv(int * n, int * nrhs, floating * a, int * lda, int * piv, floating * b, int * ldb, int * info) nogil: # <<<<<<<<<<<<<< * if floating is double: * cython_lapack.dgesv(n, nrhs, a, lda, piv, b, ldb, info) */ static CYTHON_INLINE void __pyx_fuse_0__pyx_f_8implicit_9_implicit_gesv(int *__pyx_v_n, int *__pyx_v_nrhs, float *__pyx_v_a, int *__pyx_v_lda, int *__pyx_v_piv, float *__pyx_v_b, int *__pyx_v_ldb, int *__pyx_v_info) { /* "implicit/_implicit.pyx":29 * cython_lapack.dgesv(n, nrhs, a, lda, piv, b, ldb, info) * else: * cython_lapack.sgesv(n, nrhs, a, lda, piv, b, ldb, info) # <<<<<<<<<<<<<< * * */ __pyx_f_5scipy_6linalg_13cython_lapack_sgesv(__pyx_v_n, __pyx_v_nrhs, __pyx_v_a, __pyx_v_lda, __pyx_v_piv, __pyx_v_b, __pyx_v_ldb, __pyx_v_info); /* "implicit/_implicit.pyx":25 * cython_lapack.sposv(u, n, nrhs, a, lda, b, ldb, info) * * cdef inline void gesv(int * n, int * nrhs, floating * a, int * lda, int * piv, floating * b, int * ldb, int * info) nogil: # <<<<<<<<<<<<<< * if floating is double: * cython_lapack.dgesv(n, nrhs, a, lda, piv, b, ldb, info) */ /* function exit code */ } static CYTHON_INLINE void __pyx_fuse_1__pyx_f_8implicit_9_implicit_gesv(int *__pyx_v_n, int *__pyx_v_nrhs, double *__pyx_v_a, int *__pyx_v_lda, int *__pyx_v_piv, double *__pyx_v_b, int *__pyx_v_ldb, int *__pyx_v_info) { /* "implicit/_implicit.pyx":27 * cdef inline void gesv(int * n, int * nrhs, floating * a, int * lda, int * piv, floating * b, int * ldb, int * info) nogil: * if floating is double: * cython_lapack.dgesv(n, nrhs, a, lda, piv, b, ldb, info) # <<<<<<<<<<<<<< * else: * 
cython_lapack.sgesv(n, nrhs, a, lda, piv, b, ldb, info) */ __pyx_f_5scipy_6linalg_13cython_lapack_dgesv(__pyx_v_n, __pyx_v_nrhs, __pyx_v_a, __pyx_v_lda, __pyx_v_piv, __pyx_v_b, __pyx_v_ldb, __pyx_v_info); /* "implicit/_implicit.pyx":25 * cython_lapack.sposv(u, n, nrhs, a, lda, b, ldb, info) * * cdef inline void gesv(int * n, int * nrhs, floating * a, int * lda, int * piv, floating * b, int * ldb, int * info) nogil: # <<<<<<<<<<<<<< * if floating is double: * cython_lapack.dgesv(n, nrhs, a, lda, piv, b, ldb, info) */ /* function exit code */ } /* "implicit/_implicit.pyx":33 * * @cython.boundscheck(False) * def least_squares(Cui, floating [:, :] X, floating [:, :] Y, double regularization, int num_threads): # <<<<<<<<<<<<<< * dtype = numpy.float64 if floating is double else numpy.float32 * */ /* Python wrapper */ static PyObject *__pyx_pw_8implicit_9_implicit_1least_squares(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_8implicit_9_implicit_1least_squares = {"least_squares", (PyCFunction)__pyx_pw_8implicit_9_implicit_1least_squares, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_8implicit_9_implicit_1least_squares(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_signatures = 0; PyObject *__pyx_v_args = 0; PyObject *__pyx_v_kwargs = 0; CYTHON_UNUSED PyObject *__pyx_v_defaults = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_fused_cpdef (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_signatures,&__pyx_n_s_args,&__pyx_n_s_kwargs,&__pyx_n_s_defaults,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_signatures)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_args)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 1); __PYX_ERR(0, 33, __pyx_L3_error) } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_kwargs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 2); __PYX_ERR(0, 33, __pyx_L3_error) } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_defaults)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 3); __PYX_ERR(0, 33, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_fused_cpdef") < 0)) __PYX_ERR(0, 33, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_signatures = values[0]; __pyx_v_args = values[1]; __pyx_v_kwargs = values[2]; __pyx_v_defaults = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 
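/* This region is the generated dispatcher for the fused `least_squares` def:
 * it inspects the dtype of the X argument and forwards the call to the
 * float32 or float64 specialization defined further below. Usage sketch,
 * with illustrative shapes:
 *
 *     import numpy as np
 *     X32 = np.zeros((5, 3), dtype=np.float32)  # selects the 'float' signature
 *     X64 = np.zeros((5, 3), dtype=np.float64)  # selects the 'double' signature
 */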
PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 33, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("implicit._implicit.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_8implicit_9_implicit_least_squares(__pyx_self, __pyx_v_signatures, __pyx_v_args, __pyx_v_kwargs, __pyx_v_defaults); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8implicit_9_implicit_least_squares(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_signatures, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs, CYTHON_UNUSED PyObject *__pyx_v_defaults) { PyObject *__pyx_v_dest_sig = NULL; PyTypeObject *__pyx_v_ndarray = 0; PyObject *__pyx_v_numpy = NULL; __Pyx_memviewslice __pyx_v_memslice; Py_ssize_t __pyx_v_itemsize; CYTHON_UNUSED int __pyx_v_dtype_signed; char __pyx_v_kind; PyObject *__pyx_v_arg = NULL; PyObject *__pyx_v_dtype = NULL; PyObject *__pyx_v_arg_base = NULL; PyObject *__pyx_v_candidates = NULL; PyObject *__pyx_v_sig = NULL; int __pyx_v_match_found; PyObject *__pyx_v_src_type = NULL; PyObject *__pyx_v_dst_type = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; long __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_t_13; Py_ssize_t __pyx_t_14; PyObject *(*__pyx_t_15)(PyObject *); PyObject *__pyx_t_16 = NULL; PyObject *__pyx_t_17 = NULL; PyObject *__pyx_t_18 = NULL; PyObject *(*__pyx_t_19)(PyObject *); int __pyx_t_20; __Pyx_RefNannySetupContext("least_squares", 0); __Pyx_INCREF(__pyx_v_kwargs); __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyList_SET_ITEM(__pyx_t_1, 0, Py_None); __pyx_v_dest_sig = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = (__pyx_v_kwargs == Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_kwargs, __pyx_t_1); __pyx_t_1 = 0; } { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_4, &__pyx_t_5, &__pyx_t_6); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_6); /*try:*/ { __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_numpy = __pyx_t_1; __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_numpy, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(PyType_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "type", Py_TYPE(__pyx_t_1)->tp_name), 0))) __PYX_ERR(0, 33, __pyx_L4_error) __pyx_v_ndarray = ((PyTypeObject*)__pyx_t_1); __pyx_t_1 = 0; } __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L11_try_end; __pyx_L4_error:; __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ImportError) || __Pyx_PyErr_ExceptionMatches(__pyx_builtin_AttributeError) || __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if 
(__pyx_t_7) { __Pyx_AddTraceback("implicit._implicit.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_8, &__pyx_t_9) < 0) __PYX_ERR(0, 33, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(Py_None); __Pyx_XDECREF_SET(__pyx_v_ndarray, ((PyTypeObject*)Py_None)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L5_exception_handled; } goto __pyx_L6_except_error; __pyx_L6_except_error:; __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); goto __pyx_L1_error; __pyx_L5_exception_handled:; __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); __pyx_L11_try_end:; } __pyx_v_itemsize = -1L; if (unlikely(__pyx_v_args == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(0, 33, __pyx_L1_error) } __pyx_t_10 = PyTuple_GET_SIZE(((PyObject*)__pyx_v_args)); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 33, __pyx_L1_error) __pyx_t_3 = ((1 < __pyx_t_10) != 0); if (__pyx_t_3) { if (unlikely(__pyx_v_args == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(0, 33, __pyx_L1_error) } __pyx_t_9 = PyTuple_GET_ITEM(((PyObject*)__pyx_v_args), 1); __Pyx_INCREF(__pyx_t_9); __pyx_v_arg = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L14; } if (unlikely(__pyx_v_kwargs == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(0, 33, __pyx_L1_error) } __pyx_t_3 = (__Pyx_PyDict_ContainsTF(__pyx_n_s_X, ((PyObject*)__pyx_v_kwargs), Py_EQ)); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 33, __pyx_L1_error) __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { if (unlikely(__pyx_v_kwargs == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(0, 33, __pyx_L1_error) } __pyx_t_9 = __Pyx_PyDict_GetItem(((PyObject*)__pyx_v_kwargs), __pyx_n_s_X); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_v_arg = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L14; } /*else*/ { if (unlikely(__pyx_v_args == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(0, 33, __pyx_L1_error) } __pyx_t_10 = PyTuple_GET_SIZE(((PyObject*)__pyx_v_args)); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 33, __pyx_L1_error) __pyx_t_9 = PyInt_FromSsize_t(__pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_8 = __Pyx_PyString_Format(__pyx_kp_s_Expected_at_least_d_arguments, __pyx_t_9); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(0, 33, __pyx_L1_error) } __pyx_L14:; while (1) { __pyx_t_2 = (__pyx_v_ndarray != 
((PyTypeObject*)Py_None)); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_arg, __pyx_v_ndarray); __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_v_dtype = __pyx_t_8; __pyx_t_8 = 0; goto __pyx_L18; } __pyx_t_2 = (__pyx_memoryview_check(__pyx_v_arg) != 0); if (__pyx_t_2) { __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_base); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_v_arg_base = __pyx_t_8; __pyx_t_8 = 0; __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_arg_base, __pyx_v_ndarray); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg_base, __pyx_n_s_dtype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_v_dtype = __pyx_t_8; __pyx_t_8 = 0; goto __pyx_L19; } /*else*/ { __Pyx_INCREF(Py_None); __pyx_v_dtype = Py_None; } __pyx_L19:; goto __pyx_L18; } /*else*/ { __Pyx_INCREF(Py_None); __pyx_v_dtype = Py_None; } __pyx_L18:; __pyx_v_itemsize = -1L; __pyx_t_3 = (__pyx_v_dtype != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_dtype, __pyx_n_s_itemsize); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_v_itemsize = __pyx_t_10; __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_dtype, __pyx_n_s_kind); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_11 = __Pyx_PyObject_Ord(__pyx_t_8); if (unlikely(__pyx_t_11 == (long)(Py_UCS4)-1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_v_kind = __pyx_t_11; __pyx_v_dtype_signed = (__pyx_v_kind == 'i'); switch (__pyx_v_kind) { case 'i': case 'u': break; case 'f': __pyx_t_3 = (((sizeof(float)) == __pyx_v_itemsize) != 0); if (__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L22_bool_binop_done; } __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_3 = ((((Py_ssize_t)__pyx_t_10) == 2) != 0); __pyx_t_2 = __pyx_t_3; __pyx_L22_bool_binop_done:; if (__pyx_t_2) { if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_float, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 33, __pyx_L1_error) goto __pyx_L16_break; } __pyx_t_3 = (((sizeof(double)) == __pyx_v_itemsize) != 0); if (__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L25_bool_binop_done; } __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_3 = ((((Py_ssize_t)__pyx_t_10) == 2) != 0); __pyx_t_2 = __pyx_t_3; __pyx_L25_bool_binop_done:; if (__pyx_t_2) { if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_double, long, 1, 
__Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 33, __pyx_L1_error) goto __pyx_L16_break; } break; case 'c': break; case 'O': break; default: break; } } } __pyx_t_3 = ((__pyx_v_itemsize == -1L) != 0); if (!__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L28_bool_binop_done; } __pyx_t_3 = ((__pyx_v_itemsize == (sizeof(float))) != 0); __pyx_t_2 = __pyx_t_3; __pyx_L28_bool_binop_done:; if (__pyx_t_2) { __pyx_v_memslice = __Pyx_PyObject_to_MemoryviewSlice_dsds_float(__pyx_v_arg); __pyx_t_2 = (__pyx_v_memslice.memview != 0); if (__pyx_t_2) { __PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1); if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_float, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 33, __pyx_L1_error) goto __pyx_L16_break; } /*else*/ { PyErr_Clear(); } } __pyx_t_3 = ((__pyx_v_itemsize == -1L) != 0); if (!__pyx_t_3) { } else { __pyx_t_2 = __pyx_t_3; goto __pyx_L32_bool_binop_done; } __pyx_t_3 = ((__pyx_v_itemsize == (sizeof(double))) != 0); __pyx_t_2 = __pyx_t_3; __pyx_L32_bool_binop_done:; if (__pyx_t_2) { __pyx_v_memslice = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(__pyx_v_arg); __pyx_t_2 = (__pyx_v_memslice.memview != 0); if (__pyx_t_2) { __PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1); if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_double, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 33, __pyx_L1_error) goto __pyx_L16_break; } /*else*/ { PyErr_Clear(); } } if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, Py_None, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 33, __pyx_L1_error) goto __pyx_L16_break; } __pyx_L16_break:; __pyx_t_8 = PyList_New(0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_v_candidates = ((PyObject*)__pyx_t_8); __pyx_t_8 = 0; __pyx_t_10 = 0; if (unlikely(__pyx_v_signatures == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(0, 33, __pyx_L1_error) } __pyx_t_9 = __Pyx_dict_iterator(((PyObject*)__pyx_v_signatures), 1, ((PyObject *)NULL), (&__pyx_t_12), (&__pyx_t_7)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = __pyx_t_9; __pyx_t_9 = 0; while (1) { __pyx_t_13 = __Pyx_dict_iter_next(__pyx_t_8, __pyx_t_12, &__pyx_t_10, &__pyx_t_9, NULL, NULL, __pyx_t_7); if (unlikely(__pyx_t_13 == 0)) break; if (unlikely(__pyx_t_13 == -1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_XDECREF_SET(__pyx_v_sig, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_match_found = 0; __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_sig, __pyx_n_s_strip); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_split); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_1); __Pyx_INCREF(__pyx_v_dest_sig); __Pyx_GIVEREF(__pyx_v_dest_sig); 
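/* The dtype inspection above reduces to roughly the following Python; the
 * helper name is hypothetical, shown only to summarize the generated logic:
 *
 *     def resolve_dest_sig(arg):
 *         dtype = getattr(arg, 'dtype', None)
 *         if dtype is not None and dtype.kind == 'f' and arg.ndim == 2:
 *             if dtype.itemsize == 4:
 *                 return 'float'
 *             if dtype.itemsize == 8:
 *                 return 'double'
 *         return None   # fall back to trying the memoryview coercions
 */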
PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_v_dest_sig); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_zip, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (likely(PyList_CheckExact(__pyx_t_1)) || PyTuple_CheckExact(__pyx_t_1)) { __pyx_t_9 = __pyx_t_1; __Pyx_INCREF(__pyx_t_9); __pyx_t_14 = 0; __pyx_t_15 = NULL; } else { __pyx_t_14 = -1; __pyx_t_9 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_15 = Py_TYPE(__pyx_t_9)->tp_iternext; if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 33, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; for (;;) { if (likely(!__pyx_t_15)) { if (likely(PyList_CheckExact(__pyx_t_9))) { if (__pyx_t_14 >= PyList_GET_SIZE(__pyx_t_9)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_1 = PyList_GET_ITEM(__pyx_t_9, __pyx_t_14); __Pyx_INCREF(__pyx_t_1); __pyx_t_14++; if (unlikely(0 < 0)) __PYX_ERR(0, 33, __pyx_L1_error) #else __pyx_t_1 = PySequence_ITEM(__pyx_t_9, __pyx_t_14); __pyx_t_14++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); #endif } else { if (__pyx_t_14 >= PyTuple_GET_SIZE(__pyx_t_9)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_9, __pyx_t_14); __Pyx_INCREF(__pyx_t_1); __pyx_t_14++; if (unlikely(0 < 0)) __PYX_ERR(0, 33, __pyx_L1_error) #else __pyx_t_1 = PySequence_ITEM(__pyx_t_9, __pyx_t_14); __pyx_t_14++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); #endif } } else { __pyx_t_1 = __pyx_t_15(__pyx_t_9); if (unlikely(!__pyx_t_1)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 33, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_1); } if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { PyObject* sequence = __pyx_t_1; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 33, __pyx_L1_error) } #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_16 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_17 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_16 = PyList_GET_ITEM(sequence, 0); __pyx_t_17 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_16); __Pyx_INCREF(__pyx_t_17); #else __pyx_t_16 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_16); __pyx_t_17 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_17); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { Py_ssize_t index = -1; __pyx_t_18 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_18); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_19 = Py_TYPE(__pyx_t_18)->tp_iternext; index = 0; __pyx_t_16 = __pyx_t_19(__pyx_t_18); if (unlikely(!__pyx_t_16)) goto __pyx_L39_unpacking_failed; __Pyx_GOTREF(__pyx_t_16); index = 1; __pyx_t_17 = __pyx_t_19(__pyx_t_18); if (unlikely(!__pyx_t_17)) goto __pyx_L39_unpacking_failed; __Pyx_GOTREF(__pyx_t_17); if (__Pyx_IternextUnpackEndCheck(__pyx_t_19(__pyx_t_18), 2) < 0) __PYX_ERR(0, 33, 
__pyx_L1_error) __pyx_t_19 = NULL; __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; goto __pyx_L40_unpacking_done; __pyx_L39_unpacking_failed:; __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; __pyx_t_19 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 33, __pyx_L1_error) __pyx_L40_unpacking_done:; } __Pyx_XDECREF_SET(__pyx_v_src_type, __pyx_t_16); __pyx_t_16 = 0; __Pyx_XDECREF_SET(__pyx_v_dst_type, __pyx_t_17); __pyx_t_17 = 0; __pyx_t_2 = (__pyx_v_dst_type != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { __pyx_t_1 = PyObject_RichCompare(__pyx_v_src_type, __pyx_v_dst_type, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) { __pyx_v_match_found = 1; goto __pyx_L42; } /*else*/ { __pyx_v_match_found = 0; goto __pyx_L38_break; } __pyx_L42:; } } __pyx_L38_break:; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_3 = (__pyx_v_match_found != 0); if (__pyx_t_3) { __pyx_t_20 = __Pyx_PyList_Append(__pyx_v_candidates, __pyx_v_sig); if (unlikely(__pyx_t_20 == -1)) __PYX_ERR(0, 33, __pyx_L1_error) } } __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_3 = (__pyx_v_candidates != Py_None) && (PyList_GET_SIZE(__pyx_v_candidates) != 0); __pyx_t_2 = ((!__pyx_t_3) != 0); if (__pyx_t_2) { __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(0, 33, __pyx_L1_error) } __pyx_t_12 = PyList_GET_SIZE(__pyx_v_candidates); if (unlikely(__pyx_t_12 == -1)) __PYX_ERR(0, 33, __pyx_L1_error) __pyx_t_2 = ((__pyx_t_12 > 1) != 0); if (__pyx_t_2) { __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(0, 33, __pyx_L1_error) } /*else*/ { __Pyx_XDECREF(__pyx_r); if (unlikely(__pyx_v_signatures == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(0, 33, __pyx_L1_error) } __pyx_t_8 = __Pyx_PyDict_GetItem(((PyObject*)__pyx_v_signatures), PyList_GET_ITEM(__pyx_v_candidates, 0)); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_r = __pyx_t_8; __pyx_t_8 = 0; goto __pyx_L0; } /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_16); __Pyx_XDECREF(__pyx_t_17); __Pyx_XDECREF(__pyx_t_18); __Pyx_AddTraceback("implicit._implicit.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dest_sig); __Pyx_XDECREF(__pyx_v_ndarray); __Pyx_XDECREF(__pyx_v_numpy); __Pyx_XDECREF(__pyx_v_arg); __Pyx_XDECREF(__pyx_v_dtype); __Pyx_XDECREF(__pyx_v_arg_base); __Pyx_XDECREF(__pyx_v_candidates); __Pyx_XDECREF(__pyx_v_sig); __Pyx_XDECREF(__pyx_v_src_type); __Pyx_XDECREF(__pyx_v_dst_type); __Pyx_XDECREF(__pyx_v_kwargs); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_fuse_0__pyx_pw_8implicit_9_implicit_3least_squares(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef 
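/* Candidate selection in the dispatcher above, roughly, as plain Python:
 * exactly one signature may match, otherwise the TypeErrors quoted earlier
 * ("No matching signature found", ambiguous argument types) are raised.
 *
 *     candidates = [sig for sig in signatures
 *                   if all(dst is None or src == dst
 *                          for src, dst in zip(sig.strip('()').split('|'),
 *                                              dest_sig))]
 */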
__pyx_fuse_0__pyx_mdef_8implicit_9_implicit_3least_squares = {"__pyx_fuse_0least_squares", (PyCFunction)__pyx_fuse_0__pyx_pw_8implicit_9_implicit_3least_squares, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_fuse_0__pyx_pw_8implicit_9_implicit_3least_squares(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_Cui = 0; __Pyx_memviewslice __pyx_v_X = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Y = { 0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_v_regularization; int __pyx_v_num_threads; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("least_squares (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_Cui,&__pyx_n_s_X,&__pyx_n_s_Y,&__pyx_n_s_regularization,&__pyx_n_s_num_threads,0}; PyObject* values[5] = {0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Cui)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("least_squares", 1, 5, 5, 1); __PYX_ERR(0, 33, __pyx_L3_error) } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("least_squares", 1, 5, 5, 2); __PYX_ERR(0, 33, __pyx_L3_error) } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_regularization)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("least_squares", 1, 5, 5, 3); __PYX_ERR(0, 33, __pyx_L3_error) } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_num_threads)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("least_squares", 1, 5, 5, 4); __PYX_ERR(0, 33, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "least_squares") < 0)) __PYX_ERR(0, 33, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); } __pyx_v_Cui = values[0]; __pyx_v_X = __Pyx_PyObject_to_MemoryviewSlice_dsds_float(values[1]); if (unlikely(!__pyx_v_X.memview)) __PYX_ERR(0, 33, __pyx_L3_error) __pyx_v_Y = __Pyx_PyObject_to_MemoryviewSlice_dsds_float(values[2]); if (unlikely(!__pyx_v_Y.memview)) __PYX_ERR(0, 33, __pyx_L3_error) __pyx_v_regularization = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_regularization == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L3_error) __pyx_v_num_threads = __Pyx_PyInt_As_int(values[4]); if (unlikely((__pyx_v_num_threads == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("least_squares", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 33, __pyx_L3_error) __pyx_L3_error:; 
__Pyx_AddTraceback("implicit._implicit.least_squares", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_8implicit_9_implicit_2least_squares(__pyx_self, __pyx_v_Cui, __pyx_v_X, __pyx_v_Y, __pyx_v_regularization, __pyx_v_num_threads); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8implicit_9_implicit_2least_squares(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_Cui, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_Y, double __pyx_v_regularization, int __pyx_v_num_threads) { PyObject *__pyx_v_dtype = NULL; __Pyx_memviewslice __pyx_v_indptr = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_indices = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_data = { 0, 0, { 0 }, { 0 }, { 0 } }; CYTHON_UNUSED int __pyx_v_users; int __pyx_v_factors; int __pyx_v_u; int __pyx_v_i; int __pyx_v_j; int __pyx_v_index; int __pyx_v_err; int __pyx_v_one; float __pyx_v_confidence; float __pyx_v_temp; PyObject *__pyx_v_YtY = NULL; __Pyx_memviewslice __pyx_v_initialA = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_initialB = { 0, 0, { 0 }, { 0 }, { 0 } }; float *__pyx_v_A; float *__pyx_v_b; int *__pyx_v_pivot; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; __Pyx_memviewslice __pyx_t_11 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_12 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_13; int __pyx_t_14; int __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; int __pyx_t_20; Py_ssize_t __pyx_t_21; int __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; Py_ssize_t __pyx_t_26; int __pyx_t_27; int __pyx_t_28; Py_ssize_t __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; int __pyx_t_33; Py_ssize_t __pyx_t_34; Py_ssize_t __pyx_t_35; char const *__pyx_t_36; PyObject *__pyx_t_37 = NULL; PyObject *__pyx_t_38 = NULL; PyObject *__pyx_t_39 = NULL; PyObject *__pyx_t_40 = NULL; PyObject *__pyx_t_41 = NULL; PyObject *__pyx_t_42 = NULL; __Pyx_RefNannySetupContext("__pyx_fuse_0least_squares", 0); /* "implicit/_implicit.pyx":34 * @cython.boundscheck(False) * def least_squares(Cui, floating [:, :] X, floating [:, :] Y, double regularization, int num_threads): * dtype = numpy.float64 if floating is double else numpy.float32 # <<<<<<<<<<<<<< * * cdef int [:] indptr = Cui.indptr, indices = Cui.indices */ if ((0 != 0)) { __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 34, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 34, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } else { __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 34, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_float32); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 34, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_1 = __pyx_t_2; __pyx_t_2 = 0; } __pyx_v_dtype = __pyx_t_1; __pyx_t_1 = 0; /* "implicit/_implicit.pyx":36 * dtype = numpy.float64 if floating is double else numpy.float32 * * cdef int [:] indptr = Cui.indptr, indices = Cui.indices # <<<<<<<<<<<<<< * cdef double [:] data = Cui.data * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_Cui, __pyx_n_s_indptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 36, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_to_MemoryviewSlice_ds_int(__pyx_t_1); if (unlikely(!__pyx_t_4.memview)) __PYX_ERR(0, 36, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_indptr = __pyx_t_4; __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_Cui, __pyx_n_s_indices); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 36, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_to_MemoryviewSlice_ds_int(__pyx_t_1); if (unlikely(!__pyx_t_4.memview)) __PYX_ERR(0, 36, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_indices = __pyx_t_4; __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; /* "implicit/_implicit.pyx":37 * * cdef int [:] indptr = Cui.indptr, indices = Cui.indices * cdef double [:] data = Cui.data # <<<<<<<<<<<<<< * * cdef int users = X.shape[0], factors = X.shape[1], u, i, j, index, err, one = 1 */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_Cui, __pyx_n_s_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_1); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 37, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_data = __pyx_t_5; __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; /* "implicit/_implicit.pyx":39 * cdef double [:] data = Cui.data * * cdef int users = X.shape[0], factors = X.shape[1], u, i, j, index, err, one = 1 # <<<<<<<<<<<<<< * cdef floating confidence, temp * */ __pyx_v_users = (__pyx_v_X.shape[0]); __pyx_v_factors = (__pyx_v_X.shape[1]); __pyx_v_one = 1; /* "implicit/_implicit.pyx":42 * cdef floating confidence, temp * * YtY = numpy.dot(numpy.transpose(Y), Y) # <<<<<<<<<<<<<< * * cdef floating[:, :] initialA = YtY + regularization * numpy.eye(factors, dtype=dtype) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_dot); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_transpose); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __pyx_memoryview_fromslice(__pyx_v_Y, 2, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } if (!__pyx_t_8) { __pyx_t_2 = 
__Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_2); } else { __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8); __pyx_t_8 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __pyx_memoryview_fromslice(__pyx_v_Y, 2, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_9 = NULL; __pyx_t_10 = 0; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_10 = 1; } } __pyx_t_6 = PyTuple_New(2+__pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_9) { __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_9); __pyx_t_9 = NULL; } __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_10, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_10, __pyx_t_7); __pyx_t_2 = 0; __pyx_t_7 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_YtY = __pyx_t_1; __pyx_t_1 = 0; /* "implicit/_implicit.pyx":44 * YtY = numpy.dot(numpy.transpose(Y), Y) * * cdef floating[:, :] initialA = YtY + regularization * numpy.eye(factors, dtype=dtype) # <<<<<<<<<<<<<< * cdef floating[:] initialB = numpy.zeros(factors, dtype=dtype) * */ __pyx_t_1 = PyFloat_FromDouble(__pyx_v_regularization); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_eye); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_factors); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_v_dtype) < 0) __PYX_ERR(0, 44, __pyx_L1_error) __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if 
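/* The quoted .pyx lines 42-45 precompute the parts of the per-user normal
 * equations that are shared by all users; in NumPy terms (sketch):
 *
 *     import numpy as np
 *     YtY = Y.T.dot(Y)                                   # factors x factors
 *     initialA = YtY + regularization * np.eye(factors)  # per-user A template
 *     initialB = np.zeros(factors)                       # per-user b template
 */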
(unlikely(!__pyx_t_3)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Add(__pyx_v_YtY, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_11 = __Pyx_PyObject_to_MemoryviewSlice_dsds_float(__pyx_t_2); if (unlikely(!__pyx_t_11.memview)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_initialA = __pyx_t_11; __pyx_t_11.memview = NULL; __pyx_t_11.data = NULL; /* "implicit/_implicit.pyx":45 * * cdef floating[:, :] initialA = YtY + regularization * numpy.eye(factors, dtype=dtype) * cdef floating[:] initialB = numpy.zeros(factors, dtype=dtype) # <<<<<<<<<<<<<< * * cdef floating * A */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_factors); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_v_dtype) < 0) __PYX_ERR(0, 45, __pyx_L1_error) __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_12 = __Pyx_PyObject_to_MemoryviewSlice_ds_float(__pyx_t_7); if (unlikely(!__pyx_t_12.memview)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_v_initialB = __pyx_t_12; __pyx_t_12.memview = NULL; __pyx_t_12.data = NULL; /* "implicit/_implicit.pyx":51 * cdef int * pivot * * with nogil, parallel(num_threads = num_threads): # <<<<<<<<<<<<<< * # allocate temp memory for each thread * A = <floating *> malloc(sizeof(floating) * factors * factors) */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { { const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif #ifdef _OPENMP #pragma omp parallel private(__pyx_v_A, __pyx_v_b, __pyx_v_pivot) private(__pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34, __pyx_t_35, __pyx_t_36) firstprivate(__pyx_t_1, __pyx_t_2, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_40, __pyx_t_41, __pyx_t_42, __pyx_t_7) private(__pyx_filename, __pyx_lineno, __pyx_clineno) 
shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) num_threads(__pyx_v_num_threads) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ /* Initialize private variables to invalid values */ __pyx_v_A = ((float *)1); __pyx_v_b = ((float *)1); __pyx_v_pivot = ((int *)1); /* "implicit/_implicit.pyx":53 * with nogil, parallel(num_threads = num_threads): * # allocate temp memory for each thread * A = <floating *> malloc(sizeof(floating) * factors * factors) # <<<<<<<<<<<<<< * b = <floating *> malloc(sizeof(floating) * factors) * pivot = <int *> malloc(sizeof(int) * factors) */ __pyx_v_A = ((float *)malloc((((sizeof(float)) * __pyx_v_factors) * __pyx_v_factors))); /* "implicit/_implicit.pyx":54 * # allocate temp memory for each thread * A = <floating *> malloc(sizeof(floating) * factors * factors) * b = <floating *> malloc(sizeof(floating) * factors) # <<<<<<<<<<<<<< * pivot = <int *> malloc(sizeof(int) * factors) * try: */ __pyx_v_b = ((float *)malloc(((sizeof(float)) * __pyx_v_factors))); /* "implicit/_implicit.pyx":55 * A = <floating *> malloc(sizeof(floating) * factors * factors) * b = <floating *> malloc(sizeof(floating) * factors) * pivot = <int *> malloc(sizeof(int) * factors) # <<<<<<<<<<<<<< * try: * for u in prange(users, schedule='guided'): */ __pyx_v_pivot = ((int *)malloc(((sizeof(int)) * __pyx_v_factors))); /* "implicit/_implicit.pyx":56 * b = <floating *> malloc(sizeof(floating) * factors) * pivot = <int *> malloc(sizeof(int) * factors) * try: # <<<<<<<<<<<<<< * for u in prange(users, schedule='guided'): * # For each user u calculate */ /*try:*/ { /* "implicit/_implicit.pyx":57 * pivot = <int *> malloc(sizeof(int) * factors) * try: * for u in prange(users, schedule='guided'): # <<<<<<<<<<<<<< * # For each user u calculate * # Xu = (YtCuY + regularization*I)i^-1 * YtYCuPu */ __pyx_t_13 = __pyx_v_users; if (1 == 0) abort(); { float __pyx_parallel_temp0 = __PYX_NAN(); int __pyx_parallel_temp1 = 0xbad0bad0; int __pyx_parallel_temp2 = 0xbad0bad0; int __pyx_parallel_temp3 = 0xbad0bad0; float __pyx_parallel_temp4 = __PYX_NAN(); int __pyx_parallel_temp5 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; __pyx_t_15 = (__pyx_t_13 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_15 > 0) { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_confidence) lastprivate(__pyx_v_i) lastprivate(__pyx_v_index) lastprivate(__pyx_v_j) lastprivate(__pyx_v_temp) firstprivate(__pyx_v_u) lastprivate(__pyx_v_u) schedule(guided) #endif /* _OPENMP */ for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_15; __pyx_t_14++){ if (__pyx_parallel_why < 2) { __pyx_v_u = (int)(0 + 1 * __pyx_t_14); /* Initialize private variables to invalid values */ __pyx_v_confidence = ((float)__PYX_NAN()); __pyx_v_i = ((int)0xbad0bad0); __pyx_v_index = ((int)0xbad0bad0); __pyx_v_j = ((int)0xbad0bad0); __pyx_v_temp = ((float)__PYX_NAN()); /* "implicit/_implicit.pyx":62 * * # Build up A = YtCuY + reg * I and b = YtCuPu * memcpy(A, &initialA[0, 0], sizeof(floating) * factors * factors) # <<<<<<<<<<<<<< * memcpy(b, &initialB[0], sizeof(floating) * factors) * */ __pyx_t_16 = 0; __pyx_t_17 = 0; if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_initialA.shape[0]; if (__pyx_t_17 < 0) __pyx_t_17 
+= __pyx_v_initialA.shape[1]; memcpy(__pyx_v_A, (&(*((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_initialA.data + __pyx_t_16 * __pyx_v_initialA.strides[0]) ) + __pyx_t_17 * __pyx_v_initialA.strides[1]) )))), (((sizeof(float)) * __pyx_v_factors) * __pyx_v_factors)); /* "implicit/_implicit.pyx":63 * # Build up A = YtCuY + reg * I and b = YtCuPu * memcpy(A, &initialA[0, 0], sizeof(floating) * factors * factors) * memcpy(b, &initialB[0], sizeof(floating) * factors) # <<<<<<<<<<<<<< * * for index in range(indptr[u], indptr[u+1]): */ __pyx_t_18 = 0; if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_initialB.shape[0]; memcpy(__pyx_v_b, (&(*((float *) ( /* dim=0 */ (__pyx_v_initialB.data + __pyx_t_18 * __pyx_v_initialB.strides[0]) )))), ((sizeof(float)) * __pyx_v_factors)); /* "implicit/_implicit.pyx":65 * memcpy(b, &initialB[0], sizeof(floating) * factors) * * for index in range(indptr[u], indptr[u+1]): # <<<<<<<<<<<<<< * i = indices[index] * confidence = data[index] */ __pyx_t_19 = (__pyx_v_u + 1); if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_indptr.shape[0]; __pyx_t_20 = (*((int *) ( /* dim=0 */ (__pyx_v_indptr.data + __pyx_t_19 * __pyx_v_indptr.strides[0]) ))); __pyx_t_21 = __pyx_v_u; if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_v_indptr.shape[0]; for (__pyx_t_22 = (*((int *) ( /* dim=0 */ (__pyx_v_indptr.data + __pyx_t_21 * __pyx_v_indptr.strides[0]) ))); __pyx_t_22 < __pyx_t_20; __pyx_t_22+=1) { __pyx_v_index = __pyx_t_22; /* "implicit/_implicit.pyx":66 * * for index in range(indptr[u], indptr[u+1]): * i = indices[index] # <<<<<<<<<<<<<< * confidence = data[index] * */ __pyx_t_23 = __pyx_v_index; if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_v_indices.shape[0]; __pyx_v_i = (*((int *) ( /* dim=0 */ (__pyx_v_indices.data + __pyx_t_23 * __pyx_v_indices.strides[0]) ))); /* "implicit/_implicit.pyx":67 * for index in range(indptr[u], indptr[u+1]): * i = indices[index] * confidence = data[index] # <<<<<<<<<<<<<< * * # b += Yi Cui Pui */ __pyx_t_24 = __pyx_v_index; if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_v_data.shape[0]; __pyx_v_confidence = (*((double *) ( /* dim=0 */ (__pyx_v_data.data + __pyx_t_24 * __pyx_v_data.strides[0]) ))); /* "implicit/_implicit.pyx":71 * # b += Yi Cui Pui * # Pui is implicit, its defined to be 1 for non-zero entries * axpy(&factors, &confidence, &Y[i, 0], &one, b, &one) # <<<<<<<<<<<<<< * * # A += Yi^T Cui Yi */ __pyx_t_25 = __pyx_v_i; __pyx_t_26 = 0; if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_v_Y.shape[0]; if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_v_Y.shape[1]; __pyx_fuse_0__pyx_f_8implicit_9_implicit_axpy((&__pyx_v_factors), (&__pyx_v_confidence), (&(*((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_Y.data + __pyx_t_25 * __pyx_v_Y.strides[0]) ) + __pyx_t_26 * __pyx_v_Y.strides[1]) )))), (&__pyx_v_one), __pyx_v_b, (&__pyx_v_one)); /* "implicit/_implicit.pyx":75 * # A += Yi^T Cui Yi * # Since we've already added in YtY, we subtract 1 from confidence * for j in range(factors): # <<<<<<<<<<<<<< * temp = (confidence - 1) * Y[i, j] * axpy(&factors, &temp, &Y[i, 0], &one, A + j * factors, &one) */ __pyx_t_27 = __pyx_v_factors; for (__pyx_t_28 = 0; __pyx_t_28 < __pyx_t_27; __pyx_t_28+=1) { __pyx_v_j = __pyx_t_28; /* "implicit/_implicit.pyx":76 * # Since we've already added in YtY, we subtract 1 from confidence * for j in range(factors): * temp = (confidence - 1) * Y[i, j] # <<<<<<<<<<<<<< * axpy(&factors, &temp, &Y[i, 0], &one, A + j * factors, &one) * */ __pyx_t_29 = __pyx_v_i; __pyx_t_30 = __pyx_v_j; if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_v_Y.shape[0]; if (__pyx_t_30 < 0) __pyx_t_30 += 
__pyx_v_Y.shape[1]; __pyx_v_temp = ((__pyx_v_confidence - 1.0) * (*((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_Y.data + __pyx_t_29 * __pyx_v_Y.strides[0]) ) + __pyx_t_30 * __pyx_v_Y.strides[1]) )))); /* "implicit/_implicit.pyx":77 * for j in range(factors): * temp = (confidence - 1) * Y[i, j] * axpy(&factors, &temp, &Y[i, 0], &one, A + j * factors, &one) # <<<<<<<<<<<<<< * * posv("U", &factors, &one, A, &factors, b, &factors, &err); */ __pyx_t_31 = __pyx_v_i; __pyx_t_32 = 0; if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_v_Y.shape[0]; if (__pyx_t_32 < 0) __pyx_t_32 += __pyx_v_Y.shape[1]; __pyx_fuse_0__pyx_f_8implicit_9_implicit_axpy((&__pyx_v_factors), (&__pyx_v_temp), (&(*((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_Y.data + __pyx_t_31 * __pyx_v_Y.strides[0]) ) + __pyx_t_32 * __pyx_v_Y.strides[1]) )))), (&__pyx_v_one), (__pyx_v_A + (__pyx_v_j * __pyx_v_factors)), (&__pyx_v_one)); } } /* "implicit/_implicit.pyx":79 * axpy(&factors, &temp, &Y[i, 0], &one, A + j * factors, &one) * * posv("U", &factors, &one, A, &factors, b, &factors, &err); # <<<<<<<<<<<<<< * * # fall back to using a LU decomposition if this fails */ __pyx_fuse_0__pyx_f_8implicit_9_implicit_posv(((char *)"U"), (&__pyx_v_factors), (&__pyx_v_one), __pyx_v_A, (&__pyx_v_factors), __pyx_v_b, (&__pyx_v_factors), (&__pyx_v_err)); /* "implicit/_implicit.pyx":82 * * # fall back to using a LU decomposition if this fails * if err: # <<<<<<<<<<<<<< * gesv(&factors, &one, A, &factors, pivot, b, &factors, &err) * */ __pyx_t_33 = (__pyx_v_err != 0); if (__pyx_t_33) { /* "implicit/_implicit.pyx":83 * # fall back to using a LU decomposition if this fails * if err: * gesv(&factors, &one, A, &factors, pivot, b, &factors, &err) # <<<<<<<<<<<<<< * * if not err: */ __pyx_fuse_0__pyx_f_8implicit_9_implicit_gesv((&__pyx_v_factors), (&__pyx_v_one), __pyx_v_A, (&__pyx_v_factors), __pyx_v_pivot, __pyx_v_b, (&__pyx_v_factors), (&__pyx_v_err)); /* "implicit/_implicit.pyx":82 * * # fall back to using a LU decomposition if this fails * if err: # <<<<<<<<<<<<<< * gesv(&factors, &one, A, &factors, pivot, b, &factors, &err) * */ } /* "implicit/_implicit.pyx":85 * gesv(&factors, &one, A, &factors, pivot, b, &factors, &err) * * if not err: # <<<<<<<<<<<<<< * memcpy(&X[u, 0], b, sizeof(floating) * factors) * */ __pyx_t_33 = ((!(__pyx_v_err != 0)) != 0); if (__pyx_t_33) { /* "implicit/_implicit.pyx":86 * * if not err: * memcpy(&X[u, 0], b, sizeof(floating) * factors) # <<<<<<<<<<<<<< * * else: */ __pyx_t_34 = __pyx_v_u; __pyx_t_35 = 0; if (__pyx_t_34 < 0) __pyx_t_34 += __pyx_v_X.shape[0]; if (__pyx_t_35 < 0) __pyx_t_35 += __pyx_v_X.shape[1]; memcpy((&(*((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_X.data + __pyx_t_34 * __pyx_v_X.strides[0]) ) + __pyx_t_35 * __pyx_v_X.strides[1]) )))), __pyx_v_b, ((sizeof(float)) * __pyx_v_factors)); /* "implicit/_implicit.pyx":85 * gesv(&factors, &one, A, &factors, pivot, b, &factors, &err) * * if not err: # <<<<<<<<<<<<<< * memcpy(&X[u, 0], b, sizeof(floating) * factors) * */ goto __pyx_L24; } /* "implicit/_implicit.pyx":89 * * else: * with gil: # <<<<<<<<<<<<<< * raise ValueError("Singular matrix (err=%i) on row %i" % (err, u)) * */ /*else*/ { { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif /*try:*/ { /* "implicit/_implicit.pyx":90 * else: * with gil: * raise ValueError("Singular matrix (err=%i) on row %i" % (err, u)) # <<<<<<<<<<<<<< * * finally: */ __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_err); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 90, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_7); 
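/* Still holding the GIL: box u as a Python int as well, pack (err, u)
   into a tuple, %-format the "Singular matrix (err=%i) on row %i"
   message, and raise ValueError; __pyx_L28_error then re-releases the
   GIL and unwinds the surrounding parallel section. */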
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_u); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2); __pyx_t_7 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_Singular_matrix_err_i_on_row_i, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 90, __pyx_L28_error) } /* "implicit/_implicit.pyx":89 * * else: * with gil: # <<<<<<<<<<<<<< * raise ValueError("Singular matrix (err=%i) on row %i" % (err, u)) * */ /*finally:*/ { __pyx_L28_error: { #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif goto __pyx_L17_error; } } } } __pyx_L24:; goto __pyx_L31; __pyx_L17_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L30; __pyx_L30:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates0) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_confidence; __pyx_parallel_temp1 = __pyx_v_i; __pyx_parallel_temp2 = __pyx_v_index; __pyx_parallel_temp3 = __pyx_v_j; __pyx_parallel_temp4 = __pyx_v_temp; __pyx_parallel_temp5 = __pyx_v_u; } __pyx_L31:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
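   (An exception recorded by any worker sets __pyx_parallel_exc_type;
   forcing __pyx_parallel_why to 4 below makes the whole prange unwind
   with that exception instead of honouring another thread's
   break/continue/return code.)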
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_confidence = __pyx_parallel_temp0; __pyx_v_i = __pyx_parallel_temp1; __pyx_v_index = __pyx_parallel_temp2; __pyx_v_j = __pyx_parallel_temp3; __pyx_v_temp = __pyx_parallel_temp4; __pyx_v_u = __pyx_parallel_temp5; switch (__pyx_parallel_why) { case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L13_error; } } } } /* "implicit/_implicit.pyx":93 * * finally: * free(A) # <<<<<<<<<<<<<< * free(b) * free(pivot) */ /*finally:*/ { /*normal exit:*/{ free(__pyx_v_A); /* "implicit/_implicit.pyx":94 * finally: * free(A) * free(b) # <<<<<<<<<<<<<< * free(pivot) */ free(__pyx_v_b); /* "implicit/_implicit.pyx":95 * free(A) * free(b) * free(pivot) # <<<<<<<<<<<<<< */ free(__pyx_v_pivot); goto __pyx_L14; } /*exception exit:*/{ __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save; #endif __pyx_L13_error:; __pyx_t_37 = 0; __pyx_t_38 = 0; __pyx_t_39 = 0; __pyx_t_40 = 0; __pyx_t_41 = 0; __pyx_t_42 = 0; #ifdef WITH_THREAD __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_PyThreadState_assign __PYX_XDEC_MEMVIEW(&__pyx_t_11, 1); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_XDEC_MEMVIEW(&__pyx_t_5, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_12, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_4, 1); if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_40, &__pyx_t_41, &__pyx_t_42); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_37, &__pyx_t_38, &__pyx_t_39) < 0)) __Pyx_ErrFetch(&__pyx_t_37, &__pyx_t_38, &__pyx_t_39); __Pyx_XGOTREF(__pyx_t_37); __Pyx_XGOTREF(__pyx_t_38); __Pyx_XGOTREF(__pyx_t_39); __Pyx_XGOTREF(__pyx_t_40); __Pyx_XGOTREF(__pyx_t_41); __Pyx_XGOTREF(__pyx_t_42); __pyx_t_15 = __pyx_lineno; __pyx_t_14 = __pyx_clineno; __pyx_t_36 = __pyx_filename; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { /* "implicit/_implicit.pyx":93 * * finally: * free(A) # <<<<<<<<<<<<<< * free(b) * free(pivot) */ free(__pyx_v_A); /* "implicit/_implicit.pyx":94 * finally: * free(A) * free(b) # <<<<<<<<<<<<<< * free(pivot) */ free(__pyx_v_b); /* "implicit/_implicit.pyx":95 * free(A) * free(b) * free(pivot) # <<<<<<<<<<<<<< */ free(__pyx_v_pivot); } #ifdef WITH_THREAD __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_PyThreadState_assign if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_40); __Pyx_XGIVEREF(__pyx_t_41); __Pyx_XGIVEREF(__pyx_t_42); __Pyx_ExceptionReset(__pyx_t_40, __pyx_t_41, __pyx_t_42); } __Pyx_XGIVEREF(__pyx_t_37); __Pyx_XGIVEREF(__pyx_t_38); __Pyx_XGIVEREF(__pyx_t_39); __Pyx_ErrRestore(__pyx_t_37, __pyx_t_38, __pyx_t_39); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif __pyx_t_37 = 0; __pyx_t_38 = 0; __pyx_t_39 = 0; __pyx_t_40 = 0; __pyx_t_41 = 0; __pyx_t_42 = 0; __pyx_lineno = __pyx_t_15; __pyx_clineno = __pyx_t_14; __pyx_filename = __pyx_t_36; goto __pyx_L8_error; } __pyx_L14:; } goto __pyx_L37; __pyx_L8_error:; { #ifdef WITH_THREAD 
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L37; __pyx_L37:; #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = NULL; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = NULL; __Pyx_XDECREF(__pyx_t_37); __pyx_t_37 = NULL; __Pyx_XDECREF(__pyx_t_38); __pyx_t_38 = NULL; __Pyx_XDECREF(__pyx_t_39); __pyx_t_39 = NULL; __Pyx_XDECREF(__pyx_t_40); __pyx_t_40 = NULL; __Pyx_XDECREF(__pyx_t_41); __pyx_t_41 = NULL; __Pyx_XDECREF(__pyx_t_42); __pyx_t_42 = NULL; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = NULL; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { switch (__pyx_parallel_why) { case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "implicit/_implicit.pyx":51 * cdef int * pivot * * with nogil, parallel(num_threads = num_threads): # <<<<<<<<<<<<<< * # allocate temp memory for each thread * A = <floating *> malloc(sizeof(floating) * factors * factors) */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "implicit/_implicit.pyx":33 * * @cython.boundscheck(False) * def least_squares(Cui, floating [:, :] X, floating [:, :] Y, double regularization, int num_threads): # <<<<<<<<<<<<<< * dtype = numpy.float64 if floating is double else numpy.float32 * */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __PYX_XDEC_MEMVIEW(&__pyx_t_4, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_5, 1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __PYX_XDEC_MEMVIEW(&__pyx_t_11, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_12, 1); __Pyx_AddTraceback("implicit._implicit.least_squares", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dtype); __PYX_XDEC_MEMVIEW(&__pyx_v_indptr, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_indices, 1); 
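/* Remaining cleanup shared by the success and error paths of the float
   specialization: release the data slice, YtY, the initialA/initialB
   views, and the X and Y argument views before returning __pyx_r. */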
__PYX_XDEC_MEMVIEW(&__pyx_v_data, 1); __Pyx_XDECREF(__pyx_v_YtY); __PYX_XDEC_MEMVIEW(&__pyx_v_initialA, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_initialB, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_X, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Y, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_fuse_1__pyx_pw_8implicit_9_implicit_5least_squares(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_fuse_1__pyx_mdef_8implicit_9_implicit_5least_squares = {"__pyx_fuse_1least_squares", (PyCFunction)__pyx_fuse_1__pyx_pw_8implicit_9_implicit_5least_squares, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_fuse_1__pyx_pw_8implicit_9_implicit_5least_squares(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_Cui = 0; __Pyx_memviewslice __pyx_v_X = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Y = { 0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_v_regularization; int __pyx_v_num_threads; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("least_squares (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_Cui,&__pyx_n_s_X,&__pyx_n_s_Y,&__pyx_n_s_regularization,&__pyx_n_s_num_threads,0}; PyObject* values[5] = {0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Cui)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("least_squares", 1, 5, 5, 1); __PYX_ERR(0, 33, __pyx_L3_error) } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("least_squares", 1, 5, 5, 2); __PYX_ERR(0, 33, __pyx_L3_error) } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_regularization)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("least_squares", 1, 5, 5, 3); __PYX_ERR(0, 33, __pyx_L3_error) } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_num_threads)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("least_squares", 1, 5, 5, 4); __PYX_ERR(0, 33, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "least_squares") < 0)) __PYX_ERR(0, 33, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); } __pyx_v_Cui = values[0]; __pyx_v_X = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1]); if (unlikely(!__pyx_v_X.memview)) __PYX_ERR(0, 33, __pyx_L3_error) __pyx_v_Y = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[2]); if (unlikely(!__pyx_v_Y.memview)) __PYX_ERR(0, 33, __pyx_L3_error) __pyx_v_regularization = 
__pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_regularization == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L3_error) __pyx_v_num_threads = __Pyx_PyInt_As_int(values[4]); if (unlikely((__pyx_v_num_threads == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("least_squares", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 33, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("implicit._implicit.least_squares", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_8implicit_9_implicit_4least_squares(__pyx_self, __pyx_v_Cui, __pyx_v_X, __pyx_v_Y, __pyx_v_regularization, __pyx_v_num_threads); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8implicit_9_implicit_4least_squares(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_Cui, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_Y, double __pyx_v_regularization, int __pyx_v_num_threads) { PyObject *__pyx_v_dtype = NULL; __Pyx_memviewslice __pyx_v_indptr = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_indices = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_data = { 0, 0, { 0 }, { 0 }, { 0 } }; CYTHON_UNUSED int __pyx_v_users; int __pyx_v_factors; int __pyx_v_u; int __pyx_v_i; int __pyx_v_j; int __pyx_v_index; int __pyx_v_err; int __pyx_v_one; double __pyx_v_confidence; double __pyx_v_temp; PyObject *__pyx_v_YtY = NULL; __Pyx_memviewslice __pyx_v_initialA = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_initialB = { 0, 0, { 0 }, { 0 }, { 0 } }; double *__pyx_v_A; double *__pyx_v_b; int *__pyx_v_pivot; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; __Pyx_memviewslice __pyx_t_11 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_12; int __pyx_t_13; int __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; int __pyx_t_19; Py_ssize_t __pyx_t_20; int __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; int __pyx_t_26; int __pyx_t_27; Py_ssize_t __pyx_t_28; Py_ssize_t __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; int __pyx_t_32; Py_ssize_t __pyx_t_33; Py_ssize_t __pyx_t_34; char const *__pyx_t_35; PyObject *__pyx_t_36 = NULL; PyObject *__pyx_t_37 = NULL; PyObject *__pyx_t_38 = NULL; PyObject *__pyx_t_39 = NULL; PyObject *__pyx_t_40 = NULL; PyObject *__pyx_t_41 = NULL; __Pyx_RefNannySetupContext("__pyx_fuse_1least_squares", 0); /* "implicit/_implicit.pyx":34 * @cython.boundscheck(False) * def least_squares(Cui, floating [:, :] X, floating [:, :] Y, double regularization, int num_threads): * dtype = numpy.float64 if floating is double else numpy.float32 # <<<<<<<<<<<<<< * * cdef int [:] indptr = Cui.indptr, indices = Cui.indices */ if ((1 != 0)) { __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 34, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(0, 34, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } else { __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 34, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_float32); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 34, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_1 = __pyx_t_2; __pyx_t_2 = 0; } __pyx_v_dtype = __pyx_t_1; __pyx_t_1 = 0; /* "implicit/_implicit.pyx":36 * dtype = numpy.float64 if floating is double else numpy.float32 * * cdef int [:] indptr = Cui.indptr, indices = Cui.indices # <<<<<<<<<<<<<< * cdef double [:] data = Cui.data * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_Cui, __pyx_n_s_indptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 36, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_to_MemoryviewSlice_ds_int(__pyx_t_1); if (unlikely(!__pyx_t_4.memview)) __PYX_ERR(0, 36, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_indptr = __pyx_t_4; __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_Cui, __pyx_n_s_indices); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 36, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_to_MemoryviewSlice_ds_int(__pyx_t_1); if (unlikely(!__pyx_t_4.memview)) __PYX_ERR(0, 36, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_indices = __pyx_t_4; __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; /* "implicit/_implicit.pyx":37 * * cdef int [:] indptr = Cui.indptr, indices = Cui.indices * cdef double [:] data = Cui.data # <<<<<<<<<<<<<< * * cdef int users = X.shape[0], factors = X.shape[1], u, i, j, index, err, one = 1 */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_Cui, __pyx_n_s_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_1); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 37, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_data = __pyx_t_5; __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; /* "implicit/_implicit.pyx":39 * cdef double [:] data = Cui.data * * cdef int users = X.shape[0], factors = X.shape[1], u, i, j, index, err, one = 1 # <<<<<<<<<<<<<< * cdef floating confidence, temp * */ __pyx_v_users = (__pyx_v_X.shape[0]); __pyx_v_factors = (__pyx_v_X.shape[1]); __pyx_v_one = 1; /* "implicit/_implicit.pyx":42 * cdef floating confidence, temp * * YtY = numpy.dot(numpy.transpose(Y), Y) # <<<<<<<<<<<<<< * * cdef floating[:, :] initialA = YtY + regularization * numpy.eye(factors, dtype=dtype) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_dot); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_transpose); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __pyx_memoryview_fromslice(__pyx_v_Y, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; 
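/* Wrap the typed double[:, :] view of Y in a Python memoryview object so
   that numpy.transpose and numpy.dot can consume it via the buffer
   protocol. YtY = Yt.Y is hoisted out of the user loop: in effect each
   row u below solves the normal equations
   Xu = (YtCuY + regularization*I)^-1 (YtCuPu), assembling A and b from
   this precomputed YtY plus (confidence - 1) rank-one updates. */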
if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } if (!__pyx_t_8) { __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_2); } else { __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8); __pyx_t_8 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __pyx_memoryview_fromslice(__pyx_v_Y, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_9 = NULL; __pyx_t_10 = 0; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_10 = 1; } } __pyx_t_6 = PyTuple_New(2+__pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_9) { __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_9); __pyx_t_9 = NULL; } __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_10, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_10, __pyx_t_7); __pyx_t_2 = 0; __pyx_t_7 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 42, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_YtY = __pyx_t_1; __pyx_t_1 = 0; /* "implicit/_implicit.pyx":44 * YtY = numpy.dot(numpy.transpose(Y), Y) * * cdef floating[:, :] initialA = YtY + regularization * numpy.eye(factors, dtype=dtype) # <<<<<<<<<<<<<< * cdef floating[:] initialB = numpy.zeros(factors, dtype=dtype) * */ __pyx_t_1 = PyFloat_FromDouble(__pyx_v_regularization); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_eye); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_factors); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if 
(PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_v_dtype) < 0) __PYX_ERR(0, 44, __pyx_L1_error) __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Add(__pyx_v_YtY, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_11 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(__pyx_t_2); if (unlikely(!__pyx_t_11.memview)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_initialA = __pyx_t_11; __pyx_t_11.memview = NULL; __pyx_t_11.data = NULL; /* "implicit/_implicit.pyx":45 * * cdef floating[:, :] initialA = YtY + regularization * numpy.eye(factors, dtype=dtype) * cdef floating[:] initialB = numpy.zeros(factors, dtype=dtype) # <<<<<<<<<<<<<< * * cdef floating * A */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_factors); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_v_dtype) < 0) __PYX_ERR(0, 45, __pyx_L1_error) __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_7); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_v_initialB = __pyx_t_5; __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; /* "implicit/_implicit.pyx":51 * cdef int * pivot * * with nogil, parallel(num_threads = num_threads): # <<<<<<<<<<<<<< * # allocate temp memory for each thread * A = <floating *> malloc(sizeof(floating) * factors * factors) */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { { const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif #ifdef _OPENMP #pragma omp parallel private(__pyx_v_A, __pyx_v_b, __pyx_v_pivot) private(__pyx_t_12, __pyx_t_13, __pyx_t_14, 
__pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34, __pyx_t_35) firstprivate(__pyx_t_1, __pyx_t_2, __pyx_t_36, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_40, __pyx_t_41, __pyx_t_7) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) num_threads(__pyx_v_num_threads) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ /* Initialize private variables to invalid values */ __pyx_v_A = ((double *)1); __pyx_v_b = ((double *)1); __pyx_v_pivot = ((int *)1); /* "implicit/_implicit.pyx":53 * with nogil, parallel(num_threads = num_threads): * # allocate temp memory for each thread * A = <floating *> malloc(sizeof(floating) * factors * factors) # <<<<<<<<<<<<<< * b = <floating *> malloc(sizeof(floating) * factors) * pivot = <int *> malloc(sizeof(int) * factors) */ __pyx_v_A = ((double *)malloc((((sizeof(double)) * __pyx_v_factors) * __pyx_v_factors))); /* "implicit/_implicit.pyx":54 * # allocate temp memory for each thread * A = <floating *> malloc(sizeof(floating) * factors * factors) * b = <floating *> malloc(sizeof(floating) * factors) # <<<<<<<<<<<<<< * pivot = <int *> malloc(sizeof(int) * factors) * try: */ __pyx_v_b = ((double *)malloc(((sizeof(double)) * __pyx_v_factors))); /* "implicit/_implicit.pyx":55 * A = <floating *> malloc(sizeof(floating) * factors * factors) * b = <floating *> malloc(sizeof(floating) * factors) * pivot = <int *> malloc(sizeof(int) * factors) # <<<<<<<<<<<<<< * try: * for u in prange(users, schedule='guided'): */ __pyx_v_pivot = ((int *)malloc(((sizeof(int)) * __pyx_v_factors))); /* "implicit/_implicit.pyx":56 * b = <floating *> malloc(sizeof(floating) * factors) * pivot = <int *> malloc(sizeof(int) * factors) * try: # <<<<<<<<<<<<<< * for u in prange(users, schedule='guided'): * # For each user u calculate */ /*try:*/ { /* "implicit/_implicit.pyx":57 * pivot = <int *> malloc(sizeof(int) * factors) * try: * for u in prange(users, schedule='guided'): # <<<<<<<<<<<<<< * # For each user u calculate * # Xu = (YtCuY + regularization*I)i^-1 * YtYCuPu */ __pyx_t_12 = __pyx_v_users; if (1 == 0) abort(); { double __pyx_parallel_temp0 = __PYX_NAN(); int __pyx_parallel_temp1 = 0xbad0bad0; int __pyx_parallel_temp2 = 0xbad0bad0; int __pyx_parallel_temp3 = 0xbad0bad0; double __pyx_parallel_temp4 = __PYX_NAN(); int __pyx_parallel_temp5 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; __pyx_t_14 = (__pyx_t_12 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_14 > 0) { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_confidence) lastprivate(__pyx_v_i) lastprivate(__pyx_v_index) lastprivate(__pyx_v_j) lastprivate(__pyx_v_temp) firstprivate(__pyx_v_u) lastprivate(__pyx_v_u) schedule(guided) #endif /* _OPENMP */ for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_14; __pyx_t_13++){ if (__pyx_parallel_why < 2) { __pyx_v_u = (int)(0 + 1 * __pyx_t_13); /* Initialize private variables to invalid values */ __pyx_v_confidence = ((double)__PYX_NAN()); __pyx_v_i = ((int)0xbad0bad0); __pyx_v_index = 
((int)0xbad0bad0); __pyx_v_j = ((int)0xbad0bad0); __pyx_v_temp = ((double)__PYX_NAN()); /* "implicit/_implicit.pyx":62 * * # Build up A = YtCuY + reg * I and b = YtCuPu * memcpy(A, &initialA[0, 0], sizeof(floating) * factors * factors) # <<<<<<<<<<<<<< * memcpy(b, &initialB[0], sizeof(floating) * factors) * */ __pyx_t_15 = 0; __pyx_t_16 = 0; if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_v_initialA.shape[0]; if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_initialA.shape[1]; memcpy(__pyx_v_A, (&(*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_initialA.data + __pyx_t_15 * __pyx_v_initialA.strides[0]) ) + __pyx_t_16 * __pyx_v_initialA.strides[1]) )))), (((sizeof(double)) * __pyx_v_factors) * __pyx_v_factors)); /* "implicit/_implicit.pyx":63 * # Build up A = YtCuY + reg * I and b = YtCuPu * memcpy(A, &initialA[0, 0], sizeof(floating) * factors * factors) * memcpy(b, &initialB[0], sizeof(floating) * factors) # <<<<<<<<<<<<<< * * for index in range(indptr[u], indptr[u+1]): */ __pyx_t_17 = 0; if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_initialB.shape[0]; memcpy(__pyx_v_b, (&(*((double *) ( /* dim=0 */ (__pyx_v_initialB.data + __pyx_t_17 * __pyx_v_initialB.strides[0]) )))), ((sizeof(double)) * __pyx_v_factors)); /* "implicit/_implicit.pyx":65 * memcpy(b, &initialB[0], sizeof(floating) * factors) * * for index in range(indptr[u], indptr[u+1]): # <<<<<<<<<<<<<< * i = indices[index] * confidence = data[index] */ __pyx_t_18 = (__pyx_v_u + 1); if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_indptr.shape[0]; __pyx_t_19 = (*((int *) ( /* dim=0 */ (__pyx_v_indptr.data + __pyx_t_18 * __pyx_v_indptr.strides[0]) ))); __pyx_t_20 = __pyx_v_u; if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_indptr.shape[0]; for (__pyx_t_21 = (*((int *) ( /* dim=0 */ (__pyx_v_indptr.data + __pyx_t_20 * __pyx_v_indptr.strides[0]) ))); __pyx_t_21 < __pyx_t_19; __pyx_t_21+=1) { __pyx_v_index = __pyx_t_21; /* "implicit/_implicit.pyx":66 * * for index in range(indptr[u], indptr[u+1]): * i = indices[index] # <<<<<<<<<<<<<< * confidence = data[index] * */ __pyx_t_22 = __pyx_v_index; if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_v_indices.shape[0]; __pyx_v_i = (*((int *) ( /* dim=0 */ (__pyx_v_indices.data + __pyx_t_22 * __pyx_v_indices.strides[0]) ))); /* "implicit/_implicit.pyx":67 * for index in range(indptr[u], indptr[u+1]): * i = indices[index] * confidence = data[index] # <<<<<<<<<<<<<< * * # b += Yi Cui Pui */ __pyx_t_23 = __pyx_v_index; if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_v_data.shape[0]; __pyx_v_confidence = (*((double *) ( /* dim=0 */ (__pyx_v_data.data + __pyx_t_23 * __pyx_v_data.strides[0]) ))); /* "implicit/_implicit.pyx":71 * # b += Yi Cui Pui * # Pui is implicit, its defined to be 1 for non-zero entries * axpy(&factors, &confidence, &Y[i, 0], &one, b, &one) # <<<<<<<<<<<<<< * * # A += Yi^T Cui Yi */ __pyx_t_24 = __pyx_v_i; __pyx_t_25 = 0; if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_v_Y.shape[0]; if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_v_Y.shape[1]; __pyx_fuse_1__pyx_f_8implicit_9_implicit_axpy((&__pyx_v_factors), (&__pyx_v_confidence), (&(*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_Y.data + __pyx_t_24 * __pyx_v_Y.strides[0]) ) + __pyx_t_25 * __pyx_v_Y.strides[1]) )))), (&__pyx_v_one), __pyx_v_b, (&__pyx_v_one)); /* "implicit/_implicit.pyx":75 * # A += Yi^T Cui Yi * # Since we've already added in YtY, we subtract 1 from confidence * for j in range(factors): # <<<<<<<<<<<<<< * temp = (confidence - 1) * Y[i, j] * axpy(&factors, &temp, &Y[i, 0], &one, A + j * factors, &one) */ __pyx_t_26 = __pyx_v_factors; for (__pyx_t_27 = 0; __pyx_t_27 < 
__pyx_t_26; __pyx_t_27+=1) { __pyx_v_j = __pyx_t_27; /* "implicit/_implicit.pyx":76 * # Since we've already added in YtY, we subtract 1 from confidence * for j in range(factors): * temp = (confidence - 1) * Y[i, j] # <<<<<<<<<<<<<< * axpy(&factors, &temp, &Y[i, 0], &one, A + j * factors, &one) * */ __pyx_t_28 = __pyx_v_i; __pyx_t_29 = __pyx_v_j; if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_v_Y.shape[0]; if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_v_Y.shape[1]; __pyx_v_temp = ((__pyx_v_confidence - 1.0) * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_Y.data + __pyx_t_28 * __pyx_v_Y.strides[0]) ) + __pyx_t_29 * __pyx_v_Y.strides[1]) )))); /* "implicit/_implicit.pyx":77 * for j in range(factors): * temp = (confidence - 1) * Y[i, j] * axpy(&factors, &temp, &Y[i, 0], &one, A + j * factors, &one) # <<<<<<<<<<<<<< * * posv("U", &factors, &one, A, &factors, b, &factors, &err); */ __pyx_t_30 = __pyx_v_i; __pyx_t_31 = 0; if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_v_Y.shape[0]; if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_v_Y.shape[1]; __pyx_fuse_1__pyx_f_8implicit_9_implicit_axpy((&__pyx_v_factors), (&__pyx_v_temp), (&(*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_Y.data + __pyx_t_30 * __pyx_v_Y.strides[0]) ) + __pyx_t_31 * __pyx_v_Y.strides[1]) )))), (&__pyx_v_one), (__pyx_v_A + (__pyx_v_j * __pyx_v_factors)), (&__pyx_v_one)); } } /* "implicit/_implicit.pyx":79 * axpy(&factors, &temp, &Y[i, 0], &one, A + j * factors, &one) * * posv("U", &factors, &one, A, &factors, b, &factors, &err); # <<<<<<<<<<<<<< * * # fall back to using a LU decomposition if this fails */ __pyx_fuse_1__pyx_f_8implicit_9_implicit_posv(((char *)"U"), (&__pyx_v_factors), (&__pyx_v_one), __pyx_v_A, (&__pyx_v_factors), __pyx_v_b, (&__pyx_v_factors), (&__pyx_v_err)); /* "implicit/_implicit.pyx":82 * * # fall back to using a LU decomposition if this fails * if err: # <<<<<<<<<<<<<< * gesv(&factors, &one, A, &factors, pivot, b, &factors, &err) * */ __pyx_t_32 = (__pyx_v_err != 0); if (__pyx_t_32) { /* "implicit/_implicit.pyx":83 * # fall back to using a LU decomposition if this fails * if err: * gesv(&factors, &one, A, &factors, pivot, b, &factors, &err) # <<<<<<<<<<<<<< * * if not err: */ __pyx_fuse_1__pyx_f_8implicit_9_implicit_gesv((&__pyx_v_factors), (&__pyx_v_one), __pyx_v_A, (&__pyx_v_factors), __pyx_v_pivot, __pyx_v_b, (&__pyx_v_factors), (&__pyx_v_err)); /* "implicit/_implicit.pyx":82 * * # fall back to using a LU decomposition if this fails * if err: # <<<<<<<<<<<<<< * gesv(&factors, &one, A, &factors, pivot, b, &factors, &err) * */ } /* "implicit/_implicit.pyx":85 * gesv(&factors, &one, A, &factors, pivot, b, &factors, &err) * * if not err: # <<<<<<<<<<<<<< * memcpy(&X[u, 0], b, sizeof(floating) * factors) * */ __pyx_t_32 = ((!(__pyx_v_err != 0)) != 0); if (__pyx_t_32) { /* "implicit/_implicit.pyx":86 * * if not err: * memcpy(&X[u, 0], b, sizeof(floating) * factors) # <<<<<<<<<<<<<< * * else: */ __pyx_t_33 = __pyx_v_u; __pyx_t_34 = 0; if (__pyx_t_33 < 0) __pyx_t_33 += __pyx_v_X.shape[0]; if (__pyx_t_34 < 0) __pyx_t_34 += __pyx_v_X.shape[1]; memcpy((&(*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_X.data + __pyx_t_33 * __pyx_v_X.strides[0]) ) + __pyx_t_34 * __pyx_v_X.strides[1]) )))), __pyx_v_b, ((sizeof(double)) * __pyx_v_factors)); /* "implicit/_implicit.pyx":85 * gesv(&factors, &one, A, &factors, pivot, b, &factors, &err) * * if not err: # <<<<<<<<<<<<<< * memcpy(&X[u, 0], b, sizeof(floating) * factors) * */ goto __pyx_L24; } /* "implicit/_implicit.pyx":89 * * else: * with gil: # <<<<<<<<<<<<<< * raise ValueError("Singular 
matrix (err=%i) on row %i" % (err, u)) * */ /*else*/ { { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif /*try:*/ { /* "implicit/_implicit.pyx":90 * else: * with gil: * raise ValueError("Singular matrix (err=%i) on row %i" % (err, u)) # <<<<<<<<<<<<<< * * finally: */ __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_err); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 90, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_u); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2); __pyx_t_7 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_Singular_matrix_err_i_on_row_i, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 90, __pyx_L28_error) } /* "implicit/_implicit.pyx":89 * * else: * with gil: # <<<<<<<<<<<<<< * raise ValueError("Singular matrix (err=%i) on row %i" % (err, u)) * */ /*finally:*/ { __pyx_L28_error: { #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif goto __pyx_L17_error; } } } } __pyx_L24:; goto __pyx_L31; __pyx_L17_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L30; __pyx_L30:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates1) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_confidence; __pyx_parallel_temp1 = __pyx_v_i; __pyx_parallel_temp2 = __pyx_v_index; __pyx_parallel_temp3 = __pyx_v_j; __pyx_parallel_temp4 = __pyx_v_temp; __pyx_parallel_temp5 = __pyx_v_u; } __pyx_L31:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_confidence = __pyx_parallel_temp0; __pyx_v_i = __pyx_parallel_temp1; __pyx_v_index = __pyx_parallel_temp2; __pyx_v_j = __pyx_parallel_temp3; __pyx_v_temp = __pyx_parallel_temp4; __pyx_v_u = __pyx_parallel_temp5; switch (__pyx_parallel_why) { case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L13_error; } } } } /* "implicit/_implicit.pyx":93 * * finally: * free(A) # <<<<<<<<<<<<<< * free(b) * free(pivot) */ /*finally:*/ { /*normal exit:*/{ free(__pyx_v_A); /* "implicit/_implicit.pyx":94 * finally: * free(A) * free(b) # <<<<<<<<<<<<<< * free(pivot) */ free(__pyx_v_b); /* "implicit/_implicit.pyx":95 * free(A) * free(b) * free(pivot) # <<<<<<<<<<<<<< */ free(__pyx_v_pivot); goto __pyx_L14; } /*exception exit:*/{ __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save; #endif __pyx_L13_error:; __pyx_t_36 = 0; __pyx_t_37 = 0; __pyx_t_38 = 0; __pyx_t_39 = 0; __pyx_t_40 = 0; __pyx_t_41 = 0; #ifdef WITH_THREAD __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_XDEC_MEMVIEW(&__pyx_t_5, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_4, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_11, 1); if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_39, &__pyx_t_40, &__pyx_t_41); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_36, &__pyx_t_37, &__pyx_t_38) < 0)) __Pyx_ErrFetch(&__pyx_t_36, &__pyx_t_37, &__pyx_t_38); __Pyx_XGOTREF(__pyx_t_36); __Pyx_XGOTREF(__pyx_t_37); __Pyx_XGOTREF(__pyx_t_38); __Pyx_XGOTREF(__pyx_t_39); __Pyx_XGOTREF(__pyx_t_40); __Pyx_XGOTREF(__pyx_t_41); __pyx_t_14 = __pyx_lineno; __pyx_t_13 = __pyx_clineno; __pyx_t_35 = __pyx_filename; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { /* "implicit/_implicit.pyx":93 * * finally: * free(A) # <<<<<<<<<<<<<< * free(b) * free(pivot) */ free(__pyx_v_A); /* "implicit/_implicit.pyx":94 * finally: * free(A) * free(b) # <<<<<<<<<<<<<< * free(pivot) */ free(__pyx_v_b); /* "implicit/_implicit.pyx":95 * free(A) * free(b) * free(pivot) # <<<<<<<<<<<<<< */ free(__pyx_v_pivot); } #ifdef WITH_THREAD __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_PyThreadState_assign if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_39); __Pyx_XGIVEREF(__pyx_t_40); __Pyx_XGIVEREF(__pyx_t_41); __Pyx_ExceptionReset(__pyx_t_39, __pyx_t_40, __pyx_t_41); } __Pyx_XGIVEREF(__pyx_t_36); __Pyx_XGIVEREF(__pyx_t_37); __Pyx_XGIVEREF(__pyx_t_38); __Pyx_ErrRestore(__pyx_t_36, __pyx_t_37, __pyx_t_38); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif __pyx_t_36 = 0; __pyx_t_37 = 0; __pyx_t_38 = 0; __pyx_t_39 = 0; __pyx_t_40 = 0; __pyx_t_41 = 0; __pyx_lineno = __pyx_t_14; __pyx_clineno = __pyx_t_13; __pyx_filename = __pyx_t_35; goto __pyx_L8_error; } __pyx_L14:; } goto __pyx_L37; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = 
PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L37; __pyx_L37:; #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = NULL; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = NULL; __Pyx_XDECREF(__pyx_t_36); __pyx_t_36 = NULL; __Pyx_XDECREF(__pyx_t_37); __pyx_t_37 = NULL; __Pyx_XDECREF(__pyx_t_38); __pyx_t_38 = NULL; __Pyx_XDECREF(__pyx_t_39); __pyx_t_39 = NULL; __Pyx_XDECREF(__pyx_t_40); __pyx_t_40 = NULL; __Pyx_XDECREF(__pyx_t_41); __pyx_t_41 = NULL; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = NULL; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { switch (__pyx_parallel_why) { case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "implicit/_implicit.pyx":51 * cdef int * pivot * * with nogil, parallel(num_threads = num_threads): # <<<<<<<<<<<<<< * # allocate temp memory for each thread * A = <floating *> malloc(sizeof(floating) * factors * factors) */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "implicit/_implicit.pyx":33 * * @cython.boundscheck(False) * def least_squares(Cui, floating [:, :] X, floating [:, :] Y, double regularization, int num_threads): # <<<<<<<<<<<<<< * dtype = numpy.float64 if floating is double else numpy.float32 * */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __PYX_XDEC_MEMVIEW(&__pyx_t_4, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_5, 1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __PYX_XDEC_MEMVIEW(&__pyx_t_11, 1); __Pyx_AddTraceback("implicit._implicit.least_squares", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dtype); __PYX_XDEC_MEMVIEW(&__pyx_v_indptr, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_indices, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_data, 1); __Pyx_XDECREF(__pyx_v_YtY); 
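/* Editor's note: a sketch of the Cython routine this generated block implements,
 * stitched together only from the "implicit/_implicit.pyx" banners quoted above
 * (lines 33, 51, 89-90 and 93-95). Everything between the quoted lines -- the
 * per-row solve that computes err, u and the other temporaries -- is not quoted
 * in this file, so the "..." gaps below are genuine gaps, not paraphrase:
 *
 *     @cython.boundscheck(False)
 *     def least_squares(Cui, floating [:, :] X, floating [:, :] Y,
 *                       double regularization, int num_threads):
 *         dtype = numpy.float64 if floating is double else numpy.float32
 *         ...
 *         with nogil, parallel(num_threads=num_threads):
 *             # allocate temp memory for each thread
 *             A = <floating *> malloc(sizeof(floating) * factors * factors)
 *             ...
 *             try:
 *                 ...
 *                 else:   # (the branch this else pairs with is not quoted here)
 *                     with gil:
 *                         raise ValueError("Singular matrix (err=%i) on row %i" % (err, u))
 *             finally:
 *                 free(A)
 *                 free(b)
 *                 free(pivot)
 */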
__PYX_XDEC_MEMVIEW(&__pyx_v_initialA, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_initialB, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_X, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Y, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 120, __pyx_L3_error) } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 120, __pyx_L3_error) } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 120, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 120, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 121, __pyx_L3_error) } else { /* "View.MemoryView":121 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< 
* * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 120, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 120, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 120, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":127 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 127, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(1, 127, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":128 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":130 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":131 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 131, __pyx_L1_error) /* "View.MemoryView":130 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":133 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) 
!= 0); if (__pyx_t_2) { /* "View.MemoryView":134 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 134, __pyx_L1_error) /* "View.MemoryView":133 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":136 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":137 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":136 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":138 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 138, __pyx_L1_error) __pyx_t_5 = __pyx_v_format; __Pyx_INCREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":139 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ __pyx_t_6 = __Pyx_PyObject_AsString(__pyx_v_self->_format); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(1, 139, __pyx_L1_error) __pyx_v_self->format = __pyx_t_6; /* "View.MemoryView":142 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":143 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":145 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = 
((!(__pyx_v_self->_shape != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":146 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 146, __pyx_L1_error) /* "View.MemoryView":145 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":149 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ __pyx_t_7 = 0; __pyx_t_5 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_5)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 149, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_5, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 149, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 149, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_dim = __pyx_t_8; __pyx_v_idx = __pyx_t_7; __pyx_t_7 = (__pyx_t_7 + 1); /* "View.MemoryView":150 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":151 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9); __pyx_t_3 = 0; __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __PYX_ERR(1, 151, __pyx_L1_error) /* "View.MemoryView":150 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":152 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":149 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":155 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 155, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":156 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":157 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":155 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":158 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 158, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":159 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":160 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":158 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":162 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 162, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":164 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":167 * itemsize, self.ndim, order) 
* * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":168 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_5 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 168, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 168, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":169 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":172 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":173 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 174, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 174, __pyx_L1_error) /* "View.MemoryView":173 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":176 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":177 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":178 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 178, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 178, __pyx_L1_error) } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_1; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "View.MemoryView":179 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":180 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * 
@cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":176 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":169 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":183 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":184 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":185 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 185, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":186 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":185 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":187 * if self.mode == u"c": * bufmode = 
PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":188 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":189 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 190, __pyx_L1_error) /* "View.MemoryView":189 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":191 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":192 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":193 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":194 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":195 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":196 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * 
info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":197 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":198 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":200 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":201 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":200 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":203 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":205 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":183 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":209 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":210 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":211 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # 
<<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":210 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":212 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":214 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":213 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":216 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":212 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":217 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":209 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":220 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":221 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 221, __pyx_L1_error) 
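/* Editor's note: the call above shows how Cython dispatches a cdef method: the
 * Python-level property getter reads the function pointer out of the object's
 * vtable (__pyx_vtab) instead of performing a Python attribute lookup. Per the
 * View.MemoryView banners quoted around this function (lines 220-226), the
 * source being compiled here is simply:
 *
 *     @property
 *     def memview(self):
 *         return self.get_memview()
 *
 *     @cname('get_memview')
 *     cdef get_memview(self):
 *         flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
 *         return memoryview(self, flags, self.dtype_is_object)
 */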
__Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":220 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":224 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":225 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":226 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":224 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":229 * * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function 
exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":230 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":229 * * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":232 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":233 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":232 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":235 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject 
*__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":236 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 236, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 236, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":235 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":240 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":244 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":245 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); 
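/* Editor's note: PyTuple_SET_ITEM steals a reference, which is why every call
 * in this argument-packing sequence is preceded by __Pyx_GIVEREF -- a refnanny
 * annotation recording that ownership of the temporary moves into the tuple
 * (it compiles to nothing unless CYTHON_REFNANNY is enabled). For orientation,
 * the View.MemoryView banners in this function (lines 240-251) quote the cdef
 * factory being compiled:
 *
 *     @cname("__pyx_array_new")
 *     cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format,
 *                               char *mode, char *buf):
 *         cdef array result
 *
 *         if buf == NULL:
 *             result = array(shape, itemsize, format, mode.decode('ASCII'))
 *         else:
 *             result = array(shape, itemsize, format, mode.decode('ASCII'),
 *                            allocate_buffer=False)
 *             result.data = buf
 *
 *         return result
 */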
__Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":244 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":247 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":248 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 248, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 248, __pyx_L1_error) /* "View.MemoryView":247 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":249 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":251 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":240 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 
0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":277 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 277, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 277, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":278 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":277 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":279 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":280 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":279 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":294 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":296 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":300 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":302 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":303 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":302 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":305 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":294 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":341 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 341, __pyx_L3_error) } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 341, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 341, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 341, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 341, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":342 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":343 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":344 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto 
__pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":345 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 345, __pyx_L1_error) /* "View.MemoryView":346 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":347 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":348 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":346 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":344 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":351 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":352 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":353 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":351 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":354 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* 
"View.MemoryView":355 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":356 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":357 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 357, __pyx_L1_error) /* "View.MemoryView":356 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":354 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":359 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":360 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":359 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":362 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":364 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":366 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":341 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
"View.MemoryView":368 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; PyThread_type_lock __pyx_t_5; PyThread_type_lock __pyx_t_6; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":369 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":370 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * cdef int i */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":369 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ } /* "View.MemoryView":374 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":375 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":376 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":378 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":380 * if i != __pyx_memoryview_thread_locks_used: * 
__pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_5 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":379 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_5; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_6; /* "View.MemoryView":378 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":381 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":376 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":383 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":374 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":368 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":385 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":387 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":389 * cdef char *itemp = <char 
*> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 389, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 389, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 389, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 389, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":390 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 390, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 390, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":389 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":392 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":385 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":395 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); 
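/* Annotation (editor's note, not Cython output): __getitem__ below dispatches on
 * the index type. An illustrative sketch of the three paths at the Cython level,
 * assuming a hypothetical 2-D int memoryview `mv`:
 *
 *     mv[...]        # Ellipsis: returns the memoryview itself
 *     mv[1, :]       # any slice present: memview_slice builds a new view
 *     mv[1, 2]       # all integers: get_item_pointer + convert_item_to_object
 */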
/*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":396 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":397 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":396 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":399 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 399, __pyx_L1_error) } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 399, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":402 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 402, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":403 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
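/* Annotation (editor's note): the freshly sliced memoryview produced by
 * memview_slice becomes the return value of __getitem__. */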
__pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":402 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":405 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == NULL)) __PYX_ERR(1, 405, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":406 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 406, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":395 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":408 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":409 * * def __setitem__(memoryview self, object index, object value): * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (likely(__pyx_t_1 != Py_None)) { PyObject* sequence = __pyx_t_1; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) 
__Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 409, __pyx_L1_error) } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 409, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_2; __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":411 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 411, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":412 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 412, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_obj = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":413 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 413, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":414 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":413 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L4; } /* "View.MemoryView":416 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 416, __pyx_L1_error) __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* "View.MemoryView":411 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = 
self.is_slice(value) * if obj: */ goto __pyx_L3; } /* "View.MemoryView":418 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L3:; /* "View.MemoryView":408 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":420 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":421 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":422 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":423 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 423, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":424 * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 424, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":423 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 423, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); 
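/* Annotation (editor's note): building the 3-tuple (obj, flags | PyBUF_ANY_CONTIGUOUS,
 * dtype_is_object); the remaining two slots are filled just below, and the tuple is
 * then used to call memoryview() on a non-memoryview assignment source. */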
__Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 423, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":422 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L11_try_end; __pyx_L4_error:; __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":425 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 425, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":426 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":422 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L11_try_end:; } /* "View.MemoryView":421 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":428 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":420 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":430 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice 
*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":434 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 434, __pyx_L1_error) /* "View.MemoryView":435 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 435, __pyx_L1_error) /* "View.MemoryView":436 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 436, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 436, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":434 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 434, __pyx_L1_error) /* "View.MemoryView":430 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":438 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct 
__pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; char const *__pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":440 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":445 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); /* "View.MemoryView":447 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_1) { /* "View.MemoryView":448 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":449 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":450 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 450, __pyx_L1_error) /* "View.MemoryView":449 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":451 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":447 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":453 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":455 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":456 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":457 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":456 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject 
*> value * else: */ goto __pyx_L8; } /* "View.MemoryView":459 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 459, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L8:; /* "View.MemoryView":463 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":464 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 464, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":463 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":465 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":468 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } /*exception exit:*/{ __Pyx_PyThreadState_declare __pyx_L6_error:; __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } __Pyx_PyThreadState_assign if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); } __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":438 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int 
array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":470 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":471 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) __PYX_ERR(1, 471, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":472 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 472, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":470 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":474 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; int __pyx_t_12; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":477 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 477, __pyx_L1_error) 
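/* Annotation (editor's note): with no static type information available,
 * convert_item_to_object falls back to the Python struct module and unpacks the
 * item's raw bytes at runtime, per the docstring above. */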
__Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":480 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 480, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":481 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":482 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":481 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":486 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":487 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 487, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":486 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":488 * if len(self.view.format) == 1: * return 
result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":483 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 483, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_12 = __Pyx_PyErr_ExceptionMatches(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_12) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9) < 0) __PYX_ERR(1, 483, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_9); /* "View.MemoryView":484 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 484, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 484, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":481 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":474 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":490 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject 
*__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; Py_ssize_t __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; char *__pyx_t_10; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":493 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":498 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":499 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 499, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":498 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":501 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); 
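/* Bound-method fast path: on CPython, `struct.pack` resolves to a bound
 * method, so the generated code extracts the underlying function and its
 * `self` (PyMethod_GET_FUNCTION / PyMethod_GET_SELF) and calls the
 * function directly, prepending `self` as an extra positional argument
 * (`__pyx_t_7 = 1` shifts the argument-tuple indices below). This avoids
 * a call through the temporary bound-method object. */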
__Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 501, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":503 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_7 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 503, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_9 = __pyx_v_bytesvalue; __pyx_t_11 = PyBytes_AS_STRING(__pyx_t_9); __pyx_t_12 = (__pyx_t_11 + PyBytes_GET_SIZE(__pyx_t_9)); for (__pyx_t_13 = __pyx_t_11; __pyx_t_13 < __pyx_t_12; __pyx_t_13++) { __pyx_t_10 = __pyx_t_13; __pyx_v_c = (__pyx_t_10[0]); /* "View.MemoryView":504 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_7; /* "View.MemoryView":503 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_7 = (__pyx_t_7 + 1); /* "View.MemoryView":504 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":490 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":507 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; Py_ssize_t *__pyx_t_2; char *__pyx_t_3; void *__pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":508 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":509 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_2 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_2; /* "View.MemoryView":508 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L3; } /* "View.MemoryView":511 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L3:; /* "View.MemoryView":513 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":514 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_2 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_2; /* "View.MemoryView":513 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L4; } /* "View.MemoryView":516 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L4:; /* "View.MemoryView":518 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":519 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_2 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_2; /* "View.MemoryView":518 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L5; } /* "View.MemoryView":521 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L5:; /* "View.MemoryView":523 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":524 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: 
* info.format = NULL */ __pyx_t_3 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_3; /* "View.MemoryView":523 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L6; } /* "View.MemoryView":526 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L6:; /* "View.MemoryView":528 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_4 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":529 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_5 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_5; /* "View.MemoryView":530 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = 0 */ __pyx_t_6 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_6; /* "View.MemoryView":531 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = 0 * info.obj = self */ __pyx_t_6 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_6; /* "View.MemoryView":532 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = 0 # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_v_info->readonly = 0; /* "View.MemoryView":533 * info.len = self.view.len * info.readonly = 0 * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":507 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* function exit code */ __pyx_r = 0; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":539 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":540 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * 
transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 540, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 540, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":541 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == 0)) __PYX_ERR(1, 541, __pyx_L1_error) /* "View.MemoryView":542 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":539 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":545 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":546 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":545 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":549 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj 
*__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":550 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 550, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":549 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":553 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":554 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":556 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 556, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 556, __pyx_L1_error) /* "View.MemoryView":554 * @property * def strides(self): * if 
self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":558 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":553 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":561 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":562 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":563 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 563, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__15, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 563, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":562 * @property * def 
suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":565 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":561 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":568 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":569 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 569, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":568 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":572 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject 
*__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":573 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 573, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":572 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":576 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":577 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":576 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":580 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static 
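/* The `size` property below computes the element count as the product of
 * view.shape[0 .. ndim) on first access and caches it in self._size, so
 * subsequent reads return the cached Python integer without recomputing. */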
PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":581 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":582 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":584 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 584, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":585 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 585, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":587 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":581 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":589 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":580 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":591 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { 
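/* sq_length slot wrapper for memoryview.__len__: the implementation below
 * returns shape[0] when ndim >= 1 and 0 for zero-dimensional views, so
 * len(mv) never raises here. */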
Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":592 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":593 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":592 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":595 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":591 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":597 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":598 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":599 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self)); __pyx_t_3 = 
__Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":598 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":597 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":601 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":602 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":601 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r 
object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":605 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":608 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":609 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 609, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":605 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; 
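/* is_f_contig mirrors is_c_contig above but tests Fortran ('F',
 * column-major) order: both fetch a __Pyx_memviewslice via
 * get_slice_from_memview and query slice_is_contig without copying data. */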
__Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":614 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":615 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 615, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":611 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":617 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":619 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":621 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":622 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":627 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ 
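/* At this point copy() holds a freshly allocated C-contiguous copy in
 * `mslice` ('c' order, PyBUF_C_CONTIGUOUS forced on, PyBUF_F_CONTIGUOUS
 * masked out of the flags); the call below wraps that slice in a new
 * Python-level memoryview that keeps the copied buffer alive. */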
__Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 627, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":617 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":629 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":631 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":633 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":634 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 634, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":639 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 639, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":629 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); 
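/* Standard generated error exit for copy_fortran: the temporaries have
 * been released above, __Pyx_AddTraceback below records the mapped Cython
 * source position (View.MemoryView via __pyx_lineno/__pyx_filename), and
 * NULL is returned to propagate the exception. */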
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":644 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 644, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 644, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 644, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 644, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":645 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":646 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":643 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":649 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int 
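/* memoryview_check is the compiled form of isinstance(o, memoryview):
 * Cython lowers it to a direct __Pyx_TypeCheck against
 * __pyx_memoryview_type, with no Python-level attribute lookup. */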
__pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":650 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":649 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":652 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":657 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":658 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":657 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":660 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":662 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 662, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":663 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":664 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":665 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 665, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 665, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 665, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 665, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":666 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":667 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * 
result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":668 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(1, 668, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__16); __Pyx_GIVEREF(__pyx_slice__16); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__16); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 668, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":669 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":667 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":671 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__17); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 671, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":672 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":666 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":674 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":675 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_11, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_Raise(__pyx_t_7, 0, 0, 0); 
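/* Note on the Ellipsis handling compiled above: the first `...` in an
 * index tuple expands to `ndim - len(tup) + 1` full slices, so the
 * completed index comes out to exactly ndim entries; any further
 * Ellipsis contributes just a single slice(None). Worked example for
 * ndim == 4:
 *
 *     m[0, ..., -1]  ->  tup = (0, Ellipsis, -1), len(tup) == 3
 *     expansion      ->  4 - 3 + 1 == 2 full slices
 *     result         ->  (0, slice(None), slice(None), -1)
 *
 * Items that are neither Ellipsis, slice, nor integer-like
 * (PyIndex_Check) take the TypeError path raised directly above. */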
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __PYX_ERR(1, 675, __pyx_L1_error) /* "View.MemoryView":674 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":677 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":678 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 678, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":665 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":680 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(1, 680, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":681 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__18); __Pyx_GIVEREF(__pyx_slice__18); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__18); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":681 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":684 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L0; /* "View.MemoryView":652 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":686 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":687 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":688 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * 
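 * Note: a non-negative suboffset marks an "indirect" (pointer-chasing,
 * PIL-style) dimension in the PEP 3118 buffer model. The routines below
 * that do plain pointer arithmetic funnel through this check first, so
 * indirect buffers fail loudly with ValueError instead of silently
 * misreading memory.
 *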
if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":689 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 689, __pyx_L1_error) /* "View.MemoryView":688 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":686 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":696 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":697 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":704 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))); /* "View.MemoryView":708 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 708, __pyx_L1_error) } } #endif /* "View.MemoryView":710 * assert memview.view.ndim > 0 * * if isinstance(memview, 
_memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":711 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 711, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":712 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":710 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":714 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":715 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":721 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":722 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":727 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":728 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":732 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 732, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 732, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 732, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 732, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if 
CYTHON_COMPILING_IN_CPYTHON __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 732, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 732, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 732, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":733 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":737 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 737, __pyx_L1_error) /* "View.MemoryView":734 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(1, 734, __pyx_L1_error) /* "View.MemoryView":733 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":740 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":741 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":742 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":743 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":744 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":740 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":746 * 
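 * Note: this `else` is the third arm of the dispatch inside the
 * enumerate(indices) loop above. An integer index went through
 * slice_memviewslice(..., is_slice=False) and consumes a source
 * dimension without emitting one; `None` inserted a new length-1 axis
 * with stride 0 (numpy.newaxis semantics); slice objects land here.
 * The `or 0` idiom below only supplies a placeholder value -- the
 * have_start / have_stop / have_step flags computed just after it are
 * what distinguish "component absent" from an explicit 0, which is a
 * perfectly valid start or stop.
 *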
new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 746, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":747 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 747, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 747, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 747, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":748 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 748, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 748, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 748, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":750 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 750, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":751 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 751, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":752 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 752, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":754 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(1, 754, __pyx_L1_error) /* "View.MemoryView":760 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":732 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":762 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":763 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":764 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 764, __pyx_L1_error) } /* "View.MemoryView":765 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 765, __pyx_L1_error) } /* "View.MemoryView":763 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 763, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 763, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":762 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":768 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":769 
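 * Note: memview_slice returns through one of two paths. If the source
 * was already a _memoryviewslice, the new view inherits its
 * to_object_func / to_dtype_func item converters; otherwise NULL/NULL
 * is passed and item access falls back to the generic conversion (see
 * convert_item_to_object at the end of this section, which defers to
 * memoryview.convert_item_to_object when to_object_func is NULL).
 *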
* else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 768, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":768 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 768, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":696 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":793 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":813 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":815 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":816 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":815 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":817 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":818 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 818, __pyx_L1_error) /* "View.MemoryView":817 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, 
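 * Note: this is the scalar-index path (is_slice == False). A negative
 * index gets exactly one wraparound pass (start += shape); anything
 * still outside [0, shape) is a hard IndexError, the same rule CPython
 * applies to sequence indexing. Out-of-range slice bounds, by contrast,
 * are clamped further below rather than raised on.
 *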
"Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":813 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":821 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":823 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":824 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 824, __pyx_L1_error) /* "View.MemoryView":823 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":827 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":828 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":829 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":830 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":831 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":830 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":828 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":832 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":833 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":834 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":833 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":836 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":832 * if start < 0: * start = 0 * elif start >= 
shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":827 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":838 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":839 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":838 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":841 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":843 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":844 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":846 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":846 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":844 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":848 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":849 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":848 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":843 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":851 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":852 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":851 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":854 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":856 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":857 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":856 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":861 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * 
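 * Note: with cython.cdivision(True), the C `/` truncates toward zero,
 * and the fix-up just below ("if (stop - start) - step * new_shape:
 * new_shape += 1") turns that truncation into the count slicing needs,
 * i.e. new_shape == len(range(start, stop, step)). Example with
 * start=0, stop=5, step=2: (5-0)/2 truncates to 2, the remainder 1 is
 * nonzero, so new_shape becomes 3 (elements 0, 2, 4). The later
 * `if new_shape < 0: new_shape = 0` covers empty slices, e.g.
 * start=4, stop=0, step=1.
 *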
* if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":863 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":864 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":863 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":866 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":867 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":866 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":870 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":871 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":872 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":875 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":876 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":875 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":878 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":880 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":882 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":883 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char 
**)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":882 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":885 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":886 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 885, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":881 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":888 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":880 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":890 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":793 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":896 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":898 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":899 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":902 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* 
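Note: pybuffer_index resolves a single integer index in one dimension
 * of a raw Py_buffer: pick shape/stride for the dimension (a 0-d buffer
 * is treated as view.len / itemsize items of stride itemsize), wrap a
 * negative index once, bounds-check both ends, then compute
 * resultp = bufp + index * stride, adding one pointer dereference when
 * the dimension carries a suboffset.
 *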
"View.MemoryView":903 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 903, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 903, __pyx_L1_error) } __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); /* "View.MemoryView":904 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":902 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":906 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":907 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":908 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":909 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":908 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":911 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":912 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":913 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":914 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(1, 914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 914, __pyx_L1_error) /* "View.MemoryView":913 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":911 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":916 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 917, __pyx_L1_error) /* "View.MemoryView":916 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":919 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":920 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":921 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":920 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":923 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":896 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; 
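/* Note: the address computation above is the PEP 3118 strided-access
 * rule in two steps. A minimal standalone sketch (illustrative names,
 * 2-d case):
 *
 *     // element (i, j) of a strided two-dimensional buffer
 *     char *p = (char *)buf + i * strides[0] + j * strides[1];
 *
 * and when a dimension k is indirect (suboffsets[k] >= 0), each step
 * through it adds one dereference:
 *
 *     p = *(char **)p + suboffsets[k];
 *
 * which is exactly the resultp fix-up guarded by `if suboffset >= 0`
 * in the code above. */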
__pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":929 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; /* "View.MemoryView":930 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":932 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":933 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":937 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_3; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":938 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":939 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_4 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_5 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_4; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":940 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_5 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_4 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_5; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_4; /* "View.MemoryView":942 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L6_bool_binop_done; } __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L6_bool_binop_done:; if (__pyx_t_6) { /* "View.MemoryView":943 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_8 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(1, 943, 
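/* Note: transposing a strided view is just reversing shape[] and
 * strides[] in place -- no element moves. E.g. a C-contiguous (3, 4)
 * slice with strides (4 * itemsize, itemsize) becomes a (4, 3) view
 * with strides (itemsize, 4 * itemsize). The pairwise swap above is
 * the compiled form of the Cython tuple swap; indirect dimensions are
 * rejected via the ValueError above, since their suboffsets are tied
 * to a particular pointer-chasing level. */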
__pyx_L1_error) /* "View.MemoryView":942 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":945 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":929 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":962 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":963 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":962 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":965 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":966 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":967 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 967, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":966 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: 
# <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":969 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 969, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":965 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":971 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":972 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":973 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == 0)) __PYX_ERR(1, 973, __pyx_L1_error) /* "View.MemoryView":972 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":975 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 975, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":971 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":978 * * @property * def base(self): 
# <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":979 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":978 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":993 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":994 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "View.MemoryView":993 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":999 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 999, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 999, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, 
NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 999, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1001 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1002 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1004 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1004, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1005 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1007 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1008 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1009 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1010 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1011 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * result.flags = PyBUF_RECORDS */ Py_INCREF(Py_None); /* "View.MemoryView":1013 * Py_INCREF(Py_None) * * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1015 * result.flags = PyBUF_RECORDS * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1016 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ 
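/* A minimal sketch (the helper name "sketch_view_len" is assumed for
 * illustration and is not part of the generated module) of the view.len
 * computation that memoryview_fromslice performs a few statements below:
 * the byte length of the exposed buffer is the itemsize multiplied by every
 * extent of the slice. Fenced behind #if 0 so it never affects compilation. */
#if 0
static Py_ssize_t sketch_view_len(const Py_ssize_t *shape, int ndim,
                                  Py_ssize_t itemsize)
{
    Py_ssize_t len = itemsize;
    int i;
    for (i = 0; i < ndim; i++)
        len *= shape[i];  /* total bytes spanned by the slice */
    return len;
}
#endif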
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1019 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1020 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1021 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1022 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1023 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L5_break; /* "View.MemoryView":1021 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L5_break:; /* "View.MemoryView":1025 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1026 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1026, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1027 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1027, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1029 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = 
to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1030 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1032 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":985 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1035 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1038 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1039 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1039, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1040 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1038 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1042 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1043 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1035 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * 
cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 0, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1046 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1050 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1051 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1052 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1054 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1055 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1057 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_dim = __pyx_t_3; /* "View.MemoryView":1058 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1059 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1060 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_4 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_4 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_4; } /* "View.MemoryView":1046 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # 
<<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1066 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1067 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1067, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1070 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1077 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1078 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1079 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1077 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1081 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1082 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1084 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1086 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1070 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1092 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1093 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1094 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1093 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1096 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1092 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1099 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1104 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1105 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1107 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1108 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1109 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1110 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1108 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1112 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_1; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1113 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * 
f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1114 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1115 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1113 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1117 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1118 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1117 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1120 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1099 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1123 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; /* "View.MemoryView":1130 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1131 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1132 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1133 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1135 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim 
== 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1136 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1137 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1136 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1138 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)); /* "View.MemoryView":1136 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1140 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1141 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize); /* "View.MemoryView":1142 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1143 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1135 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1145 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1146 * else: * for i in 
range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1150 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1151 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1123 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1153 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1156 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1153 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1160 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1163 * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i * cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1165 * cdef Py_ssize_t size = src.memview.view.itemsize * * for i in range(ndim): # <<<<<<<<<<<<<< * size *= src.shape[i] * */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1166 * * for i in range(ndim): * size *= src.shape[i] # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i])); } /* "View.MemoryView":1168 * size *= src.shape[i] * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1160 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice 
*src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1171 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1180 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1181 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_idx = __pyx_t_3; /* "View.MemoryView":1182 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1183 * for idx in range(ndim): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1180 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1185 * stride = stride * shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1L; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1186 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1187 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1189 * stride = stride * shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1171 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1192 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; /* "View.MemoryView":1203 * cdef void *result * * cdef size_t itemsize = 
src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1204 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1206 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1207 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1208 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 1208, __pyx_L1_error) /* "View.MemoryView":1207 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1211 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1212 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1213 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1214 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1215 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1217 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ __pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order); /* "View.MemoryView":1221 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1222 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1223 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1222 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* 
"View.MemoryView":1225 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1226 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size); /* "View.MemoryView":1225 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1228 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1230 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1192 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1235 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1238 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1237 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, 
extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 1237, __pyx_L1_error) /* "View.MemoryView":1235 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1241 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1242 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1242, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1242, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1242, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_2) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1242, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); 
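/* A minimal sketch (hypothetical name "sketch_err_extents"; illustrative
 * only, not the generated code's method) of the pattern the _err* helpers
 * here follow to raise a Python exception from nogil code: re-acquire the
 * GIL, set the exception, release the GIL, and return the -1 sentinel that
 * the 'except -1 with gil' declaration demands. PyErr_Format condenses the
 * tuple-building and %-formatting dance the generated code spells out.
 * Fenced behind #if 0 so it never affects compilation. */
#if 0
static int sketch_err_extents(int dim, Py_ssize_t extent1, Py_ssize_t extent2)
{
#ifdef WITH_THREAD
    PyGILState_STATE gil = PyGILState_Ensure();   /* must hold the GIL */
#endif
    PyErr_Format(PyExc_ValueError,
                 "got differing extents in dimension %d (got %zd and %zd)",
                 dim, extent1, extent2);
#ifdef WITH_THREAD
    PyGILState_Release(gil);
#endif
    return -1;
}
#endif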
__pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1242, __pyx_L1_error) /* "View.MemoryView":1241 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1245 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1246 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":1247 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_5) { __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 1247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1247, __pyx_L1_error) /* "View.MemoryView":1246 * 
@cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1249 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1249, __pyx_L1_error) } /* "View.MemoryView":1245 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1252 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; void *__pyx_t_6; int __pyx_t_7; /* "View.MemoryView":1260 * Check for overlapping memory and verify the shapes. 
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1261 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1263 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1264 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1265 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1268 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1269 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1268 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1270 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1271 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1270 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1273 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1275 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1276 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { 
/* "View.MemoryView":1277 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1278 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1279 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1277 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1281 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 1281, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1276 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1283 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1284 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_4 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 1284, __pyx_L1_error) /* "View.MemoryView":1283 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1286 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1288 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1289 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1288 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1291 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == NULL)) __PYX_ERR(1, 1291, __pyx_L1_error) 
__pyx_v_tmpdata = __pyx_t_6; /* "View.MemoryView":1292 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1286 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1294 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1297 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1298 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1297 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1299 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1299 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1302 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1304 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1305 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)); /* "View.MemoryView":1306 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1307 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # 
<<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1308 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1302 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1294 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1310 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_7 = (__pyx_t_2 != 0); if (__pyx_t_7) { /* "View.MemoryView":1313 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == 0)) __PYX_ERR(1, 1313, __pyx_L1_error) /* "View.MemoryView":1314 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == 0)) __PYX_ERR(1, 1314, __pyx_L1_error) /* "View.MemoryView":1310 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1316 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1317 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1318 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1320 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1321 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1252 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1324 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int 
__pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; /* "View.MemoryView":1328 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1330 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1331 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1332 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1333 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1335 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1336 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1337 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1338 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1324 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1346 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1350 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1351 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, 
__pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1350 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1346 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1355 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1358 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1355 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1361 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; int __pyx_t_3; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1365 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1366 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_3 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_3) { /* "View.MemoryView":1367 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_3 = (__pyx_v_inc != 0); if (__pyx_t_3) { /* "View.MemoryView":1368 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1367 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1370 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { 
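/* Decref branch: drop the reference held by this slot, e.g. when
   refcount_copying(..., inc=False) clears a destination slice of
   object pointers before it is overwritten. */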
Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1366 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1372 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1373 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1375 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1361 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1381 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1384 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1385 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1387 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1381 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1391 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; /* "View.MemoryView":1395 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] 
* */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1396 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1398 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1399 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1400 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize); /* "View.MemoryView":1401 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1398 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1403 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1404 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1406 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1391 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) { Py_DECREF(o); o = 0; } return o; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject 
*__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { 0, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { 0, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "implicit._implicit.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) 
PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "implicit._implicit.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) { Py_DECREF(o); o = 0; } return o; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); 
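/* The refcount was temporarily bumped around the user-level __dealloc__
   so that incref/decref pairs executed inside it cannot re-enter
   deallocation of `o`; any in-flight exception saved by PyErr_Fetch is
   restored below. */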
PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", 
__pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "implicit._implicit.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj 
*)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "implicit._implicit._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif "_implicit", 0, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static 
__Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_, __pyx_k_, sizeof(__pyx_k_), 0, 0, 1, 0}, {&__pyx_n_s_A, __pyx_k_A, sizeof(__pyx_k_A), 0, 0, 1, 1}, {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_n_s_AttributeError, __pyx_k_AttributeError, sizeof(__pyx_k_AttributeError), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Cui, __pyx_k_Cui, sizeof(__pyx_k_Cui), 0, 0, 1, 1}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Expected_at_least_d_arguments, __pyx_k_Expected_at_least_d_arguments, sizeof(__pyx_k_Expected_at_least_d_arguments), 0, 0, 1, 0}, {&__pyx_kp_s_Function_call_with_ambiguous_arg, __pyx_k_Function_call_with_ambiguous_arg, sizeof(__pyx_k_Function_call_with_ambiguous_arg), 0, 0, 1, 0}, {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_kp_s_No_matching_signature_found, __pyx_k_No_matching_signature_found, sizeof(__pyx_k_No_matching_signature_found), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_kp_s_Singular_matrix_err_i_on_row_i, __pyx_k_Singular_matrix_err_i_on_row_i, sizeof(__pyx_k_Singular_matrix_err_i_on_row_i), 0, 0, 1, 0}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_X, __pyx_k_X, sizeof(__pyx_k_X), 0, 0, 1, 1}, {&__pyx_n_s_Y, __pyx_k_Y, sizeof(__pyx_k_Y), 0, 0, 1, 1}, {&__pyx_n_s_YtY, __pyx_k_YtY, sizeof(__pyx_k_YtY), 0, 0, 1, 1}, {&__pyx_kp_s__3, __pyx_k__3, sizeof(__pyx_k__3), 0, 0, 1, 0}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_args, __pyx_k_args, sizeof(__pyx_k_args), 0, 0, 1, 1}, {&__pyx_n_s_b, __pyx_k_b, 
sizeof(__pyx_k_b), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_confidence, __pyx_k_confidence, sizeof(__pyx_k_confidence), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_data, __pyx_k_data, sizeof(__pyx_k_data), 0, 0, 1, 1}, {&__pyx_n_s_defaults, __pyx_k_defaults, sizeof(__pyx_k_defaults), 0, 0, 1, 1}, {&__pyx_n_s_dot, __pyx_k_dot, sizeof(__pyx_k_dot), 0, 0, 1, 1}, {&__pyx_n_s_double, __pyx_k_double, sizeof(__pyx_k_double), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_err, __pyx_k_err, sizeof(__pyx_k_err), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_eye, __pyx_k_eye, sizeof(__pyx_k_eye), 0, 0, 1, 1}, {&__pyx_n_s_factors, __pyx_k_factors, sizeof(__pyx_k_factors), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, {&__pyx_n_s_float32, __pyx_k_float32, sizeof(__pyx_k_float32), 0, 0, 1, 1}, {&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_kp_s_home_escherba_dev_vevo_tyrion_e, __pyx_k_home_escherba_dev_vevo_tyrion_e, sizeof(__pyx_k_home_escherba_dev_vevo_tyrion_e), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_implicit__implicit, __pyx_k_implicit__implicit, sizeof(__pyx_k_implicit__implicit), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_index, __pyx_k_index, sizeof(__pyx_k_index), 0, 0, 1, 1}, {&__pyx_n_s_indices, __pyx_k_indices, sizeof(__pyx_k_indices), 0, 0, 1, 1}, {&__pyx_n_s_indptr, __pyx_k_indptr, sizeof(__pyx_k_indptr), 0, 0, 1, 1}, {&__pyx_n_s_initialA, __pyx_k_initialA, sizeof(__pyx_k_initialA), 0, 0, 1, 1}, {&__pyx_n_s_initialB, __pyx_k_initialB, sizeof(__pyx_k_initialB), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_kind, __pyx_k_kind, sizeof(__pyx_k_kind), 0, 0, 1, 1}, {&__pyx_n_s_kwargs, __pyx_k_kwargs, sizeof(__pyx_k_kwargs), 0, 0, 1, 1}, {&__pyx_n_s_least_squares, __pyx_k_least_squares, sizeof(__pyx_k_least_squares), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 
0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndarray, __pyx_k_ndarray, sizeof(__pyx_k_ndarray), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_num_threads, __pyx_k_num_threads, sizeof(__pyx_k_num_threads), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_one, __pyx_k_one, sizeof(__pyx_k_one), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_pivot, __pyx_k_pivot, sizeof(__pyx_k_pivot), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_regularization, __pyx_k_regularization, sizeof(__pyx_k_regularization), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_signatures, __pyx_k_signatures, sizeof(__pyx_k_signatures), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_split, __pyx_k_split, sizeof(__pyx_k_split), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_strip, __pyx_k_strip, sizeof(__pyx_k_strip), 0, 0, 1, 1}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_temp, __pyx_k_temp, sizeof(__pyx_k_temp), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_transpose, __pyx_k_transpose, sizeof(__pyx_k_transpose), 0, 0, 1, 1}, {&__pyx_n_s_u, __pyx_k_u, sizeof(__pyx_k_u), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_users, __pyx_k_users, sizeof(__pyx_k_users), 0, 0, 1, 1}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {&__pyx_n_s_zip, __pyx_k_zip, sizeof(__pyx_k_zip), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(0, 33, __pyx_L1_error) __pyx_builtin_AttributeError = __Pyx_GetBuiltinName(__pyx_n_s_AttributeError); if (!__pyx_builtin_AttributeError) __PYX_ERR(0, 33, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(0, 33, __pyx_L1_error) 
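/* Builtins used anywhere in the module (zip, range, enumerate, the exception
   types, ...) are resolved once here and cached in C globals, so each later
   use is a plain pointer read rather than a runtime dict lookup. The first
   argument to __PYX_ERR is a source-file index: 0 refers back into
   implicit/_implicit.pyx and 1 into the View.MemoryView utility source, as
   the embedded source comments below indicate. */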
__pyx_builtin_zip = __Pyx_GetBuiltinName(__pyx_n_s_zip); if (!__pyx_builtin_zip) __PYX_ERR(0, 33, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 65, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 90, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 146, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 149, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 396, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 599, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 818, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "implicit/_implicit.pyx":33 * * @cython.boundscheck(False) * def least_squares(Cui, floating [:, :] X, floating [:, :] Y, double regularization, int num_threads): # <<<<<<<<<<<<<< * dtype = numpy.float64 if floating is double else numpy.float32 * */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s__3); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_No_matching_signature_found); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Function_call_with_ambiguous_arg); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "View.MemoryView":131 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":134 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":137 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":146 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 146, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":174 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 174, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":190 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "View.MemoryView":484 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 484, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "View.MemoryView":556 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 556, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":563 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__15 = PyTuple_New(1); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 563, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__15); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__15, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__15); /* "View.MemoryView":668 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 668, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__16); __Pyx_GIVEREF(__pyx_slice__16); /* "View.MemoryView":671 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ __pyx_slice__17 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__17)) __PYX_ERR(1, 671, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__17); __Pyx_GIVEREF(__pyx_slice__17); /* "View.MemoryView":682 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_slice__18 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__18)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__18); __Pyx_GIVEREF(__pyx_slice__18); /* "View.MemoryView":689 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__19 = PyTuple_Pack(1, 
__pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); /* "implicit/_implicit.pyx":33 * * @cython.boundscheck(False) * def least_squares(Cui, floating [:, :] X, floating [:, :] Y, double regularization, int num_threads): # <<<<<<<<<<<<<< * dtype = numpy.float64 if floating is double else numpy.float32 * */ __pyx_tuple__20 = PyTuple_Pack(25, __pyx_n_s_Cui, __pyx_n_s_X, __pyx_n_s_Y, __pyx_n_s_regularization, __pyx_n_s_num_threads, __pyx_n_s_dtype, __pyx_n_s_indptr, __pyx_n_s_indices, __pyx_n_s_data, __pyx_n_s_users, __pyx_n_s_factors, __pyx_n_s_u, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_index, __pyx_n_s_err, __pyx_n_s_one, __pyx_n_s_confidence, __pyx_n_s_temp, __pyx_n_s_YtY, __pyx_n_s_initialA, __pyx_n_s_initialB, __pyx_n_s_A, __pyx_n_s_b, __pyx_n_s_pivot); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__20); __Pyx_GIVEREF(__pyx_tuple__20); __pyx_codeobj__21 = (PyObject*)__Pyx_PyCode_New(5, 0, 25, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__20, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_escherba_dev_vevo_tyrion_e, __pyx_n_s_least_squares, 33, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__21)) __PYX_ERR(0, 33, __pyx_L1_error) /* "View.MemoryView":282 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "View.MemoryView":283 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 283, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "View.MemoryView":284 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "View.MemoryView":287 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* "View.MemoryView":288 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if 
(unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC init_implicit(void); /*proto*/ PyMODINIT_FUNC init_implicit(void) #else PyMODINIT_FUNC PyInit__implicit(void); /*proto*/ PyMODINIT_FUNC PyInit__implicit(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; static PyThread_type_lock __pyx_t_6[8]; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__implicit(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("_implicit", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_implicit___implicit) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "implicit._implicit")) { if (unlikely(PyDict_SetItemString(modules, "implicit._implicit", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) __pyx_type___pyx_array.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 275, __pyx_L1_error) __pyx_type___pyx_MemviewEnum.tp_print = 0; __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 326, __pyx_L1_error) __pyx_type___pyx_memoryview.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 326, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char 
*))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 951, __pyx_L1_error) __pyx_type___pyx_memoryviewslice.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 951, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; /*--- Type import code ---*/ /*--- Variable import code ---*/ /*--- Function import code ---*/ __pyx_t_1 = __Pyx_ImportModule("scipy.linalg.cython_lapack"); if (!__pyx_t_1) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "dgesv", (void (**)(void))&__pyx_f_5scipy_6linalg_13cython_lapack_dgesv, "void (int *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, int *)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "dposv", (void (**)(void))&__pyx_f_5scipy_6linalg_13cython_lapack_dposv, "void (char *, int *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, int *)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "sgesv", (void (**)(void))&__pyx_f_5scipy_6linalg_13cython_lapack_sgesv, "void (int *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_s *, int *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_s *, int *, int *)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "sposv", (void (**)(void))&__pyx_f_5scipy_6linalg_13cython_lapack_sposv, "void (char *, int *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_s *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_s *, int *, int *)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) Py_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = __Pyx_ImportModule("scipy.linalg.cython_blas"); if (!__pyx_t_2) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_2, "daxpy", (void (**)(void))&__pyx_f_5scipy_6linalg_11cython_blas_daxpy, "void (int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_2, "saxpy", (void (**)(void))&__pyx_f_5scipy_6linalg_11cython_blas_saxpy, "void (int *, __pyx_t_5scipy_6linalg_11cython_blas_s *, __pyx_t_5scipy_6linalg_11cython_blas_s *, int *, __pyx_t_5scipy_6linalg_11cython_blas_s *, int *)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) Py_DECREF(__pyx_t_2); __pyx_t_2 = 0; /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "implicit/_implicit.pyx":1 * import numpy # <<<<<<<<<<<<<< * import cython * from cython cimport floating */ __pyx_t_3 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_d, __pyx_n_s_numpy, __pyx_t_3) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "implicit/_implicit.pyx":33 * * @cython.boundscheck(False) * def least_squares(Cui, floating [:, :] X, floating [:, :] Y, double regularization, int num_threads): # <<<<<<<<<<<<<< * dtype = numpy.float64 if floating is double else numpy.float32 * */ 
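/* `least_squares` is declared with the Cython fused type `floating`, so the
   statements below build two concrete specializations (one for float32
   memoryviews, one for float64), record them in a __signatures__ dict under
   the keys "float" and "double", and publish a single dispatching wrapper in
   the module dict. From Python, dispatch is driven by the dtype of the
   arrays passed in. A minimal call sketch, mirroring the signature in the
   comment above (the shapes, density, and argument values here are
   illustrative assumptions, not taken from this file):

       import numpy
       import scipy.sparse
       from implicit._implicit import least_squares

       Cui = scipy.sparse.random(100, 50, density=0.1, format='csr')
       X = numpy.zeros((100, 8), dtype=numpy.float32)
       Y = numpy.zeros((50, 8), dtype=numpy.float32)
       least_squares(Cui, X, Y, 0.01, 4)   # picks the "float" specialization
*/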
__pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __pyx_FusedFunction_NewEx(&__pyx_fuse_0__pyx_mdef_8implicit_9_implicit_3least_squares, 0, __pyx_n_s_least_squares, NULL, __pyx_n_s_implicit__implicit, __pyx_d, ((PyObject *)__pyx_codeobj__21)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_4, __pyx_empty_tuple); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_float, __pyx_t_4) < 0) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __pyx_FusedFunction_NewEx(&__pyx_fuse_1__pyx_mdef_8implicit_9_implicit_5least_squares, 0, __pyx_n_s_least_squares, NULL, __pyx_n_s_implicit__implicit, __pyx_d, ((PyObject *)__pyx_codeobj__21)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_4, __pyx_empty_tuple); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_double, __pyx_t_4) < 0) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __pyx_FusedFunction_NewEx(&__pyx_mdef_8implicit_9_implicit_1least_squares, 0, __pyx_n_s_least_squares, NULL, __pyx_n_s_implicit__implicit, __pyx_d, ((PyObject *)__pyx_codeobj__21)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_4, __pyx_empty_tuple); ((__pyx_FusedFunctionObject *) __pyx_t_4)->__signatures__ = __pyx_t_3; __Pyx_GIVEREF(__pyx_t_3); if (PyDict_SetItem(__pyx_d, __pyx_n_s_least_squares, __pyx_t_4) < 0) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "implicit/_implicit.pyx":1 * import numpy # <<<<<<<<<<<<<< * import cython * from cython cimport floating */ __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_5) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":207 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_5 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_5) < 0) __PYX_ERR(1, 207, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":282 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":283 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_XGOTREF(strided); 
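/* The five Enum singletons created in this block (generic, strided, indirect,
   contiguous, indirect_contiguous) are sentinel objects that tag how each
   memoryview dimension may be accessed (direct vs. indirect, strided vs.
   contiguous); the memoryview machinery below uses them as layout markers
   when acquiring and copying buffers. */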
__Pyx_DECREF_SET(strided, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":284 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":287 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":288 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":312 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":313 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_6[0] = PyThread_allocate_lock(); __pyx_t_6[1] = PyThread_allocate_lock(); __pyx_t_6[2] = PyThread_allocate_lock(); __pyx_t_6[3] = PyThread_allocate_lock(); __pyx_t_6[4] = PyThread_allocate_lock(); __pyx_t_6[5] = PyThread_allocate_lock(); __pyx_t_6[6] = PyThread_allocate_lock(); __pyx_t_6[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_6, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":535 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 535, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_5) < 0) __PYX_ERR(1, 535, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":981 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_5) < 0) __PYX_ERR(1, 
981, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "View.MemoryView":1391 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init implicit._implicit", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init implicit._implicit"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* SaveResetException */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyErrExceptionMatches */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; return PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetException */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, 
PyObject **tb) { #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { #endif PyObject *local_type, *local_value, *local_tb; #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_COMPILING_IN_CPYTHON tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) 
Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* UnicodeAsUCS4 */ static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject* x) { Py_ssize_t length; #if CYTHON_PEP393_ENABLED length = PyUnicode_GET_LENGTH(x); if (likely(length == 1)) { return PyUnicode_READ_CHAR(x, 0); } #else length = PyUnicode_GET_SIZE(x); if (likely(length == 1)) { return PyUnicode_AS_UNICODE(x)[0]; } #if Py_UNICODE_SIZE == 2 else if (PyUnicode_GET_SIZE(x) == 2) { Py_UCS4 high_val = PyUnicode_AS_UNICODE(x)[0]; if (high_val >= 0xD800 && high_val <= 0xDBFF) { Py_UCS4 low_val = PyUnicode_AS_UNICODE(x)[1]; if (low_val >= 0xDC00 && low_val <= 0xDFFF) { return 0x10000 + (((high_val & ((1<<10)-1)) << 
10) | (low_val & ((1<<10)-1))); } } } #endif #endif PyErr_Format(PyExc_ValueError, "only single character unicode strings can be converted to Py_UCS4, " "got length %" CYTHON_FORMAT_SSIZE_T "d", length); return (Py_UCS4)-1; } /* object_ord */ static long __Pyx__PyObject_Ord(PyObject* c) { Py_ssize_t size; if (PyBytes_Check(c)) { size = PyBytes_GET_SIZE(c); if (likely(size == 1)) { return (unsigned char) PyBytes_AS_STRING(c)[0]; } #if PY_MAJOR_VERSION < 3 } else if (PyUnicode_Check(c)) { return (long)__Pyx_PyUnicode_AsPy_UCS4(c); #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) } else if (PyByteArray_Check(c)) { size = PyByteArray_GET_SIZE(c); if (likely(size == 1)) { return (unsigned char) PyByteArray_AS_STRING(c)[0]; } #endif } else { PyErr_Format(PyExc_TypeError, "ord() expected string of length 1, but %.200s found", c->ob_type->tp_name); return (long)(Py_UCS4)-1; } PyErr_Format(PyExc_TypeError, "ord() expected a character, but string of length %zd found", size); return (long)(Py_UCS4)-1; } /* SetItemInt */ static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { int r; if (!j) return -1; r = PyObject_SetItem(o, j, v); Py_DECREF(j); return r; } static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o)); if ((!boundscheck) || likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject* old = PyList_GET_ITEM(o, n); Py_INCREF(v); PyList_SET_ITEM(o, n, v); Py_DECREF(old); return 1; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_ass_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return -1; PyErr_Clear(); } } return m->sq_ass_item(o, i, v); } } #else #if CYTHON_COMPILING_IN_PYPY if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) { #else if (is_list || PySequence_Check(o)) { #endif return PySequence_SetItem(o, i, v); } #endif return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); } /* IterFinish */ static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if 
(unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* PyObjectCallMethod0 */ static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) { PyObject *method, *result = NULL; method = __Pyx_PyObject_GetAttrStr(obj, method_name); if (unlikely(!method)) goto bad; #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyMethod_Check(method))) { PyObject *self = PyMethod_GET_SELF(method); if (likely(self)) { PyObject *function = PyMethod_GET_FUNCTION(method); result = __Pyx_PyObject_CallOneArg(function, self); Py_DECREF(method); return result; } } #endif result = __Pyx_PyObject_CallNoArg(method); Py_DECREF(method); bad: return result; } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* UnpackItemEndCheck */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* UnpackTupleError */ static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { if (t == Py_None) { __Pyx_RaiseNoneNotIterableError(); } else if (PyTuple_GET_SIZE(t) < index) { __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); } else { __Pyx_RaiseTooManyValuesError(index); } } /* UnpackTuple2 */ static CYTHON_INLINE int __Pyx_unpack_tuple2(PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, int is_tuple, int has_known_size, int decref_tuple) { Py_ssize_t index; PyObject *value1 = NULL, *value2 = NULL, *iter = NULL; if (!is_tuple && unlikely(!PyTuple_Check(tuple))) { iternextfunc iternext; iter = PyObject_GetIter(tuple); if (unlikely(!iter)) goto bad; if (decref_tuple) { Py_DECREF(tuple); tuple = NULL; } iternext = Py_TYPE(iter)->tp_iternext; value1 = iternext(iter); if (unlikely(!value1)) { index = 0; goto unpacking_failed; } value2 = iternext(iter); if (unlikely(!value2)) { index = 1; goto unpacking_failed; } if (!has_known_size && unlikely(__Pyx_IternextUnpackEndCheck(iternext(iter), 2))) goto bad; Py_DECREF(iter); } else { if (!has_known_size && unlikely(PyTuple_GET_SIZE(tuple) != 2)) { __Pyx_UnpackTupleError(tuple, 2); goto bad; } #if CYTHON_COMPILING_IN_PYPY value1 = PySequence_ITEM(tuple, 0); if (unlikely(!value1)) goto bad; value2 = PySequence_ITEM(tuple, 1); if (unlikely(!value2)) goto bad; #else value1 = PyTuple_GET_ITEM(tuple, 0); value2 = PyTuple_GET_ITEM(tuple, 1); Py_INCREF(value1); Py_INCREF(value2); #endif if (decref_tuple) { Py_DECREF(tuple); } } *pvalue1 = value1; *pvalue2 = value2; return 0; unpacking_failed: if (!has_known_size && __Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); bad: Py_XDECREF(iter); Py_XDECREF(value1); Py_XDECREF(value2); if (decref_tuple) { Py_XDECREF(tuple); } return -1; } /* dict_iter */ static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* iterable, int is_dict, PyObject* method_name, Py_ssize_t* p_orig_length, int* p_source_is_dict) { is_dict = is_dict || likely(PyDict_CheckExact(iterable)); *p_source_is_dict = is_dict; #if !CYTHON_COMPILING_IN_PYPY if (is_dict) { *p_orig_length = PyDict_Size(iterable); Py_INCREF(iterable); return iterable; } #endif *p_orig_length = 0; if (method_name) { PyObject* iter; iterable = __Pyx_PyObject_CallMethod0(iterable, method_name); if (!iterable) return NULL; #if !CYTHON_COMPILING_IN_PYPY if (PyTuple_CheckExact(iterable) || PyList_CheckExact(iterable)) return iterable; #endif iter = PyObject_GetIter(iterable); Py_DECREF(iterable); return iter; } return PyObject_GetIter(iterable); } static CYTHON_INLINE int __Pyx_dict_iter_next( PyObject* iter_obj, CYTHON_NCP_UNUSED Py_ssize_t orig_length, CYTHON_NCP_UNUSED Py_ssize_t* ppos, PyObject** pkey, PyObject** pvalue, PyObject** pitem, int source_is_dict) { PyObject* next_item; #if !CYTHON_COMPILING_IN_PYPY if (source_is_dict) { PyObject *key, *value; if (unlikely(orig_length != 
PyDict_Size(iter_obj))) { PyErr_SetString(PyExc_RuntimeError, "dictionary changed size during iteration"); return -1; } if (unlikely(!PyDict_Next(iter_obj, ppos, &key, &value))) { return 0; } if (pitem) { PyObject* tuple = PyTuple_New(2); if (unlikely(!tuple)) { return -1; } Py_INCREF(key); Py_INCREF(value); PyTuple_SET_ITEM(tuple, 0, key); PyTuple_SET_ITEM(tuple, 1, value); *pitem = tuple; } else { if (pkey) { Py_INCREF(key); *pkey = key; } if (pvalue) { Py_INCREF(value); *pvalue = value; } } return 1; } else if (PyTuple_CheckExact(iter_obj)) { Py_ssize_t pos = *ppos; if (unlikely(pos >= PyTuple_GET_SIZE(iter_obj))) return 0; *ppos = pos + 1; next_item = PyTuple_GET_ITEM(iter_obj, pos); Py_INCREF(next_item); } else if (PyList_CheckExact(iter_obj)) { Py_ssize_t pos = *ppos; if (unlikely(pos >= PyList_GET_SIZE(iter_obj))) return 0; *ppos = pos + 1; next_item = PyList_GET_ITEM(iter_obj, pos); Py_INCREF(next_item); } else #endif { next_item = PyIter_Next(iter_obj); if (unlikely(!next_item)) { return __Pyx_IterFinish(); } } if (pitem) { *pitem = next_item; } else if (pkey && pvalue) { if (__Pyx_unpack_tuple2(next_item, pkey, pvalue, source_is_dict, source_is_dict, 1)) return -1; } else if (pkey) { *pkey = next_item; } else { *pvalue = next_item; } return 1; } /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* SwapException */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* BufferFormatCheck */ static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if 
(number == -1) PyErr_Format(PyExc_ValueError, "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably be the same as above, but we don't have any guarantees. */ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = 
ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; 
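/* 'x' is padding: fmt_offset was advanced by the repeat count above; now reset the pending type state before scanning on */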
ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (!buf) { PyErr_SetString(PyExc_ValueError, "buf is NULL."); goto fail; } else if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...) { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); Py_FatalError(msg); va_end(vargs); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { 
PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* ArgTypeTest */ static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } 
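/* same kind and length > 1: compare the raw code-unit data with a single memcmp */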
else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* None */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_COMPILING_IN_CPYTHON #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* GetItemInt */ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } 
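/* Integer indexing helper: inline fast paths for exact lists and tuples, then the type's sq_item slot with manual wraparound, falling back to PyObject_GetItem via __Pyx_GetItemInt_Generic. */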
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* PyIntBinop */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS && PY_MAJOR_VERSION >= 3 if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; } case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; } case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; } case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; } case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; } case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; } default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); long_long: llx = lla + llb; return PyLong_FromLongLong(llx); } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* None */ static CYTHON_INLINE long __Pyx_div_long(long a, long b) { long q = a / b; long r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* WriteUnraisableException */ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* FetchCommonType */ static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { PyObject* fake_module; PyTypeObject* cached_type = NULL; fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI); if (!fake_module) return NULL; Py_INCREF(fake_module); cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name); if (cached_type) { if (!PyType_Check((PyObject*)cached_type)) { PyErr_Format(PyExc_TypeError, "Shared Cython type %.200s is not a type object", type->tp_name); goto bad; } if (cached_type->tp_basicsize != type->tp_basicsize) { PyErr_Format(PyExc_TypeError, "Shared Cython type %.200s has the wrong size, try recompiling", type->tp_name); goto bad; } } else { if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; PyErr_Clear(); if (PyType_Ready(type) < 0) goto bad; if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0) goto bad; Py_INCREF(type); cached_type = type; } done: Py_DECREF(fake_module); return cached_type; bad: Py_XDECREF(cached_type); cached_type = NULL; goto done; } /* CythonFunction */ static PyObject * __Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure) { if (unlikely(op->func_doc == NULL)) { if (op->func.m_ml->ml_doc) { #if PY_MAJOR_VERSION >= 3 op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc); #else op->func_doc = PyString_FromString(op->func.m_ml->ml_doc); #endif if (unlikely(op->func_doc == NULL)) return NULL; } else { Py_INCREF(Py_None); return Py_None; } } Py_INCREF(op->func_doc); return op->func_doc; } static int __Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp = op->func_doc; if (value == NULL) { value = Py_None; } Py_INCREF(value); op->func_doc = value; Py_XDECREF(tmp); return 0; } static PyObject * 
__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op) { if (unlikely(op->func_name == NULL)) { #if PY_MAJOR_VERSION >= 3 op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name); #else op->func_name = PyString_InternFromString(op->func.m_ml->ml_name); #endif if (unlikely(op->func_name == NULL)) return NULL; } Py_INCREF(op->func_name); return op->func_name; } static int __Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) { #else if (unlikely(value == NULL || !PyString_Check(value))) { #endif PyErr_SetString(PyExc_TypeError, "__name__ must be set to a string object"); return -1; } tmp = op->func_name; Py_INCREF(value); op->func_name = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op) { Py_INCREF(op->func_qualname); return op->func_qualname; } static int __Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 if (unlikely(value == NULL || !PyUnicode_Check(value))) { #else if (unlikely(value == NULL || !PyString_Check(value))) { #endif PyErr_SetString(PyExc_TypeError, "__qualname__ must be set to a string object"); return -1; } tmp = op->func_qualname; Py_INCREF(value); op->func_qualname = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure) { PyObject *self; self = m->func_closure; if (self == NULL) self = Py_None; Py_INCREF(self); return self; } static PyObject * __Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op) { if (unlikely(op->func_dict == NULL)) { op->func_dict = PyDict_New(); if (unlikely(op->func_dict == NULL)) return NULL; } Py_INCREF(op->func_dict); return op->func_dict; } static int __Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value) { PyObject *tmp; if (unlikely(value == NULL)) { PyErr_SetString(PyExc_TypeError, "function's dictionary may not be deleted"); return -1; } if (unlikely(!PyDict_Check(value))) { PyErr_SetString(PyExc_TypeError, "setting function's dictionary to a non-dict"); return -1; } tmp = op->func_dict; Py_INCREF(value); op->func_dict = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op) { Py_INCREF(op->func_globals); return op->func_globals; } static PyObject * __Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op) { Py_INCREF(Py_None); return Py_None; } static PyObject * __Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op) { PyObject* result = (op->func_code) ? 
op->func_code : Py_None; Py_INCREF(result); return result; } static int __Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { int result = 0; PyObject *res = op->defaults_getter((PyObject *) op); if (unlikely(!res)) return -1; #if CYTHON_COMPILING_IN_CPYTHON op->defaults_tuple = PyTuple_GET_ITEM(res, 0); Py_INCREF(op->defaults_tuple); op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); Py_INCREF(op->defaults_kwdict); #else op->defaults_tuple = PySequence_ITEM(res, 0); if (unlikely(!op->defaults_tuple)) result = -1; else { op->defaults_kwdict = PySequence_ITEM(res, 1); if (unlikely(!op->defaults_kwdict)) result = -1; } #endif Py_DECREF(res); return result; } static int __Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value) { PyObject* tmp; if (!value) { value = Py_None; } else if (value != Py_None && !PyTuple_Check(value)) { PyErr_SetString(PyExc_TypeError, "__defaults__ must be set to a tuple object"); return -1; } Py_INCREF(value); tmp = op->defaults_tuple; op->defaults_tuple = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op) { PyObject* result = op->defaults_tuple; if (unlikely(!result)) { if (op->defaults_getter) { if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; result = op->defaults_tuple; } else { result = Py_None; } } Py_INCREF(result); return result; } static int __Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value) { PyObject* tmp; if (!value) { value = Py_None; } else if (value != Py_None && !PyDict_Check(value)) { PyErr_SetString(PyExc_TypeError, "__kwdefaults__ must be set to a dict object"); return -1; } Py_INCREF(value); tmp = op->defaults_kwdict; op->defaults_kwdict = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op) { PyObject* result = op->defaults_kwdict; if (unlikely(!result)) { if (op->defaults_getter) { if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; result = op->defaults_kwdict; } else { result = Py_None; } } Py_INCREF(result); return result; } static int __Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value) { PyObject* tmp; if (!value || value == Py_None) { value = NULL; } else if (!PyDict_Check(value)) { PyErr_SetString(PyExc_TypeError, "__annotations__ must be set to a dict object"); return -1; } Py_XINCREF(value); tmp = op->func_annotations; op->func_annotations = value; Py_XDECREF(tmp); return 0; } static PyObject * __Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op) { PyObject* result = op->func_annotations; if (unlikely(!result)) { result = PyDict_New(); if (unlikely(!result)) return NULL; op->func_annotations = result; } Py_INCREF(result); return result; } static PyGetSetDef __pyx_CyFunction_getsets[] = { {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, {(char *) "__self__", (getter)__Pyx_CyFunction_get_self, 0, 0, 0}, {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, 
(setter)__Pyx_CyFunction_set_dict, 0, 0}, {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, {0, 0, 0, 0, 0} }; static PyMemberDef __pyx_CyFunction_members[] = { {(char *) "__module__", T_OBJECT, offsetof(__pyx_CyFunctionObject, func.m_module), PY_WRITE_RESTRICTED, 0}, {0, 0, 0, 0, 0} }; static PyObject * __Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args) { #if PY_MAJOR_VERSION >= 3 return PyUnicode_FromString(m->func.m_ml->ml_name); #else return PyString_FromString(m->func.m_ml->ml_name); #endif } static PyMethodDef __pyx_CyFunction_methods[] = { {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, {0, 0, 0, 0} }; #if PY_VERSION_HEX < 0x030500A0 #define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) #else #define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist) #endif static PyObject *__Pyx_CyFunction_New(PyTypeObject *type, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { __pyx_CyFunctionObject *op = PyObject_GC_New(__pyx_CyFunctionObject, type); if (op == NULL) return NULL; op->flags = flags; __Pyx_CyFunction_weakreflist(op) = NULL; op->func.m_ml = ml; op->func.m_self = (PyObject *) op; Py_XINCREF(closure); op->func_closure = closure; Py_XINCREF(module); op->func.m_module = module; op->func_dict = NULL; op->func_name = NULL; Py_INCREF(qualname); op->func_qualname = qualname; op->func_doc = NULL; op->func_classobj = NULL; op->func_globals = globals; Py_INCREF(op->func_globals); Py_XINCREF(code); op->func_code = code; op->defaults_pyobjects = 0; op->defaults = NULL; op->defaults_tuple = NULL; op->defaults_kwdict = NULL; op->defaults_getter = NULL; op->func_annotations = NULL; PyObject_GC_Track(op); return (PyObject *) op; } static int __Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) { Py_CLEAR(m->func_closure); Py_CLEAR(m->func.m_module); Py_CLEAR(m->func_dict); Py_CLEAR(m->func_name); Py_CLEAR(m->func_qualname); Py_CLEAR(m->func_doc); Py_CLEAR(m->func_globals); Py_CLEAR(m->func_code); Py_CLEAR(m->func_classobj); Py_CLEAR(m->defaults_tuple); Py_CLEAR(m->defaults_kwdict); Py_CLEAR(m->func_annotations); if (m->defaults) { PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); int i; for (i = 0; i < m->defaults_pyobjects; i++) Py_XDECREF(pydefaults[i]); PyObject_Free(m->defaults); m->defaults = NULL; } return 0; } static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m) { PyObject_GC_UnTrack(m); if (__Pyx_CyFunction_weakreflist(m) != NULL) PyObject_ClearWeakRefs((PyObject *) m); __Pyx_CyFunction_clear(m); PyObject_GC_Del(m); } static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, 
visitproc visit, void *arg) { Py_VISIT(m->func_closure); Py_VISIT(m->func.m_module); Py_VISIT(m->func_dict); Py_VISIT(m->func_name); Py_VISIT(m->func_qualname); Py_VISIT(m->func_doc); Py_VISIT(m->func_globals); Py_VISIT(m->func_code); Py_VISIT(m->func_classobj); Py_VISIT(m->defaults_tuple); Py_VISIT(m->defaults_kwdict); if (m->defaults) { PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); int i; for (i = 0; i < m->defaults_pyobjects; i++) Py_VISIT(pydefaults[i]); } return 0; } static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) { Py_INCREF(func); return func; } if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) { if (type == NULL) type = (PyObject *)(Py_TYPE(obj)); return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type))); } if (obj == Py_None) obj = NULL; return __Pyx_PyMethod_New(func, obj, type); } static PyObject* __Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) { #if PY_MAJOR_VERSION >= 3 return PyUnicode_FromFormat("<cyfunction %U at %p>", op->func_qualname, (void *)op); #else return PyString_FromFormat("<cyfunction %s at %p>", PyString_AsString(op->func_qualname), (void *)op); #endif } #if CYTHON_COMPILING_IN_PYPY static PyObject * __Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyCFunctionObject* f = (PyCFunctionObject*)func; PyCFunction meth = f->m_ml->ml_meth; PyObject *self = f->m_self; Py_ssize_t size; switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { case METH_VARARGS: if (likely(kw == NULL || PyDict_Size(kw) == 0)) return (*meth)(self, arg); break; case METH_VARARGS | METH_KEYWORDS: return (*(PyCFunctionWithKeywords)meth)(self, arg, kw); case METH_NOARGS: if (likely(kw == NULL || PyDict_Size(kw) == 0)) { size = PyTuple_GET_SIZE(arg); if (likely(size == 0)) return (*meth)(self, NULL); PyErr_Format(PyExc_TypeError, "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", f->m_ml->ml_name, size); return NULL; } break; case METH_O: if (likely(kw == NULL || PyDict_Size(kw) == 0)) { size = PyTuple_GET_SIZE(arg); if (likely(size == 1)) { PyObject *result, *arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; result = (*meth)(self, arg0); Py_DECREF(arg0); return result; } PyErr_Format(PyExc_TypeError, "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", f->m_ml->ml_name, size); return NULL; } break; default: PyErr_SetString(PyExc_SystemError, "Bad call flags in " "__Pyx_CyFunction_Call. 
METH_OLDARGS is no " "longer supported!"); return NULL; } PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", f->m_ml->ml_name); return NULL; } #else static PyObject * __Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { return PyCFunction_Call(func, arg, kw); } #endif static PyTypeObject __pyx_CyFunctionType_type = { PyVarObject_HEAD_INIT(0, 0) "cython_function_or_method", sizeof(__pyx_CyFunctionObject), 0, (destructor) __Pyx_CyFunction_dealloc, 0, 0, 0, #if PY_MAJOR_VERSION < 3 0, #else 0, #endif (reprfunc) __Pyx_CyFunction_repr, 0, 0, 0, 0, __Pyx_CyFunction_Call, 0, 0, 0, 0, Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, 0, (traverseproc) __Pyx_CyFunction_traverse, (inquiry) __Pyx_CyFunction_clear, 0, #if PY_VERSION_HEX < 0x030500A0 offsetof(__pyx_CyFunctionObject, func_weakreflist), #else offsetof(PyCFunctionObject, m_weakreflist), #endif 0, 0, __pyx_CyFunction_methods, __pyx_CyFunction_members, __pyx_CyFunction_getsets, 0, 0, __Pyx_CyFunction_descr_get, 0, offsetof(__pyx_CyFunctionObject, func_dict), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, #if PY_VERSION_HEX >= 0x030400a1 0, #endif }; static int __pyx_CyFunction_init(void) { #if !CYTHON_COMPILING_IN_PYPY __pyx_CyFunctionType_type.tp_call = PyCFunction_Call; #endif __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); if (__pyx_CyFunctionType == NULL) { return -1; } return 0; } static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults = PyObject_Malloc(size); if (!m->defaults) return PyErr_NoMemory(); memset(m->defaults, 0, size); m->defaults_pyobjects = pyobjects; return m->defaults; } static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults_tuple = tuple; Py_INCREF(tuple); } static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->defaults_kwdict = dict; Py_INCREF(dict); } static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; m->func_annotations = dict; Py_INCREF(dict); } /* FusedFunction */ static PyObject * __pyx_FusedFunction_New(PyTypeObject *type, PyMethodDef *ml, int flags, PyObject *qualname, PyObject *self, PyObject *module, PyObject *globals, PyObject *code) { __pyx_FusedFunctionObject *fusedfunc = (__pyx_FusedFunctionObject *) __Pyx_CyFunction_New(type, ml, flags, qualname, self, module, globals, code); if (!fusedfunc) return NULL; fusedfunc->__signatures__ = NULL; fusedfunc->type = NULL; fusedfunc->self = NULL; return (PyObject *) fusedfunc; } static void __pyx_FusedFunction_dealloc(__pyx_FusedFunctionObject *self) { __pyx_FusedFunction_clear(self); __pyx_FusedFunctionType->tp_free((PyObject *) self); } static int __pyx_FusedFunction_traverse(__pyx_FusedFunctionObject *self, visitproc visit, void *arg) { Py_VISIT(self->self); Py_VISIT(self->type); Py_VISIT(self->__signatures__); return __Pyx_CyFunction_traverse((__pyx_CyFunctionObject *) self, visit, arg); } static int __pyx_FusedFunction_clear(__pyx_FusedFunctionObject *self) { Py_CLEAR(self->self); Py_CLEAR(self->type); Py_CLEAR(self->__signatures__); return __Pyx_CyFunction_clear((__pyx_CyFunctionObject *) self); } static PyObject * __pyx_FusedFunction_descr_get(PyObject *self, PyObject *obj, PyObject 
*type) { __pyx_FusedFunctionObject *func, *meth; func = (__pyx_FusedFunctionObject *) self; if (func->self || func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD) { Py_INCREF(self); return self; } if (obj == Py_None) obj = NULL; meth = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_NewEx( ((PyCFunctionObject *) func)->m_ml, ((__pyx_CyFunctionObject *) func)->flags, ((__pyx_CyFunctionObject *) func)->func_qualname, ((__pyx_CyFunctionObject *) func)->func_closure, ((PyCFunctionObject *) func)->m_module, ((__pyx_CyFunctionObject *) func)->func_globals, ((__pyx_CyFunctionObject *) func)->func_code); if (!meth) return NULL; Py_XINCREF(func->func.func_classobj); meth->func.func_classobj = func->func.func_classobj; Py_XINCREF(func->__signatures__); meth->__signatures__ = func->__signatures__; Py_XINCREF(type); meth->type = type; Py_XINCREF(func->func.defaults_tuple); meth->func.defaults_tuple = func->func.defaults_tuple; if (func->func.flags & __Pyx_CYFUNCTION_CLASSMETHOD) obj = type; Py_XINCREF(obj); meth->self = obj; return (PyObject *) meth; } static PyObject * _obj_to_str(PyObject *obj) { if (PyType_Check(obj)) return PyObject_GetAttr(obj, __pyx_n_s_name_2); else return PyObject_Str(obj); } static PyObject * __pyx_FusedFunction_getitem(__pyx_FusedFunctionObject *self, PyObject *idx) { PyObject *signature = NULL; PyObject *unbound_result_func; PyObject *result_func = NULL; if (self->__signatures__ == NULL) { PyErr_SetString(PyExc_TypeError, "Function is not fused"); return NULL; } if (PyTuple_Check(idx)) { PyObject *list = PyList_New(0); Py_ssize_t n = PyTuple_GET_SIZE(idx); PyObject *string = NULL; PyObject *sep = NULL; int i; if (!list) return NULL; for (i = 0; i < n; i++) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *item = PyTuple_GET_ITEM(idx, i); #else PyObject *item = PySequence_ITEM(idx, i); #endif string = _obj_to_str(item); #if !CYTHON_COMPILING_IN_CPYTHON Py_DECREF(item); #endif if (!string || PyList_Append(list, string) < 0) goto __pyx_err; Py_DECREF(string); } sep = PyUnicode_FromString("|"); if (sep) signature = PyUnicode_Join(sep, list); __pyx_err: ; Py_DECREF(list); Py_XDECREF(sep); } else { signature = _obj_to_str(idx); } if (!signature) return NULL; unbound_result_func = PyObject_GetItem(self->__signatures__, signature); if (unbound_result_func) { if (self->self || self->type) { __pyx_FusedFunctionObject *unbound = (__pyx_FusedFunctionObject *) unbound_result_func; Py_CLEAR(unbound->func.func_classobj); Py_XINCREF(self->func.func_classobj); unbound->func.func_classobj = self->func.func_classobj; result_func = __pyx_FusedFunction_descr_get(unbound_result_func, self->self, self->type); } else { result_func = unbound_result_func; Py_INCREF(result_func); } } Py_DECREF(signature); Py_XDECREF(unbound_result_func); return result_func; } static PyObject * __pyx_FusedFunction_callfunction(PyObject *func, PyObject *args, PyObject *kw) { __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; PyObject *result; int static_specialized = (cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD && !((__pyx_FusedFunctionObject *) func)->__signatures__); if (cyfunc->flags & __Pyx_CYFUNCTION_CCLASS && !static_specialized) { Py_ssize_t argc; PyObject *new_args; PyObject *self; PyObject *m_self; argc = PyTuple_GET_SIZE(args); new_args = PyTuple_GetSlice(args, 1, argc); if (!new_args) return NULL; self = PyTuple_GetItem(args, 0); if (!self) return NULL; m_self = cyfunc->func.m_self; cyfunc->func.m_self = self; result = __Pyx_CyFunction_Call(func, new_args, kw); cyfunc->func.m_self = m_self; 
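/* the original m_self is restored after the bound call; drop the argument tuple sliced off above */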
Py_DECREF(new_args); } else { result = __Pyx_CyFunction_Call(func, args, kw); } return result; } static PyObject * __pyx_FusedFunction_call(PyObject *func, PyObject *args, PyObject *kw) { __pyx_FusedFunctionObject *binding_func = (__pyx_FusedFunctionObject *) func; Py_ssize_t argc = PyTuple_GET_SIZE(args); PyObject *new_args = NULL; __pyx_FusedFunctionObject *new_func = NULL; PyObject *result = NULL; PyObject *self = NULL; int is_staticmethod = binding_func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD; int is_classmethod = binding_func->func.flags & __Pyx_CYFUNCTION_CLASSMETHOD; if (binding_func->self) { Py_ssize_t i; new_args = PyTuple_New(argc + 1); if (!new_args) return NULL; self = binding_func->self; #if !CYTHON_COMPILING_IN_CPYTHON Py_INCREF(self); #endif Py_INCREF(self); PyTuple_SET_ITEM(new_args, 0, self); for (i = 0; i < argc; i++) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *item = PyTuple_GET_ITEM(args, i); Py_INCREF(item); #else PyObject *item = PySequence_ITEM(args, i); if (unlikely(!item)) goto bad; #endif PyTuple_SET_ITEM(new_args, i + 1, item); } args = new_args; } else if (binding_func->type) { if (argc < 1) { PyErr_SetString(PyExc_TypeError, "Need at least one argument, 0 given."); return NULL; } #if CYTHON_COMPILING_IN_CPYTHON self = PyTuple_GET_ITEM(args, 0); #else self = PySequence_ITEM(args, 0); if (unlikely(!self)) return NULL; #endif } if (self && !is_classmethod && !is_staticmethod) { int is_instance = PyObject_IsInstance(self, binding_func->type); if (unlikely(!is_instance)) { PyErr_Format(PyExc_TypeError, "First argument should be of type %.200s, got %.200s.", ((PyTypeObject *) binding_func->type)->tp_name, self->ob_type->tp_name); goto bad; } else if (unlikely(is_instance == -1)) { goto bad; } } #if !CYTHON_COMPILING_IN_CPYTHON Py_XDECREF(self); self = NULL; #endif if (binding_func->__signatures__) { PyObject *tup = PyTuple_Pack(4, binding_func->__signatures__, args, kw == NULL ? 
Py_None : kw, binding_func->func.defaults_tuple); if (!tup) goto bad; new_func = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_callfunction(func, tup, NULL); Py_DECREF(tup); if (!new_func) goto bad; Py_XINCREF(binding_func->func.func_classobj); Py_CLEAR(new_func->func.func_classobj); new_func->func.func_classobj = binding_func->func.func_classobj; func = (PyObject *) new_func; } result = __pyx_FusedFunction_callfunction(func, args, kw); bad: #if !CYTHON_COMPILING_IN_CPYTHON Py_XDECREF(self); #endif Py_XDECREF(new_args); Py_XDECREF((PyObject *) new_func); return result; } static PyMemberDef __pyx_FusedFunction_members[] = { {(char *) "__signatures__", T_OBJECT, offsetof(__pyx_FusedFunctionObject, __signatures__), READONLY, 0}, {0, 0, 0, 0, 0}, }; static PyMappingMethods __pyx_FusedFunction_mapping_methods = { 0, (binaryfunc) __pyx_FusedFunction_getitem, 0, }; static PyTypeObject __pyx_FusedFunctionType_type = { PyVarObject_HEAD_INIT(0, 0) "fused_cython_function", sizeof(__pyx_FusedFunctionObject), 0, (destructor) __pyx_FusedFunction_dealloc, 0, 0, 0, #if PY_MAJOR_VERSION < 3 0, #else 0, #endif 0, 0, 0, &__pyx_FusedFunction_mapping_methods, 0, (ternaryfunc) __pyx_FusedFunction_call, 0, 0, 0, 0, Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, 0, (traverseproc) __pyx_FusedFunction_traverse, (inquiry) __pyx_FusedFunction_clear, 0, 0, 0, 0, 0, __pyx_FusedFunction_members, __pyx_CyFunction_getsets, &__pyx_CyFunctionType_type, 0, __pyx_FusedFunction_descr_get, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, #if PY_VERSION_HEX >= 0x030400a1 0, #endif }; static int __pyx_FusedFunction_init(void) { __pyx_FusedFunctionType = __Pyx_FetchCommonType(&__pyx_FusedFunctionType_type); if (__pyx_FusedFunctionType == NULL) { return -1; } return 0; } /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = 
entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } Py_DECREF(obj); view->obj = NULL; } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int 
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, 
dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_float(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 2, &__Pyx_TypeInfo_float, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) 
value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* BytesContains */ static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character) { const Py_ssize_t length = PyBytes_GET_SIZE(bytes); char* char_start = PyBytes_AS_STRING(bytes); char* pos; for (pos=char_start; pos < char_start+length; pos++) { if (character == pos[0]) return 1; } return 0; } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_float(const char *itemp) { return (PyObject *) PyFloat_FromDouble(*(float *) itemp); } static CYTHON_INLINE int __pyx_memview_set_float(const char *itemp, PyObject *obj) { float value = __pyx_PyFloat_AsFloat(obj); if ((value == (float)-1) && PyErr_Occurred()) return 0; *(float *) itemp = value; return 1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) { return (PyObject *) PyFloat_FromDouble(*(double *) itemp); } static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) { double value = __pyx_PyFloat_AsDouble(obj); if ((value == (double)-1) && PyErr_Occurred()) return 0; *(double *) itemp = value; return 1; } /* None */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* None */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; 
z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* None */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* None */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) 
/ denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (from_mvs->suboffsets[i] >= 0) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static 
CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 
2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) -1, const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if 
(is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * 
PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; 
switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * 
PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; 
__Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 1, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_float(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 1, &__Pyx_TypeInfo_float, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* FunctionImport */ #ifndef __PYX_HAVE_RT_ImportFunction #define __PYX_HAVE_RT_ImportFunction static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { PyObject *d = 0; PyObject *cobj = 0; union { void (*fp)(void); void *p; } tmp; d = PyObject_GetAttrString(module, (char *)"__pyx_capi__"); if (!d) goto bad; cobj = PyDict_GetItemString(d, funcname); if (!cobj) { PyErr_Format(PyExc_ImportError, "%.200s does not export expected C function %.200s", PyModule_GetName(module), funcname); goto bad; } #if PY_VERSION_HEX >= 0x02070000 if (!PyCapsule_IsValid(cobj, sig)) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); goto bad; } tmp.p = PyCapsule_GetPointer(cobj, sig); #else {const char *desc, *s1, *s2; desc = (const char *)PyCObject_GetDesc(cobj); if (!desc) goto bad; s1 = desc; s2 = sig; while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; } if (*s1 != *s2) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), funcname, sig, desc); goto bad; } tmp.p = PyCObject_AsVoidPtr(cobj);} #endif *f = tmp.fp; if (!(*f)) goto bad; Py_DECREF(d); return 0; bad: Py_XDECREF(d); return -1; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = 
PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else if (__Pyx_PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif #endif } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return __Pyx_NewRef(x); m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if 
(sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
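/* Illustrative sketch (not part of the generated module): the CIntToPy and
 * CIntFromPy helpers above all detect the host byte order at run time with
 * the same probe -- reading the first byte of an int holding 1 -- before
 * handing the value to _PyLong_FromByteArray / _PyLong_AsByteArray. A
 * minimal standalone version of that probe; the names here are illustrative.
 */
#include <stdio.h>

int main(void)
{
	int one = 1;

	/* On a little-endian host the least significant byte is stored
	 * first, so the first byte of 'one' is 1; on big-endian it is 0. */
	int is_little = (int)*(unsigned char *)&one;

	printf("host is %s-endian\n", is_little ? "little" : "big");
	return 0;
}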
rand.c
/* Copyright 2013. The Regents of the University of California.
 * Copyright 2021. Uecker Lab. University Center Göttingen.
 * All rights reserved. Use of this source code is governed by
 * a BSD-style license which can be found in the LICENSE file.
 *
 * Authors: Martin Uecker, Dara Bahri, Moritz Blumenthal
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <math.h>
#include <complex.h>

#ifdef _WIN32
#include "win/rand_r.h"
#endif

#include "num/multind.h"

#ifdef USE_CUDA
#include "num/gpuops.h"
#endif

#include "rand.h"

unsigned int num_rand_seed = 123;

void num_rand_init(unsigned int seed)
{
	num_rand_seed = seed;
}

double uniform_rand(void)
{
	double ret;

	#pragma omp critical
	ret = rand_r(&num_rand_seed) / (double)RAND_MAX;

	return ret;
}

/**
 * Marsaglia's polar variant of the Box-Muller transform:
 * draw (u1, u2) uniformly from the unit disk, then rescale to obtain
 * two independent standard normal samples.
 */
complex double gaussian_rand(void)
{
	double u1, u2, s;

	do {
		u1 = 2. * uniform_rand() - 1.;
		u2 = 2. * uniform_rand() - 1.;
		s = u1 * u1 + u2 * u2;

	} while ((s >= 1.) || (0. == s));	// also reject s == 0, so log(s) / s stays finite

	double re = sqrt(-2. * log(s) / s) * u1;
	double im = sqrt(-2. * log(s) / s) * u2;

	return re + 1.i * im;
}

void md_gaussian_rand(unsigned int D, const long dims[D], complex float* dst)
{
#ifdef USE_CUDA
	if (cuda_ondevice(dst)) {

		complex float* tmp = md_alloc(D, dims, sizeof(complex float));
		md_gaussian_rand(D, dims, tmp);
		md_copy(D, dims, dst, tmp, sizeof(complex float));
		md_free(tmp);
		return;
	}
#endif
	//#pragma omp parallel for
	for (long i = 0; i < md_calc_size(D, dims); i++)
		dst[i] = (complex float)gaussian_rand();
}

void md_uniform_rand(unsigned int D, const long dims[D], complex float* dst)
{
#ifdef USE_CUDA
	if (cuda_ondevice(dst)) {

		complex float* tmp = md_alloc(D, dims, sizeof(complex float));
		md_uniform_rand(D, dims, tmp);
		md_copy(D, dims, dst, tmp, sizeof(complex float));
		md_free(tmp);
		return;
	}
#endif
	for (long i = 0; i < md_calc_size(D, dims); i++)
		dst[i] = (complex float)uniform_rand();
}

void md_rand_one(unsigned int D, const long dims[D], complex float* dst, double p)
{
#ifdef USE_CUDA
	if (cuda_ondevice(dst)) {

		complex float* tmp = md_alloc(D, dims, sizeof(complex float));
		md_rand_one(D, dims, tmp, p);
		md_copy(D, dims, dst, tmp, sizeof(complex float));
		md_free(tmp);
		return;
	}
#endif
	for (long i = 0; i < md_calc_size(D, dims); i++)
		dst[i] = (complex float)(uniform_rand() < p);
}
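/* Standalone sketch of the polar (Marsaglia) rejection scheme used by
 * gaussian_rand() above, written against plain rand_r() so it builds
 * without the num/ headers. The names demo_seed, demo_uniform and
 * polar_gaussian are illustrative, not part of the library. Build with
 * a C99 compiler, e.g. gcc -std=gnu99 demo.c -lm. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <complex.h>

static unsigned int demo_seed = 123;

static double demo_uniform(void)
{
	return rand_r(&demo_seed) / (double)RAND_MAX;
}

static complex double polar_gaussian(void)
{
	double u1, u2, s;

	/* Keep (u1, u2) strictly inside the unit disk and away from the
	 * origin so that log(s) and the division by s stay finite. */
	do {
		u1 = 2. * demo_uniform() - 1.;
		u2 = 2. * demo_uniform() - 1.;
		s = u1 * u1 + u2 * u2;

	} while ((s >= 1.) || (0. == s));

	double f = sqrt(-2. * log(s) / s);

	return f * u1 + 1.i * f * u2;	/* two independent N(0, 1) draws */
}

int main(void)
{
	for (int i = 0; i < 4; i++) {

		complex double z = polar_gaussian();
		printf("%+.4f%+.4fi\n", creal(z), cimag(z));
	}

	return 0;
}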
munit.c
/* Copyright (c) 2013-2018 Evan Nemerson <evan@nemerson.com>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE. */

/*** Configuration ***/

/* This is just where the output from the test goes. It's really just
 * meant to let you choose stdout or stderr, but if anyone really wants
 * to direct it to a file let me know; it would be fairly easy to
 * support. */
#if !defined(MUNIT_OUTPUT_FILE)
# define MUNIT_OUTPUT_FILE stdout
#endif

/* This is a bit more useful; it tells µnit how to format the seconds in
 * timed tests. If your tests run for longer you might want to reduce
 * it, and if your computer is really fast and your tests are tiny you
 * can increase it. */
#if !defined(MUNIT_TEST_TIME_FORMAT)
# define MUNIT_TEST_TIME_FORMAT "0.8f"
#endif

/* If you have long test names you might want to consider bumping
 * this. The result information takes 43 characters. */
#if !defined(MUNIT_TEST_NAME_LEN)
# define MUNIT_TEST_NAME_LEN 37
#endif

/* If you don't like the timing information, you can disable it by
 * defining MUNIT_DISABLE_TIMING. */
#if !defined(MUNIT_DISABLE_TIMING)
# define MUNIT_ENABLE_TIMING
#endif

/*** End configuration ***/

#if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L)
# undef _POSIX_C_SOURCE
#endif
#if !defined(_POSIX_C_SOURCE)
# define _POSIX_C_SOURCE 200809L
#endif

/* Solaris freaks out if you try to use a POSIX or SUS standard without
 * the "right" C standard. */
#if defined(_XOPEN_SOURCE)
# undef _XOPEN_SOURCE
#endif
#if defined(__STDC_VERSION__)
# if __STDC_VERSION__ >= 201112L
#  define _XOPEN_SOURCE 700
# elif __STDC_VERSION__ >= 199901L
#  define _XOPEN_SOURCE 600
# endif
#endif

/* Because, according to Microsoft, POSIX is deprecated. You've got
 * to appreciate the chutzpah. */
#if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE)
# define _CRT_NONSTDC_NO_DEPRECATE
#endif

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
# include <stdbool.h>
#elif defined(_WIN32)
/* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */
#endif

#include <limits.h>
#include <time.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <setjmp.h>

#if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32)
#define MUNIT_NL_LANGINFO
#include <locale.h>
#include <langinfo.h>
#include <strings.h>
#endif

#if !defined(_WIN32)
# include <unistd.h>
# include <sys/types.h>
# include <sys/wait.h>
#else
# include <windows.h>
# include <io.h>
# include <fcntl.h>
# if !defined(STDERR_FILENO)
#  define STDERR_FILENO _fileno(stderr)
# endif
#endif

#include "munit.h"

#define MUNIT_STRINGIFY(x) #x
#define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x)

#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
# define MUNIT_THREAD_LOCAL __thread
#elif defined(_WIN32)
# define MUNIT_THREAD_LOCAL __declspec(thread)
#elif defined(_Thread_local)
# define MUNIT_THREAD_LOCAL _Thread_local
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
# define MUNIT_THREAD_LOCAL _Thread_local
#endif

/* MSVC 12.0 will emit a warning at /W4 for code like 'do { ... }
 * while (0)', or 'do { ... } while (1)'. I'm pretty sure nobody
 * at Microsoft compiles with /W4. */
#if defined(_MSC_VER) && (_MSC_VER <= 1800)
#pragma warning(disable: 4127)
#endif

#if defined(_WIN32) || defined(__EMSCRIPTEN__)
# define MUNIT_NO_FORK
#endif

#if defined(__EMSCRIPTEN__)
# define MUNIT_NO_BUFFER
#endif

/*** Logging ***/

static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO;
static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR;

#if defined(MUNIT_THREAD_LOCAL)
static MUNIT_THREAD_LOCAL munit_bool munit_error_jmp_buf_valid = 0;
static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf;
#endif

/* At certain warning levels, mingw will trigger warnings about
 * suggesting the format attribute, which we've explicitly *not* set
 * because it will then choke on our attempts to use the MS-specific
 * I64 modifier for size_t (which we have to use since MSVC doesn't
 * support the C99 z modifier). */
#if defined(__MINGW32__) || defined(__MINGW64__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif

MUNIT_PRINTF(5,0)
static void munit_logf_exv(MunitLogLevel level, FILE* fp, const char* filename, int line, const char* format, va_list ap) {
  if (level < munit_log_level_visible)
    return;

  switch (level) {
    case MUNIT_LOG_DEBUG:
      fputs("Debug", fp);
      break;
    case MUNIT_LOG_INFO:
      fputs("Info", fp);
      break;
    case MUNIT_LOG_WARNING:
      fputs("Warning", fp);
      break;
    case MUNIT_LOG_ERROR:
      fputs("Error", fp);
      break;
    default:
      munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level);
      return;
  }

  fputs(": ", fp);
  if (filename != NULL)
    fprintf(fp, "%s:%d: ", filename, line);
  vfprintf(fp, format, ap);
  fputc('\n', fp);
}

MUNIT_PRINTF(3,4)
static void munit_logf_internal(MunitLogLevel level, FILE* fp, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(level, fp, NULL, 0, format, ap);
  va_end(ap);
}

static void munit_log_internal(MunitLogLevel level, FILE* fp, const char* message) {
  munit_logf_internal(level, fp, "%s", message);
}

void munit_logf_ex(MunitLogLevel level, const char* filename, int line, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(level, stderr, filename, line, format, ap);
  va_end(ap);

  if (level >= munit_log_level_fatal) {
#if defined(MUNIT_THREAD_LOCAL)
    if (munit_error_jmp_buf_valid)
      longjmp(munit_error_jmp_buf, 1);
#endif
    abort();
  }
}

void munit_errorf_ex(const char* filename, int line, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, ap);
  va_end(ap);

#if defined(MUNIT_THREAD_LOCAL)
  if (munit_error_jmp_buf_valid)
    longjmp(munit_error_jmp_buf, 1);
#endif
  abort();
}

#if defined(__MINGW32__) || defined(__MINGW64__)
#pragma GCC diagnostic pop
#endif

#if !defined(MUNIT_STRERROR_LEN)
# define MUNIT_STRERROR_LEN 80
#endif

static void munit_log_errno(MunitLogLevel level, FILE* fp, const char* msg) {
#if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API))
  munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno);
#else
  char munit_error_str[MUNIT_STRERROR_LEN];
  munit_error_str[0] = '\0';

#if !defined(_WIN32)
  strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN);
#else
  strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno);
#endif

  munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno);
#endif
}

/*** Memory allocation ***/

void* munit_malloc_ex(const char* filename, int line, size_t size) {
  void* ptr;

  if (size == 0)
    return NULL;

  ptr = calloc(1, size);
  if (MUNIT_UNLIKELY(ptr == NULL)) {
    munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size);
  }

  return ptr;
}

/*** Timer code ***/

#if defined(MUNIT_ENABLE_TIMING)

#define psnip_uint64_t munit_uint64_t
#define psnip_uint32_t munit_uint32_t

/* Code copied from portable-snippets
 * <https://github.com/nemequ/portable-snippets/>. If you need to
 * change something, please do it there so we can keep the code in
 * sync. */

/* Clocks (v1)
 * Portable Snippets - https://github.com/nemequ/portable-snippets
 * Created by Evan Nemerson <evan@nemerson.com>
 *
 * To the extent possible under law, the authors have waived all
 * copyright and related or neighboring rights to this code. For
 * details, see the Creative Commons Zero 1.0 Universal license at
 * https://creativecommons.org/publicdomain/zero/1.0/
 */

#if !defined(PSNIP_CLOCK_H)
#define PSNIP_CLOCK_H

#if !defined(psnip_uint64_t)
# include "../exact-int/exact-int.h"
#endif

#if !defined(PSNIP_CLOCK_STATIC_INLINE)
# if defined(__GNUC__)
#  define PSNIP_CLOCK__COMPILER_ATTRIBUTES __attribute__((__unused__))
# else
#  define PSNIP_CLOCK__COMPILER_ATTRIBUTES
# endif
# define PSNIP_CLOCK__FUNCTION PSNIP_CLOCK__COMPILER_ATTRIBUTES static
#endif

enum PsnipClockType {
  /* This clock provides the current time, in units since 1970-01-01
   * 00:00:00 UTC not including leap seconds. In other words, UNIX
   * time. Keep in mind that this clock doesn't account for leap
   * seconds, and can go backwards (think NTP adjustments). */
  PSNIP_CLOCK_TYPE_WALL = 1,

  /* The CPU time is a clock which increases only when the current
   * process is active (i.e., it doesn't increment while blocking on
   * I/O). */
  PSNIP_CLOCK_TYPE_CPU = 2,

  /* Monotonic time is always running (unlike CPU time), but it only
   * ever moves forward unless you reboot the system. Things like NTP
   * adjustments have no effect on this clock.
*/ PSNIP_CLOCK_TYPE_MONOTONIC = 3 }; struct PsnipClockTimespec { psnip_uint64_t seconds; psnip_uint64_t nanoseconds; }; /* Methods we support: */ #define PSNIP_CLOCK_METHOD_CLOCK_GETTIME 1 #define PSNIP_CLOCK_METHOD_TIME 2 #define PSNIP_CLOCK_METHOD_GETTIMEOFDAY 3 #define PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER 4 #define PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME 5 #define PSNIP_CLOCK_METHOD_CLOCK 6 #define PSNIP_CLOCK_METHOD_GETPROCESSTIMES 7 #define PSNIP_CLOCK_METHOD_GETRUSAGE 8 #define PSNIP_CLOCK_METHOD_GETSYSTEMTIMEPRECISEASFILETIME 9 #define PSNIP_CLOCK_METHOD_GETTICKCOUNT64 10 #include <assert.h> #if defined(HEDLEY_UNREACHABLE) # define PSNIP_CLOCK_UNREACHABLE() HEDLEY_UNREACHABLE() #else # define PSNIP_CLOCK_UNREACHABLE() assert(0) #endif /* Choose an implementation */ /* #undef PSNIP_CLOCK_WALL_METHOD */ /* #undef PSNIP_CLOCK_CPU_METHOD */ /* #undef PSNIP_CLOCK_MONOTONIC_METHOD */ /* We want to be able to detect the libc implementation, so we include <limits.h> (<features.h> isn't available everywhere). */ #if defined(__unix__) || defined(__unix) || defined(__linux__) # include <limits.h> # include <unistd.h> #endif #if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) /* These are known to work without librt. If you know of others * please let us know so we can add them. */ # if \ (defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17))) || \ (defined(__FreeBSD__)) # define PSNIP_CLOCK_HAVE_CLOCK_GETTIME # elif !defined(PSNIP_CLOCK_NO_LIBRT) # define PSNIP_CLOCK_HAVE_CLOCK_GETTIME # endif #endif #if defined(_WIN32) # if !defined(PSNIP_CLOCK_CPU_METHOD) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_GETPROCESSTIMES # endif # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER # endif #endif #if defined(__MACH__) && !defined(__gnu_hurd__) # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME # endif #endif #if defined(PSNIP_CLOCK_HAVE_CLOCK_GETTIME) # include <time.h> # if !defined(PSNIP_CLOCK_WALL_METHOD) # if defined(CLOCK_REALTIME_PRECISE) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME_PRECISE # elif !defined(__sun) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME # endif # endif # if !defined(PSNIP_CLOCK_CPU_METHOD) # if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_PROCESS_CPUTIME_ID # elif defined(CLOCK_VIRTUAL) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_VIRTUAL # endif # endif # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # if defined(CLOCK_MONOTONIC_RAW) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC # elif defined(CLOCK_MONOTONIC_PRECISE) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC_PRECISE # elif defined(_POSIX_MONOTONIC_CLOCK) || defined(CLOCK_MONOTONIC) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC # endif # endif #endif #if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200112L) # if !defined(PSNIP_CLOCK_WALL_METHOD) # define 
PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_GETTIMEOFDAY # endif #endif #if !defined(PSNIP_CLOCK_WALL_METHOD) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_TIME #endif #if !defined(PSNIP_CLOCK_CPU_METHOD) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK #endif /* Primarily here for testing. */ #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) && defined(PSNIP_CLOCK_REQUIRE_MONOTONIC) # error No monotonic clock found. #endif /* Implementations */ #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_TIME)) # include <time.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) # include <sys/time.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) # include <windows.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) # include <sys/time.h> # include <sys/resource.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) # include <CoreServices/CoreServices.h> # include <mach/mach.h> # include <mach/mach_time.h> #endif /*** Implementations ***/ #define PSNIP_CLOCK_NSEC_PER_SEC ((psnip_uint32_t) (1000000000ULL)) #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ 
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock__clock_getres (clockid_t clk_id) { struct timespec res; int r; r = clock_getres(clk_id, &res); if (r != 0) return 0; return (psnip_uint32_t) (PSNIP_CLOCK_NSEC_PER_SEC / res.tv_nsec); } PSNIP_CLOCK__FUNCTION int psnip_clock__clock_gettime (clockid_t clk_id, struct PsnipClockTimespec* res) { struct timespec ts; if (clock_gettime(clk_id, &ts) != 0) return -10; res->seconds = (psnip_uint64_t) (ts.tv_sec); res->nanoseconds = (psnip_uint64_t) (ts.tv_nsec); return 0; } #endif PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_wall_get_precision (void) { #if !defined(PSNIP_CLOCK_WALL_METHOD) return 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_WALL); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY return 1000000; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME return 1; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_wall_get_time (struct PsnipClockTimespec* res) { (void) res; #if !defined(PSNIP_CLOCK_WALL_METHOD) return -2; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_WALL, res); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME res->seconds = time(NULL); res->nanoseconds = 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY struct timeval tv; if (gettimeofday(&tv, NULL) != 0) return -6; res->seconds = tv.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_cpu_get_precision (void) { #if !defined(PSNIP_CLOCK_CPU_METHOD) return 0; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_CPU); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK return CLOCKS_PER_SEC; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES return PSNIP_CLOCK_NSEC_PER_SEC / 100; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_cpu_get_time (struct PsnipClockTimespec* res) { #if !defined(PSNIP_CLOCK_CPU_METHOD) (void) res; return -2; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_CPU, res); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK clock_t t = clock(); if (t == ((clock_t) -1)) return -5; res->seconds = t / CLOCKS_PER_SEC; res->nanoseconds = (t % CLOCKS_PER_SEC) * (PSNIP_CLOCK_NSEC_PER_SEC / CLOCKS_PER_SEC); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES FILETIME CreationTime, ExitTime, KernelTime, UserTime; LARGE_INTEGER date, adjust; if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, &UserTime)) return -7; /* http://www.frenk.com/2009/12/convert-filetime-to-unix-timestamp/ */ date.HighPart = UserTime.dwHighDateTime; date.LowPart = UserTime.dwLowDateTime; adjust.QuadPart = 11644473600000 * 10000; date.QuadPart 
-= adjust.QuadPart;
  res->seconds = date.QuadPart / 10000000;
  res->nanoseconds = (date.QuadPart % 10000000) * (PSNIP_CLOCK_NSEC_PER_SEC / 100);
#elif PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) != 0)
    return -8;
  res->seconds = usage.ru_utime.tv_sec;
  res->nanoseconds = usage.ru_utime.tv_usec * 1000;
#else
  (void) res;
  return -2;
#endif

  return 0;
}

PSNIP_CLOCK__FUNCTION
psnip_uint32_t
psnip_clock_monotonic_get_precision (void) {
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
  return 0;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
  static mach_timebase_info_data_t tbi = { 0, };
  if (tbi.denom == 0)
    mach_timebase_info(&tbi);
  return (psnip_uint32_t) (tbi.numer / tbi.denom);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64
  return 1000;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
  LARGE_INTEGER Frequency;
  QueryPerformanceFrequency(&Frequency);
  return (psnip_uint32_t) ((Frequency.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) ? PSNIP_CLOCK_NSEC_PER_SEC : Frequency.QuadPart);
#else
  return 0;
#endif
}

PSNIP_CLOCK__FUNCTION
int
psnip_clock_monotonic_get_time (struct PsnipClockTimespec* res) {
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
  (void) res;
  return -2;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC, res);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
  psnip_uint64_t nsec = mach_absolute_time();
  static mach_timebase_info_data_t tbi = { 0, };
  if (tbi.denom == 0)
    mach_timebase_info(&tbi);
  nsec *= ((psnip_uint64_t) tbi.numer) / ((psnip_uint64_t) tbi.denom);
  res->seconds = nsec / PSNIP_CLOCK_NSEC_PER_SEC;
  res->nanoseconds = nsec % PSNIP_CLOCK_NSEC_PER_SEC;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
  LARGE_INTEGER t, f;
  if (QueryPerformanceCounter(&t) == 0)
    return -12;
  QueryPerformanceFrequency(&f);
  res->seconds = t.QuadPart / f.QuadPart;
  res->nanoseconds = t.QuadPart % f.QuadPart;
  if (f.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC)
    res->nanoseconds /= f.QuadPart / PSNIP_CLOCK_NSEC_PER_SEC;
  else
    res->nanoseconds *= PSNIP_CLOCK_NSEC_PER_SEC / f.QuadPart;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64
  const ULONGLONG msec = GetTickCount64();
  res->seconds = msec / 1000;
  res->nanoseconds = (msec % 1000) * (PSNIP_CLOCK_NSEC_PER_SEC / 1000);
#else
  return -2;
#endif

  return 0;
}

/* Returns the number of ticks per second for the specified clock.
 * For example, a clock with millisecond precision would return 1000,
 * and a clock with 1 second resolution (such as the time() function)
 * would return 1.
 *
 * If the requested clock isn't available, it will return 0.
 * Hopefully this will be rare, but if it happens to you please let us
 * know so we can work on finding a way to support your system.
 *
 * Note that different clocks on the same system often have
 * different precisions.
 */
PSNIP_CLOCK__FUNCTION
psnip_uint32_t
psnip_clock_get_precision (enum PsnipClockType clock_type) {
  switch (clock_type) {
    case PSNIP_CLOCK_TYPE_MONOTONIC:
      return psnip_clock_monotonic_get_precision ();
    case PSNIP_CLOCK_TYPE_CPU:
      return psnip_clock_cpu_get_precision ();
    case PSNIP_CLOCK_TYPE_WALL:
      return psnip_clock_wall_get_precision ();
  }

  PSNIP_CLOCK_UNREACHABLE();
  return 0;
}

/* Set the provided timespec to the requested time. Returns 0 on
 * success, or a negative value on failure. */
PSNIP_CLOCK__FUNCTION
int
psnip_clock_get_time (enum PsnipClockType clock_type, struct PsnipClockTimespec* res) {
  assert(res != NULL);

  switch (clock_type) {
    case PSNIP_CLOCK_TYPE_MONOTONIC:
      return psnip_clock_monotonic_get_time (res);
    case PSNIP_CLOCK_TYPE_CPU:
      return psnip_clock_cpu_get_time (res);
    case PSNIP_CLOCK_TYPE_WALL:
      return psnip_clock_wall_get_time (res);
  }

  return -1;
}

#endif /* !defined(PSNIP_CLOCK_H) */

static psnip_uint64_t munit_clock_get_elapsed(struct PsnipClockTimespec* start, struct PsnipClockTimespec* end) {
  psnip_uint64_t r = (end->seconds - start->seconds) * PSNIP_CLOCK_NSEC_PER_SEC;
  if (end->nanoseconds < start->nanoseconds) {
    r -= (start->nanoseconds - end->nanoseconds);
  } else {
    r += (end->nanoseconds - start->nanoseconds);
  }
  return r;
}

#else
# include <time.h>
#endif /* defined(MUNIT_ENABLE_TIMING) */

/*** PRNG stuff ***/

/* This is (unless I screwed up, which is entirely possible) the
 * version of PCG with 32-bit state. It was chosen because it has a
 * small enough state that we should reliably be able to use CAS
 * instead of requiring a lock for thread-safety.
 *
 * If I did screw up, I probably will not bother changing it unless
 * there is a significant bias. It's really not important that this be
 * particularly strong; as long as it is fairly random, it is much more
 * important that it be reproducible, so bug reports have a better
 * chance of being reproduced.
*/ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) && !defined(__EMSCRIPTEN__) && (!defined(__GNUC_MINOR__) || (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ > 8)) # define HAVE_STDATOMIC #elif defined(__clang__) # if __has_extension(c_atomic) # define HAVE_CLANG_ATOMICS # endif #endif /* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */ #if defined(__clang__) && defined(_WIN32) # undef HAVE_STDATOMIC # if defined(__c2__) # undef HAVE_CLANG_ATOMICS # endif #endif #if defined(_OPENMP) # define ATOMIC_UINT32_T uint32_t # define ATOMIC_UINT32_INIT(x) (x) #elif defined(HAVE_STDATOMIC) # include <stdatomic.h> # define ATOMIC_UINT32_T _Atomic uint32_t # define ATOMIC_UINT32_INIT(x) ATOMIC_VAR_INIT(x) #elif defined(HAVE_CLANG_ATOMICS) # define ATOMIC_UINT32_T _Atomic uint32_t # define ATOMIC_UINT32_INIT(x) (x) #elif defined(_WIN32) # define ATOMIC_UINT32_T volatile LONG # define ATOMIC_UINT32_INIT(x) (x) #else # define ATOMIC_UINT32_T volatile uint32_t # define ATOMIC_UINT32_INIT(x) (x) #endif static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42); #if defined(_OPENMP) static inline void munit_atomic_store(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T value) { #pragma omp critical (munit_atomics) *dest = value; } static inline uint32_t munit_atomic_load(ATOMIC_UINT32_T* src) { int ret; #pragma omp critical (munit_atomics) ret = *src; return ret; } static inline uint32_t munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) { munit_bool ret; #pragma omp critical (munit_atomics) { if (*dest == *expected) { *dest = desired; ret = 1; } else { ret = 0; } } return ret; } #elif defined(HAVE_STDATOMIC) # define munit_atomic_store(dest, value) atomic_store(dest, value) # define munit_atomic_load(src) atomic_load(src) # define munit_atomic_cas(dest, expected, value) atomic_compare_exchange_weak(dest, expected, value) #elif defined(HAVE_CLANG_ATOMICS) # define munit_atomic_store(dest, value) __c11_atomic_store(dest, value, __ATOMIC_SEQ_CST) # define munit_atomic_load(src) __c11_atomic_load(src, __ATOMIC_SEQ_CST) # define munit_atomic_cas(dest, expected, value) __c11_atomic_compare_exchange_weak(dest, expected, value, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #elif defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) # define munit_atomic_store(dest, value) __atomic_store_n(dest, value, __ATOMIC_SEQ_CST) # define munit_atomic_load(src) __atomic_load_n(src, __ATOMIC_SEQ_CST) # define munit_atomic_cas(dest, expected, value) __atomic_compare_exchange_n(dest, expected, value, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #elif defined(__GNUC__) && (__GNUC__ >= 4) # define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) # define munit_atomic_cas(dest, expected, value) __sync_bool_compare_and_swap(dest, *expected, value) #elif defined(_WIN32) /* Untested */ # define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) # define munit_atomic_cas(dest, expected, value) InterlockedCompareExchange((dest), (value), *(expected)) #else # warning No atomic implementation, PRNG will not be thread-safe # define munit_atomic_store(dest, value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) static inline munit_bool munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) { if (*dest == *expected) { *dest = desired; return 1; } else { 
return 0;
  }
}
#endif

#define MUNIT_PRNG_MULTIPLIER (747796405U)
#define MUNIT_PRNG_INCREMENT (1729U)

static munit_uint32_t munit_rand_next_state(munit_uint32_t state) {
  return state * MUNIT_PRNG_MULTIPLIER + MUNIT_PRNG_INCREMENT;
}

static munit_uint32_t munit_rand_from_state(munit_uint32_t state) {
  munit_uint32_t res = ((state >> ((state >> 28) + 4)) ^ state) * (277803737U);
  res ^= res >> 22;
  return res;
}

void munit_rand_seed(munit_uint32_t seed) {
  munit_uint32_t state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
  munit_atomic_store(&munit_rand_state, state);
}

static munit_uint32_t munit_rand_generate_seed(void) {
  munit_uint32_t seed, state;

#if defined(MUNIT_ENABLE_TIMING)
  struct PsnipClockTimespec wc = { 0, };

  psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wc);
  seed = (munit_uint32_t) wc.nanoseconds;
#else
  seed = (munit_uint32_t) time(NULL);
#endif

  state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
  return munit_rand_from_state(state);
}

static munit_uint32_t munit_rand_state_uint32(munit_uint32_t* state) {
  const munit_uint32_t old = *state;
  *state = munit_rand_next_state(old);
  return munit_rand_from_state(old);
}

munit_uint32_t munit_rand_uint32(void) {
  munit_uint32_t old, state;

  do {
    old = munit_atomic_load(&munit_rand_state);
    state = munit_rand_next_state(old);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));

  return munit_rand_from_state(old);
}

static void munit_rand_state_memory(munit_uint32_t* state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  size_t members_remaining = size / sizeof(munit_uint32_t);
  size_t bytes_remaining = size % sizeof(munit_uint32_t);
  munit_uint8_t* b = data;
  munit_uint32_t rv;

  while (members_remaining-- > 0) {
    rv = munit_rand_state_uint32(state);
    memcpy(b, &rv, sizeof(munit_uint32_t));
    b += sizeof(munit_uint32_t);
  }

  if (bytes_remaining != 0) {
    rv = munit_rand_state_uint32(state);
    memcpy(b, &rv, bytes_remaining);
  }
}

void munit_rand_memory(size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  munit_uint32_t old, state;

  do {
    state = old = munit_atomic_load(&munit_rand_state);
    munit_rand_state_memory(&state, size, data);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));
}

static munit_uint32_t munit_rand_state_at_most(munit_uint32_t* state, munit_uint32_t salt, munit_uint32_t max) {
  /* We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the same
   * as (UINT32_MAX + 1 - max) % max = -max % max. We compute -max as
   * (~max + 1) to avoid compiler warnings about negating an unsigned
   * value. */
  const munit_uint32_t min = (~max + 1U) % max;
  munit_uint32_t x;

  if (max == (~((munit_uint32_t) 0U)))
    return munit_rand_state_uint32(state) ^ salt;

  max++;

  do {
    x = munit_rand_state_uint32(state) ^ salt;
  } while (x < min);

  return x % max;
}

static munit_uint32_t munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max) {
  munit_uint32_t old, state;
  munit_uint32_t retval;

  do {
    state = old = munit_atomic_load(&munit_rand_state);
    retval = munit_rand_state_at_most(&state, salt, max);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));

  return retval;
}

int munit_rand_int_range(int min, int max) {
  munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min;

  if (min > max)
    return munit_rand_int_range(max, min);

  if (range > (~((munit_uint32_t) 0U)))
    range = (~((munit_uint32_t) 0U));

  return min + munit_rand_at_most(0, (munit_uint32_t) range);
}

double munit_rand_double(void) {
  munit_uint32_t old, state;
  double retval = 0.0;

  do {
    state = old = munit_atomic_load(&munit_rand_state);

    /* See http://mumble.net/~campbell/tmp/random_real.c for how to do
     * this right. Patches welcome if you feel that this is too
     * biased. */
    retval = munit_rand_state_uint32(&state) / ((~((munit_uint32_t) 0U)) + 1.0);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));

  return retval;
}

/*** Test suite handling ***/

typedef struct {
  unsigned int successful;
  unsigned int skipped;
  unsigned int failed;
  unsigned int errored;
#if defined(MUNIT_ENABLE_TIMING)
  munit_uint64_t cpu_clock;
  munit_uint64_t wall_clock;
#endif
} MunitReport;

typedef struct {
  const char* prefix;
  const MunitSuite* suite;
  const char** tests;
  munit_uint32_t seed;
  unsigned int iterations;
  MunitParameter* parameters;
  munit_bool single_parameter_mode;
  void* user_data;
  MunitReport report;
  munit_bool colorize;
  munit_bool fork;
  munit_bool show_stderr;
  munit_bool fatal_failures;
} MunitTestRunner;

const char* munit_parameters_get(const MunitParameter params[], const char* key) {
  const MunitParameter* param;

  for (param = params ; param != NULL && param->name != NULL ; param++)
    if (strcmp(param->name, key) == 0)
      return param->value;

  return NULL;
}

#if defined(MUNIT_ENABLE_TIMING)
static void munit_print_time(FILE* fp, munit_uint64_t nanoseconds) {
  fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT, ((double) nanoseconds) / ((double) PSNIP_CLOCK_NSEC_PER_SEC));
}
#endif

/* Add a parameter to an array of parameters. */
static MunitResult munit_parameters_add(size_t* params_size, MunitParameter* params[MUNIT_ARRAY_PARAM(*params_size)], char* name, char* value) {
  *params = realloc(*params, sizeof(MunitParameter) * (*params_size + 2));
  if (*params == NULL)
    return MUNIT_ERROR;

  (*params)[*params_size].name = name;
  (*params)[*params_size].value = value;
  (*params_size)++;
  (*params)[*params_size].name = NULL;
  (*params)[*params_size].value = NULL;

  return MUNIT_OK;
}

/* Concatenate two strings, but just return one of the components
 * unaltered if the other is NULL or "". */
static char* munit_maybe_concat(size_t* len, char* prefix, char* suffix) {
  char* res;
  size_t res_l;
  const size_t prefix_l = prefix != NULL ? strlen(prefix) : 0;
  const size_t suffix_l = suffix != NULL ? strlen(suffix) : 0;

  if (prefix_l == 0 && suffix_l == 0) {
    res = NULL;
    res_l = 0;
  } else if (prefix_l == 0 && suffix_l != 0) {
    res = suffix;
    res_l = suffix_l;
  } else if (prefix_l != 0 && suffix_l == 0) {
    res = prefix;
    res_l = prefix_l;
  } else {
    res_l = prefix_l + suffix_l;
    res = malloc(res_l + 1);
    memcpy(res, prefix, prefix_l);
    memcpy(res + prefix_l, suffix, suffix_l);
    res[res_l] = 0;
  }

  if (len != NULL)
    *len = res_l;

  return res;
}

/* Possibly free a string returned by munit_maybe_concat. */
static void munit_maybe_free_concat(char* s, const char* prefix, const char* suffix) {
  if (prefix != s && suffix != s)
    free(s);
}

/* Cheap string hash function, just used to salt the PRNG. */
static munit_uint32_t munit_str_hash(const char* name) {
  const char *p;
  munit_uint32_t h = 5381U;

  for (p = name; *p != '\0'; p++)
    h = (h << 5) + h + *p;

  return h;
}

static void munit_splice(int from, int to) {
  munit_uint8_t buf[1024];
#if !defined(_WIN32)
  ssize_t len;
  ssize_t bytes_written;
  ssize_t write_res;
#else
  int len;
  int bytes_written;
  int write_res;
#endif
  do {
    len = read(from, buf, sizeof(buf));
    if (len > 0) {
      bytes_written = 0;
      do {
        write_res = write(to, buf + bytes_written, len - bytes_written);
        if (write_res < 0)
          break;
        bytes_written += write_res;
      } while (bytes_written < len);
    } else
      break;
  } while (1);
}

/* This is the part that should be handled in the child process */
static MunitResult munit_test_runner_exec(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[], MunitReport* report) {
  unsigned int iterations = runner->iterations;
  MunitResult result = MUNIT_FAIL;
#if defined(MUNIT_ENABLE_TIMING)
  struct PsnipClockTimespec wall_clock_begin = { 0, }, wall_clock_end = { 0, };
  struct PsnipClockTimespec cpu_clock_begin = { 0, }, cpu_clock_end = { 0, };
#endif
  unsigned int i = 0;

  if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION)
    iterations = 1;
  else if (iterations == 0)
    iterations = runner->suite->iterations;

  munit_rand_seed(runner->seed);
  do {
    void* data = (test->setup == NULL) ?
runner->user_data : test->setup(params, runner->user_data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_begin); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_begin); #endif result = test->test(params, data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_end); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_end); #endif if (test->tear_down != NULL) test->tear_down(data); if (MUNIT_LIKELY(result == MUNIT_OK)) { report->successful++; #if defined(MUNIT_ENABLE_TIMING) report->wall_clock += munit_clock_get_elapsed(&wall_clock_begin, &wall_clock_end); report->cpu_clock += munit_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end); #endif } else { switch ((int) result) { case MUNIT_SKIP: report->skipped++; break; case MUNIT_FAIL: report->failed++; break; case MUNIT_ERROR: report->errored++; break; default: break; } break; } } while (++i < iterations); return result; } #if defined(MUNIT_EMOTICON) # define MUNIT_RESULT_STRING_OK ":)" # define MUNIT_RESULT_STRING_SKIP ":|" # define MUNIT_RESULT_STRING_FAIL ":(" # define MUNIT_RESULT_STRING_ERROR ":o" # define MUNIT_RESULT_STRING_TODO ":/" #else # define MUNIT_RESULT_STRING_OK "OK " # define MUNIT_RESULT_STRING_SKIP "SKIP " # define MUNIT_RESULT_STRING_FAIL "FAIL " # define MUNIT_RESULT_STRING_ERROR "ERROR" # define MUNIT_RESULT_STRING_TODO "TODO " #endif static void munit_test_runner_print_color(const MunitTestRunner* runner, const char* string, char color) { if (runner->colorize) fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string); else fputs(string, MUNIT_OUTPUT_FILE); } #if !defined(MUNIT_NO_BUFFER) static int munit_replace_stderr(FILE* stderr_buf) { if (stderr_buf != NULL) { const int orig_stderr = dup(STDERR_FILENO); int errfd = fileno(stderr_buf); if (MUNIT_UNLIKELY(errfd == -1)) { exit(EXIT_FAILURE); } dup2(errfd, STDERR_FILENO); return orig_stderr; } return -1; } static void munit_restore_stderr(int orig_stderr) { if (orig_stderr != -1) { dup2(orig_stderr, STDERR_FILENO); close(orig_stderr); } } #endif /* !defined(MUNIT_NO_BUFFER) */ /* Run a test with the specified parameters. 
*/ static void munit_test_runner_run_test_with_params(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[]) { MunitResult result = MUNIT_OK; MunitReport report = { 0, 0, 0, 0, #if defined(MUNIT_ENABLE_TIMING) 0, 0 #endif }; unsigned int output_l; munit_bool first; const MunitParameter* param; FILE* stderr_buf; #if !defined(MUNIT_NO_FORK) int pipefd[2]; pid_t fork_pid; int orig_stderr; ssize_t bytes_written = 0; ssize_t write_res; ssize_t bytes_read = 0; ssize_t read_res; int status = 0; pid_t changed_pid; #endif if (params != NULL) { output_l = 2; fputs(" ", MUNIT_OUTPUT_FILE); first = 1; for (param = params ; param != NULL && param->name != NULL ; param++) { if (!first) { fputs(", ", MUNIT_OUTPUT_FILE); output_l += 2; } else { first = 0; } output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value); } while (output_l++ < MUNIT_TEST_NAME_LEN) { fputc(' ', MUNIT_OUTPUT_FILE); } } fflush(MUNIT_OUTPUT_FILE); stderr_buf = NULL; #if !defined(_WIN32) || defined(__MINGW32__) stderr_buf = tmpfile(); #else tmpfile_s(&stderr_buf); #endif if (stderr_buf == NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr"); result = MUNIT_ERROR; goto print_result; } #if !defined(MUNIT_NO_FORK) if (runner->fork) { pipefd[0] = -1; pipefd[1] = -1; if (pipe(pipefd) != 0) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe"); result = MUNIT_ERROR; goto print_result; } fork_pid = fork(); if (fork_pid == 0) { close(pipefd[0]); orig_stderr = munit_replace_stderr(stderr_buf); munit_test_runner_exec(runner, test, params, &report); /* Note that we don't restore stderr. This is so we can buffer * things written to stderr later on (such as by * asan/tsan/ubsan, valgrind, etc.) */ close(orig_stderr); do { write_res = write(pipefd[1], ((munit_uint8_t*) (&report)) + bytes_written, sizeof(report) - bytes_written); if (write_res < 0) { if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe"); } exit(EXIT_FAILURE); } bytes_written += write_res; } while ((size_t) bytes_written < sizeof(report)); if (stderr_buf != NULL) fclose(stderr_buf); close(pipefd[1]); exit(EXIT_SUCCESS); } else if (fork_pid == -1) { close(pipefd[0]); close(pipefd[1]); if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork"); } report.errored++; result = MUNIT_ERROR; } else { close(pipefd[1]); do { read_res = read(pipefd[0], ((munit_uint8_t*) (&report)) + bytes_read, sizeof(report) - bytes_read); if (read_res < 1) break; bytes_read += read_res; } while (bytes_read < (ssize_t) sizeof(report)); changed_pid = waitpid(fork_pid, &status, 0); if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) { if (bytes_read != sizeof(report)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited unexpectedly with status %d", WEXITSTATUS(status)); report.errored++; } else if (WEXITSTATUS(status) != EXIT_SUCCESS) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status)); report.errored++; } } else { if (WIFSIGNALED(status)) { #if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700) munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d (%s)", WTERMSIG(status), strsignal(WTERMSIG(status))); #else munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status)); #endif } else if (WIFSTOPPED(status)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", WSTOPSIG(status)); } 
report.errored++; } close(pipefd[0]); waitpid(fork_pid, NULL, 0); } } else #endif { #if !defined(MUNIT_NO_BUFFER) const volatile int orig_stderr = munit_replace_stderr(stderr_buf); #endif #if defined(MUNIT_THREAD_LOCAL) if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) { result = MUNIT_FAIL; report.failed++; } else { munit_error_jmp_buf_valid = 1; result = munit_test_runner_exec(runner, test, params, &report); } #else result = munit_test_runner_exec(runner, test, params, &report); #endif #if !defined(MUNIT_NO_BUFFER) munit_restore_stderr(orig_stderr); #endif /* Here just so that the label is used on Windows and we don't get * a warning */ goto print_result; } print_result: fputs("[ ", MUNIT_OUTPUT_FILE); if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) { if (report.failed != 0 || report.errored != 0 || report.skipped != 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3'); result = MUNIT_OK; } else { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); if (MUNIT_LIKELY(stderr_buf != NULL)) munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful."); runner->report.failed++; result = MUNIT_ERROR; } } else if (report.failed > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1'); runner->report.failed++; result = MUNIT_FAIL; } else if (report.errored > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); runner->report.errored++; result = MUNIT_ERROR; } else if (report.skipped > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3'); runner->report.skipped++; result = MUNIT_SKIP; } else if (report.successful > 1) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / report.successful); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / report.successful); fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", ""); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } else if (report.successful > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } fputs(" ]\n", MUNIT_OUTPUT_FILE); if (stderr_buf != NULL) { if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) { fflush(MUNIT_OUTPUT_FILE); rewind(stderr_buf); munit_splice(fileno(stderr_buf), STDERR_FILENO); fflush(stderr); } fclose(stderr_buf); } } static void munit_test_runner_run_test_wild(MunitTestRunner* runner, const MunitTest* test, const char* test_name, MunitParameter* params, MunitParameter* p) { const MunitParameterEnum* pe; char** values; MunitParameter* next; for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) { if (p->name == pe->name) break; } if (pe == NULL) return; for (values = pe->values ; *values != NULL ; values++) { next = p + 1; p->value = *values; if (next->name == NULL) { munit_test_runner_run_test_with_params(runner, test, 
params);
    } else {
      munit_test_runner_run_test_wild(runner, test, test_name, params, next);
    }
    if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
      break;
  }
}

/* Run a single test, with every combination of parameters
 * requested. */
static void munit_test_runner_run_test(MunitTestRunner* runner, const MunitTest* test, const char* prefix) {
  char* test_name = munit_maybe_concat(NULL, (char*) prefix, (char*) test->name);
  /* The array of parameters to pass to
   * munit_test_runner_run_test_with_params */
  MunitParameter* params = NULL;
  size_t params_l = 0;
  /* Wildcard parameters are parameters which have possible values
   * specified in the test, but no specific value was passed to the
   * CLI. That means we want to run the test once for every
   * possible combination of parameter values or, if --single was
   * passed to the CLI, a single time with a random set of
   * parameters. */
  MunitParameter* wild_params = NULL;
  size_t wild_params_l = 0;
  const MunitParameterEnum* pe;
  const MunitParameter* cli_p;
  munit_bool filled;
  unsigned int possible;
  char** vals;
  size_t first_wild;
  const MunitParameter* wp;
  int pidx;

  munit_rand_seed(runner->seed);

  fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name);

  if (test->parameters == NULL) {
    /* No parameters. Simple, nice. */
    munit_test_runner_run_test_with_params(runner, test, NULL);
  } else {
    fputc('\n', MUNIT_OUTPUT_FILE);

    for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) {
      /* Did we receive a value for this parameter from the CLI? */
      filled = 0;
      for (cli_p = runner->parameters ; cli_p != NULL && cli_p->name != NULL ; cli_p++) {
        if (strcmp(cli_p->name, pe->name) == 0) {
          if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, cli_p->value) != MUNIT_OK))
            goto cleanup;
          filled = 1;
          break;
        }
      }
      if (filled)
        continue;

      /* Nothing from CLI, is the enum NULL/empty? We're not a
       * fuzzer… */
      if (pe->values == NULL || pe->values[0] == NULL)
        continue;

      /* If --single was passed to the CLI, choose a value from the
       * list of possibilities randomly. */
      if (runner->single_parameter_mode) {
        possible = 0;
        for (vals = pe->values ; *vals != NULL ; vals++)
          possible++;
        /* We want the tests to be reproducible, even if you're only
         * running a single test, but we don't want every test with
         * the same number of parameters to choose the same parameter
         * number, so use the test name as a primitive salt. */
        pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1);
        if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[pidx]) != MUNIT_OK))
          goto cleanup;
      } else {
        /* We want to try every permutation. Put in a placeholder
         * entry, we'll iterate through them later. */
        if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK))
          goto cleanup;
      }
    }

    if (wild_params_l != 0) {
      first_wild = params_l;
      for (wp = wild_params ; wp != NULL && wp->name != NULL ; wp++) {
        for (pe = test->parameters ; pe != NULL && pe->name != NULL && pe->values != NULL ; pe++) {
          if (strcmp(wp->name, pe->name) == 0) {
            if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[0]) != MUNIT_OK))
              goto cleanup;
          }
        }
      }

      munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild);
    } else {
      munit_test_runner_run_test_with_params(runner, test, params);
    }

  cleanup:
    free(params);
    free(wild_params);
  }

  munit_maybe_free_concat(test_name, prefix, test->name);
}

/* Recurse through the suite and run all the tests. If a list of
 * tests to run was provided on the command line, run only those
 * tests. */
static void munit_test_runner_run_suite(MunitTestRunner* runner, const MunitSuite* suite, const char* prefix) {
  size_t pre_l;
  char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix);
  const MunitTest* test;
  const char** test_name;
  const MunitSuite* child_suite;

  /* Run the tests. */
  for (test = suite->tests ; test != NULL && test->test != NULL ; test++) {
    if (runner->tests != NULL) { /* Specific tests were requested on the CLI */
      for (test_name = runner->tests ; test_name != NULL && *test_name != NULL ; test_name++) {
        if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) &&
            strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) {
          munit_test_runner_run_test(runner, test, pre);
          if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
            goto cleanup;
        }
      }
    } else { /* Run all tests */
      munit_test_runner_run_test(runner, test, pre);
    }
  }

  if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
    goto cleanup;

  /* Run any child suites. */
  for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) {
    munit_test_runner_run_suite(runner, child_suite, pre);
  }

 cleanup:
  munit_maybe_free_concat(pre, prefix, suite->prefix);
}

static void munit_test_runner_run(MunitTestRunner* runner) {
  munit_test_runner_run_suite(runner, runner->suite, NULL);
}

static void munit_print_help(int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], void* user_data, const MunitArgument arguments[]) {
  const MunitArgument* arg;

  (void) argc;

  printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]);
  puts(" --seed SEED\n"
       "           Value used to seed the PRNG. Must be a 32-bit integer in decimal\n"
       "           notation with no separators (commas, decimals, spaces, etc.), or\n"
       "           hexadecimal prefixed by \"0x\".\n"
       " --iterations N\n"
       "           Run each test N times. 0 means the default number.\n"
       " --param name value\n"
       "           A parameter key/value pair which will be passed to any test which\n"
       "           takes a parameter of that name. If not provided, the test will be\n"
       "           run once for each possible parameter value.\n"
       " --list    Write a list of all available tests.\n"
       " --list-params\n"
       "           Write a list of all available tests and their possible parameters.\n"
       " --single  Run each parameterized test in a single configuration instead of\n"
       "           every possible combination.\n"
       " --log-visible debug|info|warning|error\n"
       " --log-fatal debug|info|warning|error\n"
       "           Set the level at which messages of different severities are visible,\n"
       "           or cause the test to terminate.\n"
#if !defined(MUNIT_NO_FORK)
       " --no-fork Do not execute tests in a child process. If this option is supplied\n"
       "           and a test crashes (including by failing an assertion), no further\n"
       "           tests will be performed.\n"
#endif
       " --fatal-failures\n"
       "           Stop executing tests as soon as a failure is found.\n"
       " --show-stderr\n"
       "           Show data written to stderr by the tests, even if the test succeeds.\n"
       " --color auto|always|never\n"
       "           Colorize (or don't) the output.\n"
       /* 12345678901234567890123456789012345678901234567890123456789012345678901234567890 */
       " --help    Print this help message and exit.\n");
#if defined(MUNIT_NL_LANGINFO)
  setlocale(LC_ALL, "");
  fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ?
"µnit" : "munit", stdout); #else puts("munit"); #endif printf(" %d.%d.%d\n" "Full documentation at: https://nemequ.github.io/munit/\n", (MUNIT_CURRENT_VERSION >> 16) & 0xff, (MUNIT_CURRENT_VERSION >> 8) & 0xff, (MUNIT_CURRENT_VERSION >> 0) & 0xff); for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) arg->write_help(arg, user_data); } static const MunitArgument* munit_arguments_find(const MunitArgument arguments[], const char* name) { const MunitArgument* arg; for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) if (strcmp(arg->name, name) == 0) return arg; return NULL; } static void munit_suite_list_tests(const MunitSuite* suite, munit_bool show_params, const char* prefix) { size_t pre_l; char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix); const MunitTest* test; const MunitParameterEnum* params; munit_bool first; char** val; const MunitSuite* child_suite; for (test = suite->tests ; test != NULL && test->name != NULL ; test++) { if (pre != NULL) fputs(pre, stdout); puts(test->name); if (show_params) { for (params = test->parameters ; params != NULL && params->name != NULL ; params++) { fprintf(stdout, " - %s: ", params->name); if (params->values == NULL) { puts("Any"); } else { first = 1; for (val = params->values ; *val != NULL ; val++ ) { if(!first) { fputs(", ", stdout); } else { first = 0; } fputs(*val, stdout); } putc('\n', stdout); } } } } for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) { munit_suite_list_tests(child_suite, show_params, pre); } munit_maybe_free_concat(pre, prefix, suite->prefix); } static munit_bool munit_stream_supports_ansi(FILE *stream) { #if !defined(_WIN32) return isatty(fileno(stream)); #else #if !defined(__MINGW32__) size_t ansicon_size = 0; #endif if (isatty(fileno(stream))) { #if !defined(__MINGW32__) getenv_s(&ansicon_size, NULL, 0, "ANSICON"); return ansicon_size != 0; #else return getenv("ANSICON") != NULL; #endif } return 0; #endif } int munit_suite_main_custom(const MunitSuite* suite, void* user_data, int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], const MunitArgument arguments[]) { int result = EXIT_FAILURE; MunitTestRunner runner; size_t parameters_size = 0; size_t tests_size = 0; int arg; char* envptr; unsigned long ts; char* endptr; unsigned long long iterations; MunitLogLevel level; const MunitArgument* argument; const char** runner_tests; unsigned int tests_run; unsigned int tests_total; runner.prefix = NULL; runner.suite = NULL; runner.tests = NULL; runner.seed = 0; runner.iterations = 0; runner.parameters = NULL; runner.single_parameter_mode = 0; runner.user_data = NULL; runner.report.successful = 0; runner.report.skipped = 0; runner.report.failed = 0; runner.report.errored = 0; #if defined(MUNIT_ENABLE_TIMING) runner.report.cpu_clock = 0; runner.report.wall_clock = 0; #endif runner.colorize = 0; #if !defined(_WIN32) runner.fork = 1; #else runner.fork = 0; #endif runner.show_stderr = 0; runner.fatal_failures = 0; runner.suite = suite; runner.user_data = user_data; runner.seed = munit_rand_generate_seed(); runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE); for (arg = 1 ; arg < argc ; arg++) { if (strncmp("--", argv[arg], 2) == 0) { if (strcmp("seed", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } envptr = argv[arg + 1]; ts = strtoul(argv[arg + 1], &envptr, 0); if (*envptr != '\0' || ts > (~((munit_uint32_t) 0U))) { 
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } runner.seed = (munit_uint32_t) ts; arg++; } else if (strcmp("iterations", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } endptr = argv[arg + 1]; iterations = strtoul(argv[arg + 1], &endptr, 0); if (*endptr != '\0' || iterations > UINT_MAX) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } runner.iterations = (unsigned int) iterations; arg++; } else if (strcmp("param", argv[arg] + 2) == 0) { if (arg + 2 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]); goto cleanup; } runner.parameters = realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2)); if (runner.parameters == NULL) { munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory"); goto cleanup; } runner.parameters[parameters_size].name = (char*) argv[arg + 1]; runner.parameters[parameters_size].value = (char*) argv[arg + 2]; parameters_size++; runner.parameters[parameters_size].name = NULL; runner.parameters[parameters_size].value = NULL; arg += 2; } else if (strcmp("color", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } if (strcmp(argv[arg + 1], "always") == 0) runner.colorize = 1; else if (strcmp(argv[arg + 1], "never") == 0) runner.colorize = 0; else if (strcmp(argv[arg + 1], "auto") == 0) runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE); else { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } arg++; } else if (strcmp("help", argv[arg] + 2) == 0) { munit_print_help(argc, argv, user_data, arguments); result = EXIT_SUCCESS; goto cleanup; } else if (strcmp("single", argv[arg] + 2) == 0) { runner.single_parameter_mode = 1; } else if (strcmp("show-stderr", argv[arg] + 2) == 0) { runner.show_stderr = 1; #if !defined(_WIN32) } else if (strcmp("no-fork", argv[arg] + 2) == 0) { runner.fork = 0; #endif } else if (strcmp("fatal-failures", argv[arg] + 2) == 0) { runner.fatal_failures = 1; } else if (strcmp("log-visible", argv[arg] + 2) == 0 || strcmp("log-fatal", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } if (strcmp(argv[arg + 1], "debug") == 0) level = MUNIT_LOG_DEBUG; else if (strcmp(argv[arg + 1], "info") == 0) level = MUNIT_LOG_INFO; else if (strcmp(argv[arg + 1], "warning") == 0) level = MUNIT_LOG_WARNING; else if (strcmp(argv[arg + 1], "error") == 0) level = MUNIT_LOG_ERROR; else { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } if (strcmp("log-visible", argv[arg] + 2) == 0) munit_log_level_visible = level; else munit_log_level_fatal = level; arg++; } else if (strcmp("list", argv[arg] + 2) == 0) { munit_suite_list_tests(suite, 0, NULL); result = EXIT_SUCCESS; goto cleanup; } else if (strcmp("list-params", argv[arg] + 2) == 0) { munit_suite_list_tests(suite, 1, NULL); result = EXIT_SUCCESS; goto cleanup; } else { argument = munit_arguments_find(arguments, argv[arg] + 2); if (argument == NULL) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", argv[arg]); goto cleanup; } if 
            (!argument->parse_argument(suite, user_data, &arg, argc, argv))
          goto cleanup;
      }
    } else {
      runner_tests = realloc((void*) runner.tests, sizeof(char*) * (tests_size + 2));
      if (runner_tests == NULL) {
        munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
        goto cleanup;
      }
      runner.tests = runner_tests;
      runner.tests[tests_size++] = argv[arg];
      runner.tests[tests_size] = NULL;
    }
  }

  fflush(stderr);
  fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed 0x%08" PRIx32 "...\n",
          runner.seed);

  munit_test_runner_run(&runner);

  tests_run = runner.report.successful + runner.report.failed + runner.report.errored;
  tests_total = tests_run + runner.report.skipped;
  if (tests_run == 0) {
    fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped);
  } else {
    fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests successful, %d (%0.0f%%) tests skipped.\n",
            runner.report.successful,
            tests_run,
            (((double) runner.report.successful) / ((double) tests_run)) * 100.0,
            runner.report.skipped,
            (((double) runner.report.skipped) / ((double) tests_total)) * 100.0);
  }

  if (runner.report.failed == 0 && runner.report.errored == 0) {
    result = EXIT_SUCCESS;
  }

 cleanup:
  free(runner.parameters);
  free((void*) runner.tests);

  return result;
}

int munit_suite_main(const MunitSuite* suite, void* user_data,
                     int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)]) {
  return munit_suite_main_custom(suite, user_data, argc, argv, NULL);
}
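A minimal, hypothetical driver for the runner above; the test and suite names are illustrative, not part of munit itself:

#include "munit.h"

/* A trivial test case: munit's assert macros abort the test on failure. */
static MunitResult
example_test(const MunitParameter params[], void* user_data) {
  (void) params;
  (void) user_data;
  munit_assert_int(2 + 2, ==, 4);
  return MUNIT_OK;
}

/* NULL-terminated test array: { name, test, setup, tear_down, options, parameters }. */
static MunitTest example_tests[] = {
  { (char*) "/example", example_test, NULL, NULL, MUNIT_TEST_OPTION_NONE, NULL },
  { NULL, NULL, NULL, NULL, MUNIT_TEST_OPTION_NONE, NULL }
};

/* Suite: { prefix, tests, child suites, iterations, options }. */
static const MunitSuite example_suite = {
  (char*) "", example_tests, NULL, 1, MUNIT_SUITE_OPTION_NONE
};

int main(int argc, char* argv[MUNIT_ARRAY_PARAM(argc + 1)]) {
  /* Hands argv to the runner above, so --seed, --iterations, --param,
   * --list and friends all work as parsed in munit_suite_main_custom(). */
  return munit_suite_main(&example_suite, NULL, argc, argv);
}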
pr96867.c
/* PR c++/96867 */

int *v;

void
foo (int x)
{
  #pragma omp target update to (x, v[:])  /* { dg-error "for pointer type length expression must be specified" } */
}
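For contrast, a sketch of a conforming variant (hypothetical, not part of the testcase): with a bare pointer the array-section length cannot be inferred, so OpenMP requires it to be spelled out.

int *v;

void
bar (int x, int n)
{
  /* v[:n] gives the section an explicit length, so this form is accepted. */
  #pragma omp target update to (x, v[:n])
}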
move_particle_utility.h
// KRATOS ___ ___ _ ___ __ ___ ___ ___ ___ // / __/ _ \| \| \ \ / /__| \_ _| __| __| // | (_| (_) | .` |\ V /___| |) | || _|| _| // \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Pablo Becker // #if !defined(KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED) #define KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/node.h" /// #include "includes/dof.h" #include "includes/variables.h" #include "containers/array_1d.h" #include "containers/data_value_container.h" #include "includes/mesh.h" #include "utilities/math_utils.h" /// #include "utilities/geometry_utilities.h" #include "includes/model_part.h" #include "spatial_containers/spatial_containers.h" #include "spatial_containers/cell.h" #include "spatial_containers/bins_dynamic_objects.h" #include "utilities/spatial_containers_configure.h" #include "geometries/line_2d_2.h" #include "geometries/triangle_2d_3.h" #include "geometries/triangle_3d_3.h" #include "geometries/point.h" #include "convection_diffusion_application.h" #include "convection_particle.h" #include "utilities/openmp_utils.h" #include "utilities/parallel_utilities.h" #include "time.h" //#include "processes/process.h" namespace Kratos { //this class is to be modified by the user to customize the interpolation process template< unsigned int TDim> class MoveParticleUtilityScalarTransport { public: typedef SpatialContainersConfigure<TDim> Configure; typedef typename Configure::PointType PointType; //typedef PointType::CoordinatesArrayType CoordinatesArrayType; typedef typename Configure::ContainerType ContainerType; //typedef Configure::PointerType PointerType; typedef typename Configure::IteratorType IteratorType; typedef typename Configure::ResultContainerType ResultContainerType; //typedef Configure::ResultPointerType ResultPointerType; typedef typename Configure::ResultIteratorType ResultIteratorType; typedef PointerVector< Convection_Particle, Convection_Particle*, std::vector<Convection_Particle*> > ParticlePointerVector; //typedef Configure::ContactPairType ContactPairType; //typedef Configure::ContainerContactType ContainerContactType; //typedef Configure::IteratorContactType IteratorContactType; //typedef Configure::PointerContactType PointerContactType; //typedef Configure::PointerTypeIterator PointerTypeIterator; KRATOS_CLASS_POINTER_DEFINITION(MoveParticleUtilityScalarTransport); //template<unsigned int TDim> MoveParticleUtilityScalarTransport(ModelPart& model_part, int maximum_number_of_particles) : mr_model_part(model_part) , mmaximum_number_of_particles(maximum_number_of_particles) , mUnknownVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetUnknownVariable()) , mProjectionVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetProjectionVariable()) , mVelocityVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetVelocityVariable()) , mMeshVelocityVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetMeshVelocityVariable()) { std::cout << "initializing moveparticle utility for scalar transport" << std::endl; Check(); //storing water and air density and their inverses, just in case it is needed for the streamline integration //loop in elements to change their ID to their position in the array. Easier to get information later. 
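        // Note on the renumbering below (an observation, inferred from how the
        // containers are indexed later): after it, the element with Id k sits at
        // position k-1 of the element array, so per-element bookkeeping such as
        // mnumber_of_particles_in_elems and mvector_of_particle_pointers_vectors
        // can be fetched in O(1) as container[Id-1].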
            //DO NOT PARALLELIZE THIS! IT MUST BE SERIAL!
            ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
            for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
            {
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                ielem->SetId(ii+1);
            }
            mlast_elem_id= (mr_model_part.ElementsEnd()-1)->Id();
            int node_id=0;
            // we compute the mean edge length at each node. it could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions (method currently used)
            ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
            vector<unsigned int> node_partition;
            #ifdef _OPENMP
                int number_of_threads = omp_get_max_threads();
            #else
                int number_of_threads = 1;
            #endif
            OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

            #pragma omp parallel for
            for(int kkk=0; kkk<number_of_threads; kkk++)
            {
                for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
                {
                    ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
                    array_1d<double,3> position_node;
                    double distance=0.0;
                    position_node = pnode->Coordinates();
                    GlobalPointersVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES);
                    //we loop over all the neighbour nodes to check all the edges
                    const double number_of_neighbours = double(rneigh.size());
                    for( GlobalPointersVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++)
                    {
                        array_1d<double,3> position_difference;
                        position_difference = inode->Coordinates() - position_node;
                        double current_distance= sqrt(pow(position_difference[0],2)+pow(position_difference[1],2)+pow(position_difference[2],2));
                        //if (current_distance>distance)
                        //    distance=current_distance;
                        distance += current_distance / number_of_neighbours;
                    }
                    //and we save the mean edge length (the accumulation above averages the edges; it does not keep the largest one)
                    pnode->FastGetSolutionStepValue(MEAN_SIZE)=distance;

                    node_id=pnode->GetId();
                }
            }
            mlast_node_id=node_id;

            //we also calculate the element mean size in the same way, for the courant number
            //also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure
            vector<unsigned int> element_partition;
            OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

            //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element).
#pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; double mElemSize; array_1d<double,3> Edge(3,0.0); Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates(); mElemSize = Edge[0]*Edge[0]; for (unsigned int d = 1; d < TDim; d++) mElemSize += Edge[d]*Edge[d]; for (unsigned int i = 2; i < (TDim+1); i++) for(unsigned int j = 0; j < i; j++) { Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates(); double Length = Edge[0]*Edge[0]; for (unsigned int d = 1; d < TDim; d++) Length += Edge[d]*Edge[d]; if (Length < mElemSize) mElemSize = Length; } mElemSize = sqrt(mElemSize); ielem->GetValue(MEAN_SIZE) = mElemSize; } } //matrix containing the position of the 4/15/45 particles that we will seed at the beggining BoundedMatrix<double, 5*(1+TDim), 3 > pos; BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N; int particle_id=0; mnelems = mr_model_part.Elements().size(); std::cout << "about to resize vectors" << std::endl; //setting the right size to the vector containing the particles assigned to each element //particles vector. this vector contains ALL the particles in the simulation. mparticles_vector.resize(mnelems*mmaximum_number_of_particles); //and this vector contains the current number of particles that are in each element (currently zero) mnumber_of_particles_in_elems.resize(mnelems); mnumber_of_particles_in_elems=ZeroVector(mnelems); //when moving the particles, an auxiliary vector is necessary (to store the previous number) mnumber_of_particles_in_elems_aux.resize(mnelems); //each element will have a list of pointers to all the particles that are inside. //this vector contains the pointers to the vector of (particle) pointers of each element. mvector_of_particle_pointers_vectors.resize(mnelems); //int artz; //std::cin >> artz; int i_int=0; //careful! it's not the id, but the position inside the array! std::cout << "about to create particles" << std::endl; //now we seed: LOOP IN ELEMENTS //using loop index, DO NOT paralelize this! change lines : mparticles_in_elems_pointers((ii*mmaximum_number_of_particles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one moffset=0; //Convection_Particle& firstparticle =mparticles_vector[0]; for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; //(ielem->GetValue(BED_PARTICLE_POINTERS)) = ParticlePointerVector( mmaximum_number_of_particles*2, &firstparticle ); //ParticlePointerVector& particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS)); //now we link the mpointers_to_particle_pointers_vectors to the corresponding element //mpointers_to_particle_pointers_vectors(ii) = &particle_pointers; //now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half). 
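            // Layout of one element's particle-pointer vector (size 2*mmaximum_number_of_particles):
            //   indices [0 .. max-1]      : half addressed when moffset == 0
            //   indices [max .. 2*max-1]  : half addressed when moffset == max
            // MoveParticles() reads particle pointers from the half selected by moffset,
            // writes the post-move pointers into the other half, and then flips moffset.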
//for(int j=0; j<(mmaximum_number_of_particles*2); j++) // particle_pointers.push_back(&firstparticle); mvector_of_particle_pointers_vectors[ii] = ParticlePointerVector( mmaximum_number_of_particles*2 ); ParticlePointerVector& particle_pointers = mvector_of_particle_pointers_vectors[ii]; //int & number_of_particles = ielem->GetValue(NUMBER_OF_BED_PARTICLES); int & number_of_particles = mnumber_of_particles_in_elems[ii]; number_of_particles=0; Geometry< Node<3> >& geom = ielem->GetGeometry(); //unsigned int elem_id = ielem->Id(); //mareas_vector[i_int]=CalculateArea(geom); UNUSED SO COMMENTED ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45 //now we seed the particles in the current element for (unsigned int j = 0; j < pos.size1(); j++) { ++particle_id; Convection_Particle& pparticle = mparticles_vector[particle_id-1]; pparticle.X()=pos(j,0); pparticle.Y()=pos(j,1); pparticle.Z()=pos(j,2); pparticle.GetEraseFlag()=false; float & scalar1= pparticle.GetScalar1(); scalar1=0.0; for (unsigned int k = 0; k < (TDim+1); k++) { scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(mUnknownVar); } particle_pointers(j) = &pparticle; number_of_particles++ ; } ++i_int; } m_nparticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true. KRATOS_WATCH(m_nparticles); //KRATOS_WATCH(mlast_elem_id); mparticle_printing_tool_initialized=false; //std::cin >> artz; } virtual ~MoveParticleUtilityScalarTransport() {} void MountBin() { KRATOS_TRY //copy the elements to a new container, as the list will //be shuffled duringthe construction of the tree ContainerType& rElements = mr_model_part.ElementsArray(); IteratorType it_begin = rElements.begin(); IteratorType it_end = rElements.end(); //const int number_of_elem = rElements.size(); typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) ); paux.swap(mpBinsObjectDynamic); //BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end ); std::cout << "finished mounting Bins" << std::endl; KRATOS_CATCH("") } void CalculateVelOverElemSize() { KRATOS_TRY //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const double nodal_weight = 1.0/ (1.0 + double (TDim) ); ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); vector<unsigned int> element_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; Geometry<Node<3> >& geom = ielem->GetGeometry(); array_1d<double, 3 >vector_mean_velocity=ZeroVector(3); for (unsigned int i=0; i != (TDim+1) ; i++) vector_mean_velocity += geom[i].FastGetSolutionStepValue(mVelocityVar); vector_mean_velocity *= nodal_weight; const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) ); ielem->GetValue(MEAN_VEL_OVER_ELEM_SIZE) = mean_velocity / (ielem->GetValue(MEAN_SIZE)); } } KRATOS_CATCH("") } //name self explained void ResetBoundaryConditions() { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); 
vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; if (inode->IsFixed(mUnknownVar)) { inode->FastGetSolutionStepValue(mUnknownVar)=inode->GetSolutionStepValue(mUnknownVar,1); } } } KRATOS_CATCH("") } void CalculateDeltaVariables() { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(DELTA_SCALAR1) = inode->FastGetSolutionStepValue(mUnknownVar) - inode->FastGetSolutionStepValue(mProjectionVar) ; } } KRATOS_CATCH("") } void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable); } } KRATOS_CATCH("") } //to move all the particles across the streamlines. heavy task! void MoveParticles() { KRATOS_TRY ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. //moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part) //since it is the only function in the whole procedure that does this, it must use alternatively one part and the other. 
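        // Substepping note: each particle is convected with explicit substeps of size
        // delta_t/nsubsteps, where nsubsteps is taken as roughly 10 * delta_t * |v|/h
        // (see MEAN_VEL_OVER_ELEM_SIZE), i.e. the per-substep Courant number is kept
        // around 0.1 so that a particle only advances a small fraction of an element
        // per substep (see MoveParticle below).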
        //KRATOS_WATCH(offset)
        bool even_timestep;
        if (offset!=0) even_timestep=false;
        else even_timestep=true;

        const int post_offset = mmaximum_number_of_particles*int(even_timestep);
        //and we also save the offset to know the location in which we will save the pointers after we've moved the particles
        //KRATOS_WATCH(post_offset)

        double delta_t = CurrentProcessInfo[DELTA_TIME];

        array_1d<double,TDim+1> N;
        const unsigned int max_results = 10000;

        //double integration_distance= 2.0;

        max_nsubsteps = 10;
        max_substep_dt=delta_t/double(max_nsubsteps);

        vector<unsigned int> element_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

        //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element).
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                //ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
                int & number_of_particles = mnumber_of_particles_in_elems[ii]; //old_element->GetValue(NUMBER_OF_BED_PARTICLES);
                mnumber_of_particles_in_elems_aux[ii]=number_of_particles;
                mnumber_of_particles_in_elems[ii]=0;
                //we reset the local vectors for a faster access;
            }
        }
        std::cout << "convecting particles" << std::endl;
        //We move the particles across the fixed mesh, saving the updated data into them (using the function MoveParticle)

        #pragma omp barrier

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            ResultContainerType results(max_results);
            GlobalPointersVector< Element > elements_in_trajectory;
            elements_in_trajectory.resize(20);

            for(unsigned int ielem=element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
            {
            //for(unsigned int ielem=0; ielem<mr_model_part.Elements().size(); ielem++)
            //{
                ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
                const int old_element_id = old_element->Id();

                ParticlePointerVector& old_element_particle_pointers = mvector_of_particle_pointers_vectors(old_element_id-1);

                if ( (results.size()) !=max_results)
                    results.resize(max_results);

                unsigned int number_of_elements_in_trajectory=0; //excluding the origin one (current one, ielem)

                for(int ii=0; ii<(mnumber_of_particles_in_elems_aux(ielem)); ii++)
                {
                    Convection_Particle & pparticle = old_element_particle_pointers[offset+ii];

                    Element::Pointer pcurrent_element( *old_element.base() );
                    ResultIteratorType result_begin = results.begin();
                    bool & erase_flag=pparticle.GetEraseFlag();
                    if (erase_flag==false){
                        MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results); //N was removed from the arguments: it is not needed, since the particle ALWAYS starts at a node and where it ends does not matter

                        const int current_element_id = pcurrent_element->Id();

                        int & number_of_particles_in_current_elem = mnumber_of_particles_in_elems(current_element_id-1);
                        //int & number_of_water_particles_in_current_elem = mnumber_of_water_particles_in_elems(current_element_id-1);
                        if (number_of_particles_in_current_elem<mmaximum_number_of_particles && erase_flag==false)
                        {
                            {
                                ParticlePointerVector& current_element_particle_pointers = mvector_of_particle_pointers_vectors(current_element_id-1);

                                #pragma omp critical
                                {
                                    if
                                    (number_of_particles_in_current_elem<mmaximum_number_of_particles) // we can't go over this limit, there's no room. otherwise we would be in the position of the first particle of the next element!!
                                    {
                                        current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
                                        number_of_particles_in_current_elem++ ;
                                        if (number_of_particles_in_current_elem>mmaximum_number_of_particles)
                                            KRATOS_WATCH("MAL");
                                    }
                                    else
                                        pparticle.GetEraseFlag()=true; //so we just delete it!
                                }
                            }
                        }
                        else
                            pparticle.GetEraseFlag()=true; //so we just delete it!
                    }
                }
            }
        }

        /*
        //now we pass info from the local vector to the elements:
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
                old_element->GetValue(NUMBER_OF_BED_PARTICLES) = mnumber_of_particles_in_elems(ii);
                //old_element->GetValue(NUMBER_OF_WATER_PARTICLES) = mnumber_of_water_particles_in_elems(ii);
            }
        }
        */

        //after having moved everything we flip the offset flag (moffset) so that the next timestep uses the other half of the pointer arrays:
        moffset = post_offset;

        KRATOS_CATCH("")
    }

    void TransferLagrangianToEulerian() //explicit
    {
        KRATOS_TRY

        //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        //const double delta_t =CurrentProcessInfo[DELTA_TIME];
        const double threshold= 0.0/(double(TDim)+1.0);

        std::cout << "projecting info to mesh" << std::endl;

        const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones
        //KRATOS_WATCH(offset) //(flag managed only by MoveParticles)

        //we must project data from the particles (lagrangian) into the eulerian mesh
        //ValuesVectorType eulerian_nodes_old_temperature;
        //int nnodes = mr_model_part.Nodes().size();
        //array_1d<double,(n_nodes)> eulerian_nodes_sumweights;

        //we save data from the previous time step of the eulerian mesh in case we must reuse it later, since no particle may be found around some nodes
        //though we could've used a bigger buffer, to be changed later!
        //after having saved the data, we reset it to zero; this way it's easier to add the contribution of the surrounding particles.
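        // Projection used below: for every mesh node j,
        //   projected_j = sum_p( w_p * scalar_p ) / sum_p( w_p ),   with w_p = N_j(x_p)^2,
        // where p runs over the particles found inside the elements sharing node j.
        // YP accumulates the denominator (the sum of weights) at each node.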
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(mProjectionVar)=0.0; inode->FastGetSolutionStepValue(YP)=0.0; } } //adding contribution, loop on elements, since each element has stored the particles found inside of it vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; array_1d<double,3*(TDim+1)> nodes_positions; array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1)); array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1)); //array_1d<double,(TDim+1)> weighting_inverse_divisor; Geometry<Node<3> >& geom = ielem->GetGeometry(); for (int i=0 ; i!=(TDim+1) ; ++i) { nodes_positions[i*3+0]=geom[i].X(); nodes_positions[i*3+1]=geom[i].Y(); nodes_positions[i*3+2]=geom[i].Z(); //weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01); } ///KRATOS_WATCH(ielem->Id()) ///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size()); //int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES); //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS)); int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii]; ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii]; for (int iii=0; iii<number_of_particles_in_elem ; iii++ ) { if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop! break; Convection_Particle & pparticle = element_particle_pointers[offset+iii]; if (pparticle.GetEraseFlag()==false) { array_1d<double,3> & position = pparticle.Coordinates(); const float& particle_scalar1 = pparticle.GetScalar1(); // -1 if water, +1 if air array_1d<double,TDim+1> N; bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N); if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element. 
{ KRATOS_WATCH(N); for (int j=0 ; j!=(TDim+1); j++) if (N[j]<0.0 && N[j]> -1e-5) N[j]=1e-10; } for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element { //double sq_dist = 0; //these lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions //for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k])); //double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) ); double weight=N(j)*N(j); //weight=N(j)*N(j)*N(j); if (weight<threshold) weight=1e-10; if (weight<0.0) {KRATOS_WATCH(weight)}//;weight=0.0;KRATOS_WATCH(velocity);KRATOS_WATCH(N);KRATOS_WATCH(number_of_particles_in_elem);}//{KRATOS_WATCH(weight); KRATOS_WATCH(geom[j].Id()); KRATOS_WATCH(position);} else { nodes_addedweights[j]+= weight; //nodes_addedtemp[j] += weight * particle_temp; nodes_added_scalar1[j] += weight*particle_scalar1; }// } } } for (int i=0 ; i!=(TDim+1) ; ++i) { geom[i].SetLock(); geom[i].FastGetSolutionStepValue(mProjectionVar) +=nodes_added_scalar1[i]; geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i]; geom[i].UnSetLock(); } } } #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; double sum_weights = inode->FastGetSolutionStepValue(YP); if (sum_weights>0.00001) { //inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT)=(inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT))/sum_weights; //resetting the temperature double & height = inode->FastGetSolutionStepValue(mProjectionVar); height /=sum_weights; //resetting the density } else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case.. { inode->FastGetSolutionStepValue(mProjectionVar)=inode->FastGetSolutionStepValue(mUnknownVar,1); //resetting the temperature } } } KRATOS_CATCH("") } void TransferLagrangianToEulerianImp() //semi implicit { KRATOS_TRY // ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); std::cout << "projecting info to mesh (semi implicit)" << std::endl; const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. //KRATOS_WATCH(offset) //(flag managed only by MoveParticles //we must project data from the particles (lagrangian) into the eulerian mesh //ValuesVectorType eulerian_nodes_old_temperature; //int nnodes = mr_model_part.Nodes().size(); //array_1d<double,(n_nodes)> eulerian_nodes_sumweights; //we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes //though we could've use a bigger buffer, to be changed later! //after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles. 
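        // Semi-implicit variant: each element assembles a local (TDim+1)x(TDim+1) particle
        // "mass" matrix M(j,k) = sum_p N_j(p)*N_k(p) and a rhs r(j) = sum_p N_j(p)*scalar_p,
        // solves M*c = r by direct inversion, and scatters c to the nodes weighted by the
        // element volume; a small lumped contribution is blended in to limit over/undershoots.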
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(mProjectionVar)=0.0; inode->FastGetSolutionStepValue(YP)=0.0; } } //adding contribution, loop on elements, since each element has stored the particles found inside of it vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { //creating a matrix for each of the problems. BoundedMatrix<double, TDim+1 , TDim+1 > mass_matrix; // WE ONLY NEED ONE! they are the same for all the variables! //_x,mass_matrix_y,mass_matrix_z,mass_matrix_d; //mass matrices for the projected vel (x,y,z) and the distance array_1d<double,(TDim+1)> rhs_scalar1; array_1d<double,3*(TDim+1)> nodes_positions; array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1)); array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1)); for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; nodes_added_scalar1 = ZeroVector((TDim+1)); //resetting vectors nodes_addedweights = ZeroVector((TDim+1)); //resetting vectors mass_matrix = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices. WE ONLY NEED ONE! they are the same for all the variable. only the rhs changes. //mass_matrix_y = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices //mass_matrix_z = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices //mass_matrix_d = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices rhs_scalar1 = ZeroVector((TDim+1)); //resetting vectors Geometry<Node<3> >& geom = ielem->GetGeometry(); const double elem_volume = geom.Area(); for (int i=0 ; i!=(TDim+1) ; ++i) //saving the nodal positions for faster access { nodes_positions[i*3+0]=geom[i].X(); nodes_positions[i*3+1]=geom[i].Y(); nodes_positions[i*3+2]=geom[i].Z(); } ///KRATOS_WATCH(ielem->Id()) ///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size()); //int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES); //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS)); int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii]; ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii]; for (int iii=0; iii<number_of_particles_in_elem ; iii++ ) { if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop! break; Convection_Particle & pparticle = element_particle_pointers[offset+iii]; if (pparticle.GetEraseFlag()==false) { array_1d<double,3> & position = pparticle.Coordinates(); const float& particle_scalar1 = pparticle.GetScalar1(); // -1 if water, +1 if air array_1d<double,TDim+1> N; bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N); if (is_found==false) //something went wrong. 
if it was close enough to the edge we simply send it inside the element. { KRATOS_WATCH(N); for (int j=0 ; j!=(TDim+1); j++) if (N[j]<0.0 && N[j]> -1e-5) N[j]=1e-10; } for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element { double weight=N(j); for (int k=0 ; k!=(TDim+1); k++) //building the mass matrix mass_matrix(j,k) += weight*N(k); rhs_scalar1[j] += weight * double(particle_scalar1); //adding also a part with the lumped mass matrix to reduce overshoots and undershoots if(true) { double this_particle_weight = weight*elem_volume/(double(number_of_particles_in_elem))*0.1; //can be increased or reduced to change the lumped mass contrubtion nodes_addedweights[j]+= this_particle_weight; nodes_added_scalar1[j] += this_particle_weight*particle_scalar1; } } } } //now we invert the matrix BoundedMatrix<double, TDim+1 , TDim+1 > inverse_mass_matrix=ZeroMatrix(TDim+1 , TDim+1); if(TDim==3) InvertMatrix( mass_matrix, inverse_mass_matrix); else InvertMatrix3x3( mass_matrix, inverse_mass_matrix); //and now compute the elemental contribution to the gobal system: if(number_of_particles_in_elem > static_cast<int>(TDim)*3) //otherwise it's impossible to define a correctly the gradients, therefore the results inside the element are useless. { for (int i=0 ; i!=(TDim+1); i++) { for (int j=0 ; j!=(TDim+1); j++) { nodes_added_scalar1[i] += inverse_mass_matrix(i,j)*rhs_scalar1[j]*elem_volume*(1.0/(double(1+TDim))); } } //and also to the mass matrix. LUMPED (but for the contribution of the grandient at elemental level. for (int i=0 ; i!=(TDim+1); i++) nodes_addedweights[i] += elem_volume*(1.0/(double(1+TDim))); } for (int i=0 ; i!=(TDim+1) ; ++i) { geom[i].SetLock(); geom[i].FastGetSolutionStepValue(mProjectionVar) +=nodes_added_scalar1[i]; geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i]; geom[i].UnSetLock(); } } } #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; double sum_weights = inode->FastGetSolutionStepValue(YP); if (sum_weights>0.00001) { double & scalar1 = inode->FastGetSolutionStepValue(mProjectionVar); scalar1 /=sum_weights; //resetting the density } else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case.. { inode->FastGetSolutionStepValue(mProjectionVar)=inode->FastGetSolutionStepValue(mUnknownVar,1); } } } KRATOS_CATCH("") } void CorrectParticlesWithoutMovingUsingDeltaVariables() { KRATOS_TRY //std::cout << "updating particles" << std::endl; //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. 
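        // Correction step: after the eulerian solve, every particle is updated in place with
        // the interpolated nodal increment, scalar_p += sum_j N_j(x_p) * DELTA_SCALAR1_j,
        // so the lagrangian field follows the mesh solution without re-seeding particles.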
//(flag managed only by MoveParticles //KRATOS_WATCH(offset) ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); vector<unsigned int> element_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { //const int & elem_id = ielem->Id(); ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; Element::Pointer pelement(*ielem.base()); Geometry<Node<3> >& geom = ielem->GetGeometry(); //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS)); //int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES); int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii]; ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii]; //std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl; for (int iii=0; iii<number_of_particles_in_elem ; iii++ ) { //KRATOS_WATCH(iii) if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop! break; Convection_Particle & pparticle = element_particle_pointers[offset+iii]; bool erase_flag= pparticle.GetEraseFlag(); if (erase_flag==false) { CorrectParticleUsingDeltaVariables(pparticle,pelement,geom); //'lite' version, we pass by reference the geometry, so much cheaper } } } } KRATOS_CATCH("") } //************************************************************************************************************** //************************************************************************************************************** template< class TDataType > void AddUniqueWeakPointer (GlobalPointersVector< TDataType >& v, const typename TDataType::WeakPointer candidate) { typename GlobalPointersVector< TDataType >::iterator i = v.begin(); typename GlobalPointersVector< TDataType >::iterator endit = v.end(); while ( i != endit && (i)->Id() != (candidate)->Id()) { i++; } if( i == endit ) { v.push_back(candidate); } } //************************************************************************************************************** //************************************************************************************************************** void PreReseed(int minimum_number_of_particles) { KRATOS_TRY //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const int offset =moffset; const int max_results = 1000; //tools for the paralelization unsigned int number_of_threads = ParallelUtilities::GetNumThreads(); vector<unsigned int> elem_partition; int number_of_rows=mr_model_part.Elements().size(); elem_partition.resize(number_of_threads + 1); int elem_partition_size = number_of_rows / number_of_threads; elem_partition[0] = 0; elem_partition[number_of_threads] = number_of_rows; //KRATOS_WATCH(elem_partition_size); for (unsigned int i = 1; i < number_of_threads; i++) elem_partition[i] = elem_partition[i - 1] + elem_partition_size; ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); #pragma omp parallel firstprivate(elem_partition) { ResultContainerType results(max_results); int k = OpenMPUtils::ThisThread(); //ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() + 
elem_partition[k]; //ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ; //ModelPart::NodesContainerType local_list=aux[k]; //PointerVectorSet<Convection_Particle, IndexedObject> & list=aux[k]; //KRATOS_WATCH(k); BoundedMatrix<double, (TDim+1), 3 > pos; BoundedMatrix<double, (TDim+1) , (TDim+1) > N; unsigned int freeparticle=0; //we start with the first position in the particles array //int local_id=1; for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++) { //const int & elem_id = ielem->Id(); ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; results.resize(max_results); //const int & elem_id = ielem->Id(); //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS)); //int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES); int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii]; ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii]; if (number_of_particles_in_elem<(minimum_number_of_particles))// && (ielem->GetGeometry())[0].Y()<0.10 ) { //KRATOS_WATCH("elem with little particles") Geometry< Node<3> >& geom = ielem->GetGeometry(); ComputeGaussPointPositionsForPreReseed(geom, pos, N); //double conductivity = ielem->GetProperties()[CONDUCTIVITY]; //KRATOS_WATCH(conductivity); for (unsigned int j = 0; j < (pos.size1()); j++) //i am dropping the last one, the one in the middle of the element { bool keep_looking = true; while(keep_looking) { if (mparticles_vector[freeparticle].GetEraseFlag()==true) { #pragma omp critical { if (mparticles_vector[freeparticle].GetEraseFlag()==true) { mparticles_vector[freeparticle].GetEraseFlag()=false; keep_looking=false; } } if (keep_looking==false) break; else freeparticle++; } else { freeparticle++; } } Convection_Particle pparticle(pos(j,0),pos(j,1),pos(j,2)); array_1d<double,TDim+1>aux2_N; bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N); if (is_found==false) { KRATOS_WATCH(aux2_N); } pparticle.GetEraseFlag()=false; ResultIteratorType result_begin = results.begin(); Element::Pointer pelement( *ielem.base() ); MoveParticle_inverse_way(pparticle, pelement, result_begin, max_results); //and we copy it to the array: mparticles_vector[freeparticle] = pparticle; element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle]; pparticle.GetEraseFlag()=false; number_of_particles_in_elem++; } } } } KRATOS_CATCH("") } //************************************************************************************************************** //************************************************************************************************************** void PostReseed(int minimum_number_of_particles) //pooyan's way { KRATOS_TRY //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const int offset = moffset; //TOOLS FOR THE PARALELIZATION //int last_id= (mr_linea_model_part.NodesEnd()-1)->Id(); unsigned int number_of_threads = ParallelUtilities::GetNumThreads(); //KRATOS_WATCH(number_of_threads); vector<unsigned int> elem_partition; int number_of_rows=mr_model_part.Elements().size(); //KRATOS_WATCH(number_of_threads); //KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! 
ERROR", ""); elem_partition.resize(number_of_threads + 1); int elem_partition_size = number_of_rows / number_of_threads; elem_partition[0] = 0; elem_partition[number_of_threads] = number_of_rows; //KRATOS_WATCH(elem_partition_size); for (unsigned int i = 1; i < number_of_threads; i++) elem_partition[i] = elem_partition[i - 1] + elem_partition_size; //typedef Node < 3 > PointType; //std::vector<ModelPart::NodesContainerType> aux;// aux; //aux.resize(number_of_threads); //ModelPart::NodesContainerType::iterator it_begin_particle_model_part = mr_linea_model_part.NodesBegin(); //ModelPart::NodesContainerType::iterator it_end_particle_model_part = mr_linea_model_part.NodesEnd(); ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); #pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemple everything toghether, remaming particles ids to get consecutive ids { unsigned int reused_particles=0; unsigned int freeparticle = 0; //we start by the first position; int k = OpenMPUtils::ThisThread(); //ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() + elem_partition[k]; //ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ; BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D) BoundedMatrix<double, (3+2*TDim), (TDim+1) > N; double mesh_scalar1; array_1d<int, (3+2*TDim) > positions; unsigned int number_of_reseeded_particles; //unsigned int number_of_water_reseeded_particles; //array_1d<double, 3 > nodes_distances; for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++) { //const int & elem_id = ielem->Id(); ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; //int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES); //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS)); int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii]; ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii]; Geometry< Node<3> >& geom = ielem->GetGeometry(); if ( (number_of_particles_in_elem<(minimum_number_of_particles)))// && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(minimum_number_of_particles) ) ) { //bool reseed_more=false; number_of_reseeded_particles=0; //reseed_more=true; number_of_reseeded_particles= 3+2*TDim; ComputeGaussPointPositionsForPostReseed(geom, pos, N); for (unsigned int j = 0; j < number_of_reseeded_particles; j++) { //now we have to find an empty space ( a particle that was about to be deleted) in the particles model part. once found. 
there will be our renewed particle: bool keep_looking = true; while(keep_looking) { if (mparticles_vector[freeparticle].GetEraseFlag()==true) { #pragma omp critical { if (mparticles_vector[freeparticle].GetEraseFlag()==true) { mparticles_vector[freeparticle].GetEraseFlag()=false; keep_looking=false; } } if (keep_looking==false) break; else freeparticle++; } else { freeparticle++; } } Convection_Particle pparticle(pos(j,0),pos(j,1),pos(j,2)); array_1d<double,TDim+1>aux_N; bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N); if (is_found==false) { KRATOS_WATCH(aux_N); KRATOS_WATCH(j) KRATOS_WATCH(ielem->Id()) } mesh_scalar1 = 0.0; for (unsigned int l = 0; l < (TDim+1); l++) { mesh_scalar1 += N(j,l) * geom[l].FastGetSolutionStepValue(mUnknownVar); } pparticle.GetScalar1()=mesh_scalar1; pparticle.GetEraseFlag()=false; mparticles_vector[freeparticle]=pparticle; element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle]; number_of_particles_in_elem++; if (keep_looking) { KRATOS_THROW_ERROR(std::logic_error, "FINISHED THE LIST AND COULDNT FIND A FREE CELL FOR THE NEW PARTICLE!", ""); } else { reused_particles++; } } } } } KRATOS_CATCH("") } void ExecuteParticlesPritingTool( ModelPart& lagrangian_model_part, int input_filter_factor ) { KRATOS_TRY //mfilter_factor; //we will only print one out of every "filter_factor" particles of the total particle list if(mparticle_printing_tool_initialized==false) { mfilter_factor=input_filter_factor; if(lagrangian_model_part.NodesBegin()-lagrangian_model_part.NodesEnd()>0) KRATOS_THROW_ERROR(std::logic_error, "AN EMPTY MODEL PART IS REQUIRED FOR THE PRINTING OF PARTICLES", ""); lagrangian_model_part.AddNodalSolutionStepVariable(DISPLACEMENT); lagrangian_model_part.AddNodalSolutionStepVariable(mUnknownVar); for (unsigned int i=0; i!=((mmaximum_number_of_particles*mnelems)/mfilter_factor)+mfilter_factor; i++) { Node < 3 > ::Pointer pnode = lagrangian_model_part.CreateNewNode( i+mlast_node_id+1 , 0.0, 0.0, 0.0); //recordar que es el nueevo model part!! //pnode->SetBufferSize(mr_model_part.NodesBegin()->GetBufferSize()); pnode->SetBufferSize(1); } mparticle_printing_tool_initialized=true; } //resetting data of the unused particles const double inactive_particle_position= -10.0; array_1d<double,3>inactive_particle_position_vector; inactive_particle_position_vector(0)=inactive_particle_position; inactive_particle_position_vector(1)=inactive_particle_position; inactive_particle_position_vector(2)=inactive_particle_position; ModelPart::NodesContainerType::iterator inodebegin = lagrangian_model_part.NodesBegin(); for(unsigned int ii=0; ii<lagrangian_model_part.Nodes().size(); ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(mUnknownVar) = 0.0; inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector; } int counter=0; //ModelPart::NodesContainerType::iterator it_begin = lagrangian_model_part.NodesBegin(); for (int i=0; i!=mmaximum_number_of_particles*mnelems; i++) { Convection_Particle& pparticle =mparticles_vector[i]; if(pparticle.GetEraseFlag()==false && i%mfilter_factor==0) { ModelPart::NodesContainerType::iterator inode = inodebegin+counter; //copying info from the particle to the (printing) node. 
inode->FastGetSolutionStepValue(mUnknownVar) = pparticle.GetScalar1(); inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates(); counter++; } } KRATOS_CATCH("") } protected: private: ///this function moves a particle according to the "velocity" given ///by "rVariable". The movement is performed in nsubsteps, during a total time ///of Dt void MoveParticle( Convection_Particle & pparticle, Element::Pointer & pelement, GlobalPointersVector< Element >& elements_in_trajectory, unsigned int & number_of_elements_in_trajectory, ResultIteratorType result_begin, const unsigned int MaxNumberOfResults) { ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; unsigned int nsubsteps; double substep_dt; bool KEEP_INTEGRATING=false; bool is_found; //bool have_air_node; //bool have_water_node; array_1d<double,3> vel; array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3); array_1d<double,3> position; array_1d<double,3> mid_position; array_1d<double,TDim+1> N; //we start with the first position, then it will enter the loop. position = pparticle.Coordinates(); //initial coordinates double only_integral = 0.0 ; is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is: if(is_found == true) { KEEP_INTEGRATING=true; Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in vel=ZeroVector(3); for(unsigned int j=0; j<(TDim+1); j++) { noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j]; } //calculating substep to get +- courant(substep) = 0.1 nsubsteps = 10.0 * (delta_t * pelement->GetValue(MEAN_VEL_OVER_ELEM_SIZE)); if (nsubsteps<1) nsubsteps=1; substep_dt = delta_t / double(nsubsteps); only_integral = 1.0;// weight;//*double(nsubsteps); position += vel*substep_dt;//weight; //DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH SEDIMENT_VELOCITY ////////////////////////////////////////////////////////////////////////////////////////////////////// unsigned int check_from_element_number=0; for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle. { if (KEEP_INTEGRATING==true) { is_found = FindNodeOnMesh(position, N ,pelement,elements_in_trajectory,number_of_elements_in_trajectory,check_from_element_number,result_begin,MaxNumberOfResults); //good, now we know where this point is: if(is_found == true) { Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in vel = ZeroVector(3); for(unsigned int j=0; j<(TDim+1); j++) { noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j]; } only_integral += 1.0; //values saved for the current time step position+=vel*substep_dt;//weight; } else { KEEP_INTEGRATING=false; break; } } else break; } } if (KEEP_INTEGRATING==false) (pparticle.GetEraseFlag()=true); else is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pelement) if (is_found==false) ( pparticle.GetEraseFlag()=true); pparticle.Coordinates() = position; } void CorrectParticleUsingDeltaVariables( Convection_Particle & pparticle, Element::Pointer & pelement, Geometry< Node<3> >& geom) { array_1d<double,TDim+1> N; //we start with the first position, then it will enter the loop. 
array_1d<double,3> coords = pparticle.Coordinates(); float & particle_scalar1 = pparticle.GetScalar1(); //double distance=0.0; double delta_scalar1 = 0.0; bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N); if(is_found == false) { KRATOS_WATCH(N) for (int j=0 ; j!=(TDim+1); j++) if (N[j]<0.0 ) N[j]=1e-10; } for(unsigned int j=0; j<(TDim+1); j++) { delta_scalar1 += geom[j].FastGetSolutionStepValue(DELTA_SCALAR1)*N[j]; } particle_scalar1 = particle_scalar1 + delta_scalar1; } void MoveParticle_inverse_way( Convection_Particle & pparticle, Element::Pointer & pelement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO! ResultIteratorType result_begin, const unsigned int MaxNumberOfResults) { ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; unsigned int nsubsteps; double substep_dt; bool KEEP_INTEGRATING=false; bool is_found; array_1d<double,3> vel; array_1d<double,3> position; array_1d<double,3> mid_position; array_1d<double,TDim+1> N; double scalar1 = 0.0; //we start with the first position, then it will enter the loop. position = pparticle.Coordinates(); // + (pparticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates double only_integral = 0.0 ; is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is: if(is_found == true) { KEEP_INTEGRATING=true; Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in vel=ZeroVector(3); scalar1=0.0; for(unsigned int j=0; j<(TDim+1); j++) { scalar1 += geom[j].FastGetSolutionStepValue(mUnknownVar)*N(j); noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j]; } //calculating substep to get +- courant(substep) = 1/4 nsubsteps = 10.0 * (delta_t * pelement->GetValue(MEAN_VEL_OVER_ELEM_SIZE)); if (nsubsteps<1) nsubsteps=1; substep_dt = delta_t / double(nsubsteps); only_integral = 1.0;// weight;//*double(nsubsteps); position -= vel*substep_dt;//weight; for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle. { if (KEEP_INTEGRATING==true) { is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is: if(is_found == true) { Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in vel=ZeroVector(3); scalar1=0.0; for(unsigned int j=0; j<(TDim+1); j++) { noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j] ; scalar1 += geom[j].FastGetSolutionStepValue(mUnknownVar)*N(j); } only_integral += 1.0;//weight ; //values saved for the current time step position-=vel*substep_dt;//weight; } else KEEP_INTEGRATING=false; } } pparticle.GetScalar1()=scalar1; } //else {KRATOS_WATCH(position); } } ///this function should find the element into which a given node is located ///and return a pointer to the element and the vector containing the ///shape functions that define the postion within the element ///if "false" is devolved the element is not found bool FindNodeOnMesh( array_1d<double,3>& position, array_1d<double,TDim+1>& N, Element::Pointer & pelement, ResultIteratorType result_begin, const unsigned int MaxNumberOfResults) { typedef std::size_t SizeType; const array_1d<double,3>& coords = position; array_1d<double,TDim+1> aux_N; //before using the bin to search for possible elements we check first the last element in which the particle was. 
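        // Search strategy, cheapest first:
        //   (1) the element the particle was last known to be in (checked just below),
        //   (2) that element's neighbours through NEIGHBOUR_ELEMENTS,
        //   (3) a query of the bins (mpBinsObjectDynamic) as a last resort;
        // only (3) touches the global search structure, so most lookups stay local.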
Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry(); bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N); if(is_found_1 == true) //that was easy! { return true; } //to begin with we check the neighbour elements; it is a bit more expensive GlobalPointersVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS); //the first we check is the one that has negative shape function, because it means it went outside in this direction: //commented, it is not faster than simply checking all the neighbours (branching) /* unsigned int checked_element=0; for (unsigned int i=0;i!=(TDim+1);i++) { if (N[i]<0.0) { checked_element=i; Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry(); bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N); if (is_found_2) { pelement=Element::Pointer(((neighb_elems(i)))); N=aux_N; return true; } break; } } */ //we check all the neighbour elements for (unsigned int i=0;i!=(neighb_elems.size());i++) { Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry(); bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N); if (is_found_2) { pelement=neighb_elems(i)->shared_from_this(); return true; } } //if checking all the neighbour elements did not work, we have to use the bins //ask to the container for the list of candidate elements SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults ); if(results_found>0){ //loop over the candidate elements and check if the particle falls within for(SizeType i = 0; i< results_found; i++) { Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry(); //find local position bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N); if(is_found == true) { pelement=Element::Pointer((*(result_begin+i))); return true; } } } //if nothing worked, then: //not found case return false; } // VERSION INCLUDING PREDEFINED ELEMENTS FOLLOWING A TRAJECTORY bool FindNodeOnMesh( array_1d<double,3>& position, array_1d<double,TDim+1>& N, Element::Pointer & pelement, GlobalPointersVector< Element >& elements_in_trajectory, unsigned int & number_of_elements_in_trajectory, unsigned int & check_from_element_number, ResultIteratorType result_begin, const unsigned int MaxNumberOfResults) { typedef std::size_t SizeType; const array_1d<double,3>& coords = position; array_1d<double,TDim+1> aux_N; //before using the bin to search for possible elements we check first the last element in which the particle was. Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry(); bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N); if(is_found_1 == true) { return true; //that was easy! } //if it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element. for (unsigned int i=(check_from_element_number);i!=number_of_elements_in_trajectory;i++) { Geometry<Node<3> >& geom = elements_in_trajectory[i].GetGeometry(); bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N); if (is_found_2) { pelement=elements_in_trajectory(i)->shared_from_this(); N=aux_N; check_from_element_number = i+1 ; //now i element matches pelement, so to avoid cheching twice the same element we send the counter to the following element. 
            return true;
        }
    }

    //now we check the neighbour elements:
    auto& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
    //the first we check is the one that has a negative shape function, because it means it went outside in this direction:
    //commented, it is not faster than simply checking all the neighbours (branching)
    /*
    unsigned int checked_element=0;
    for (unsigned int i=0;i!=(TDim+1);i++)
    {
        if (N[i]<0.0)
        {
            checked_element=i;
            Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
            bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
            if (is_found_2)
            {
                pelement=Element::Pointer(((neighb_elems(i))));
                N=aux_N;
                return true;
            }
            break;
        }
    }
    */
    //we check all the neighbour elements
    for (unsigned int i=0;i!=(neighb_elems.size());i++)
    {
        Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
        if (is_found_2)
        {
            pelement=neighb_elems(i)->shared_from_this();
            if (number_of_elements_in_trajectory<20)
            {
                elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
                number_of_elements_in_trajectory++;
                check_from_element_number = number_of_elements_in_trajectory; //we do it after doing the ++ to the counter, so we wouldn't enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
            }
            return true;
        }
    }

    //if checking all the neighbour elements did not work, we have to use the bins
    //ask the container for the list of candidate elements
    SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );
    if (results_found>0)
    {
        //loop over the candidate elements and check if the particle falls within
        for (SizeType i = 0; i< results_found; i++)
        {
            Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
            //find local position
            bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
            if (is_found == true)
            {
                pelement=Element::Pointer((*(result_begin+i)));
                if (number_of_elements_in_trajectory<20)
                {
                    elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
                    number_of_elements_in_trajectory++;
                    check_from_element_number = number_of_elements_in_trajectory; //we do it after doing the ++ to the counter, so we wouldn't enter the loop that searches in the elements_in_trajectory list.
we are the particle that is adding elements to the list } return true; } } } //not found case return false; } //*************************************** //*************************************** inline bool CalculatePosition(Geometry<Node < 3 > >&geom, const double xc, const double yc, const double zc, array_1d<double, 3 > & N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double area = CalculateVol(x0, y0, x1, y1, x2, y2); double inv_area = 0.0; if (area == 0.0) { KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", ""); } else { inv_area = 1.0 / area; } N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area; N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area; N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area; //KRATOS_WATCH(N); if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true return true; return false; } //////////// //using the pre loaded nodal coordinates inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions, const double xc, const double yc, const double zc, array_1d<double, 3 > & N ) { const double& x0 = nodes_positions[0]; const double& y0 = nodes_positions[1]; const double& x1 = nodes_positions[3]; const double& y1 = nodes_positions[4]; const double& x2 = nodes_positions[6]; const double& y2 = nodes_positions[7]; double area = CalculateVol(x0, y0, x1, y1, x2, y2); double inv_area = 0.0; if (area == 0.0) { KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", ""); } else { inv_area = 1.0 / area; } N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area; N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area; N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area; //KRATOS_WATCH(N); if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true return true; return false; } //*************************************** //*************************************** inline bool CalculatePosition(Geometry<Node < 3 > >&geom, const double xc, const double yc, const double zc, array_1d<double, 4 > & N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double z0 = geom[0].Z(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double z1 = geom[1].Z(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double z2 = geom[2].Z(); double x3 = geom[3].X(); double y3 = geom[3].Y(); double z3 = geom[3].Z(); double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3); double inv_vol = 0.0; if (vol < 0.000000000000000000000000000001) { KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", ""); } else { inv_vol = 1.0 / vol; } N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol; N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol; N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol; N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } /////////////////// //using the pre loaded nodal coordinates inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions, const double xc, const double yc, const double zc, 
array_1d<double, 4 > & N ) { const double& x0 = nodes_positions[0]; const double& y0 = nodes_positions[1]; const double& z0 = nodes_positions[2]; const double& x1 = nodes_positions[3]; const double& y1 = nodes_positions[4]; const double& z1 = nodes_positions[5]; const double& x2 = nodes_positions[6]; const double& y2 = nodes_positions[7]; const double& z2 = nodes_positions[8]; const double& x3 = nodes_positions[9]; const double& y3 = nodes_positions[10]; const double& z3 = nodes_positions[11]; double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3); double inv_vol = 0.0; if (vol < 0.000000000000000000000000000001) { KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", ""); } else { inv_vol = 1.0 / vol; } N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol; N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol; N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol; N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } inline double CalculateVol(const double x0, const double y0, const double x1, const double y1, const double x2, const double y2 ) { return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0)); } //*************************************** //*************************************** inline double CalculateVol(const double x0, const double y0, const double z0, const double x1, const double y1, const double z1, const double x2, const double y2, const double z2, const double x3, const double y3, const double z3 ) { double x10 = x1 - x0; double y10 = y1 - y0; double z10 = z1 - z0; double x20 = x2 - x0; double y20 = y2 - y0; double z20 = z2 - z0; double x30 = x3 - x0; double y30 = y3 - y0; double z30 = z3 - z0; double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30; return detJ * 0.1666666666666666666667; } void ComputeGaussPointPositions_4(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N) { double one_third = 1.0 / 3.0; double one_sixt = 0.15; //1.0 / 6.0; double two_third = 0.7; //2.0 * one_third; N(0, 0) = one_sixt; N(0, 1) = one_sixt; N(0, 2) = two_third; N(1, 0) = two_third; N(1, 1) = one_sixt; N(1, 2) = one_sixt; N(2, 0) = one_sixt; N(2, 1) = two_third; N(2, 2) = one_sixt; N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third; //first pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X(); pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y(); pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z(); //second pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X(); pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y(); pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z(); //third pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X(); pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y(); pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = 
one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); } void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N) //2d { double one_third = 1.0 / 3.0; double one_eight = 0.12; //1.0 / 6.0; double three_quarters = 0.76; //2.0 * one_third; N(0, 0) = one_eight; N(0, 1) = one_eight; N(0, 2) = three_quarters; N(1, 0) = three_quarters; N(1, 1) = one_eight; N(1, 2) = one_eight; N(2, 0) = one_eight; N(2, 1) = three_quarters; N(2, 2) = one_eight; N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third; N(4, 0) = one_eight; N(4, 1) = 0.44; N(4, 2) = 0.44; N(5, 0) = 0.44; N(5, 1) = one_eight; N(5, 2) = 0.44; N(6, 0) = 0.44; N(6, 1) = 0.44; N(6, 2) = one_eight; //first pos(0, 0) = one_eight * geom[0].X() + one_eight * geom[1].X() + three_quarters * geom[2].X(); pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y(); pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z(); //second pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X(); pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y(); pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z(); //third pos(2, 0) = one_eight * geom[0].X() + three_quarters * geom[1].X() + one_eight * geom[2].X(); pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + one_eight * geom[2].Y(); pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); //fifth pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X(); pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y(); pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z(); //sixth pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X(); pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y(); pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z(); //seventh pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X(); pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y(); pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z(); } void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 9, 3 > & pos,BoundedMatrix<double, 9, 4 > & N) //3D { double one_quarter = 0.25; double small_fraction = 0.1; //1.0 / 6.0; double big_fraction = 0.7; //2.0 * one_third; double mid_fraction = 0.3; //2.0 * one_third; N(0, 0) = big_fraction; N(0, 1) = small_fraction; N(0, 2) = small_fraction; N(0, 3) = small_fraction; N(1, 0) = small_fraction; N(1, 1) = big_fraction; N(1, 2) = small_fraction; N(1, 3) = small_fraction; N(2, 0) = small_fraction; N(2, 1) = small_fraction; N(2, 2) = big_fraction; N(2, 3) = small_fraction; N(3, 0) = small_fraction; N(3, 1) = small_fraction; N(3, 2) = small_fraction; N(3, 3) = big_fraction; N(4, 0) = one_quarter; N(4, 1) = one_quarter; N(4, 2) = one_quarter; N(4, 3) = 
one_quarter; N(5, 0) = small_fraction; N(5, 1) = mid_fraction; N(5, 2) = mid_fraction; N(5, 3) = mid_fraction; N(6, 0) = mid_fraction; N(6, 1) = small_fraction; N(6, 2) = mid_fraction; N(6, 3) = mid_fraction; N(7, 0) = mid_fraction; N(7, 1) = mid_fraction; N(7, 2) = small_fraction; N(7, 3) = mid_fraction; N(8, 0) = mid_fraction; N(8, 1) = mid_fraction; N(8, 2) = mid_fraction; N(8, 3) = small_fraction; pos=ZeroMatrix(9,3); for (unsigned int i=0; i!=4; i++) //going through the 4 nodes { array_1d<double, 3 > & coordinates = geom[i].Coordinates(); for (unsigned int j=0; j!=9; j++) //going through the 9 particles { for (unsigned int k=0; k!=3; k++) //x,y,z pos(j,k) += N(j,i) * coordinates[k]; } } } void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 3, 3 > & pos,BoundedMatrix<double, 3, 3 > & N) //2D { N(0, 0) = 0.5; N(0, 1) = 0.25; N(0, 2) = 0.25; N(1, 0) = 0.25; N(1, 1) = 0.5; N(1, 2) = 0.25; N(2, 0) = 0.25; N(2, 1) = 0.25; N(2, 2) = 0.5; //first pos(0, 0) = 0.5 * geom[0].X() + 0.25 * geom[1].X() + 0.25 * geom[2].X(); pos(0, 1) = 0.5 * geom[0].Y() + 0.25 * geom[1].Y() + 0.25 * geom[2].Y(); pos(0, 2) = 0.5 * geom[0].Z() + 0.25 * geom[1].Z() + 0.25 * geom[2].Z(); //second pos(1, 0) = 0.25 * geom[0].X() + 0.5 * geom[1].X() + 0.25 * geom[2].X(); pos(1, 1) = 0.25 * geom[0].Y() + 0.5 * geom[1].Y() + 0.25 * geom[2].Y(); pos(1, 2) = 0.25 * geom[0].Z() + 0.5 * geom[1].Z() + 0.25 * geom[2].Z(); //third pos(2, 0) = 0.25 * geom[0].X() + 0.25 * geom[1].X() + 0.5 * geom[2].X(); pos(2, 1) = 0.25 * geom[0].Y() + 0.25 * geom[1].Y() + 0.5 * geom[2].Y(); pos(2, 2) = 0.25 * geom[0].Z() + 0.25 * geom[1].Z() + 0.5 * geom[2].Z(); } void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 4, 3 > & pos,BoundedMatrix<double, 4, 4 > & N) //3D { //creating 4 particles, each will be closer to a node and equidistant to the other nodes N(0, 0) = 0.4; N(0, 1) = 0.2; N(0, 2) = 0.2; N(0, 3) = 0.2; N(1, 0) = 0.2; N(1, 1) = 0.4; N(1, 2) = 0.2; N(1, 3) = 0.2; N(2, 0) = 0.2; N(2, 1) = 0.2; N(2, 2) = 0.4; N(2, 3) = 0.2; N(3, 0) = 0.2; N(3, 1) = 0.2; N(3, 2) = 0.2; N(3, 3) = 0.4; pos=ZeroMatrix(4,3); for (unsigned int i=0; i!=4; i++) //going through the 4 nodes { array_1d<double, 3 > & coordinates = geom[i].Coordinates(); for (unsigned int j=0; j!=4; j++) //going through the 4 particles { for (unsigned int k=0; k!=3; k++) //x,y,z pos(j,k) += N(j,i) * coordinates[k]; } } } void ComputeGaussPointPositions_45(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 45, 3 > & pos,BoundedMatrix<double, 45, 3 > & N) { //std::cout << "NEW ELEMENT" << std::endl; unsigned int counter=0; for (unsigned int i=0; i!=9;i++) { for (unsigned int j=0; j!=(9-i);j++) { N(counter,0)=0.05+double(i)*0.1; N(counter,1)=0.05+double(j)*0.1; N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z(); //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl; counter++; } } } void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 15, 3 > & pos,BoundedMatrix<double, 15, 3 > & N) //2D { //std::cout << "NEW ELEMENT" << std::endl; unsigned int counter=0; for (unsigned int i=0; i!=5;i++) { for (unsigned int j=0; 
j!=(5-i);j++) { N(counter,0)=0.05+double(i)*0.2; N(counter,1)=0.05+double(j)*0.2; N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z(); //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl; counter++; } } } void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 20, 3 > & pos,BoundedMatrix<double, 20, 4 > & N) //3D { //std::cout << "NEW ELEMENT" << std::endl; //double total; double fraction_increment; unsigned int counter=0; for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer will be made by a triangle of 4 base X 4 height. since it is a triangle, it means it will have 10 particles { //std::cout << "inside i" << i << std::endl; for (unsigned int j=0; j!=(4-i);j++) { //std::cout << "inside j" << j << std::endl; for (unsigned int k=0; k!=(4-i-j);k++) { //std::cout << "inside k" << k << std::endl; N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1) //total = 1.0 - N(counter,0); fraction_increment = 0.27; // N(counter,1)=fraction_increment * (0.175 + double(j)); N(counter,2)=fraction_increment * (0.175 + double(k)); N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z(); //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl; counter++; } } } } template<class T> bool InvertMatrix(const T& input, T& inverse) { typedef permutation_matrix<std::size_t> pmatrix; // create a working copy of the input T A(input); // create a permutation matrix for the LU-factorization pmatrix pm(A.size1()); // perform LU-factorization int res = lu_factorize(A, pm); if (res != 0) return false; // create identity matrix of "inverse" inverse.assign(identity_matrix<double> (A.size1())); // backsubstitute to get the inverse lu_substitute(A, pm, inverse); return true; } bool InvertMatrix3x3(const BoundedMatrix<double, TDim+1 , TDim+1 >& A, BoundedMatrix<double, TDim+1 , TDim+1 >& result) { double determinant = +A(0,0)*(A(1,1)*A(2,2)-A(2,1)*A(1,2)) -A(0,1)*(A(1,0)*A(2,2)-A(1,2)*A(2,0)) +A(0,2)*(A(1,0)*A(2,1)-A(1,1)*A(2,0)); double invdet = 1/determinant; result(0,0) = (A(1,1)*A(2,2)-A(2,1)*A(1,2))*invdet; result(1,0) = -(A(0,1)*A(2,2)-A(0,2)*A(2,1))*invdet; result(2,0) = (A(0,1)*A(1,2)-A(0,2)*A(1,1))*invdet; result(0,1) = -(A(1,0)*A(2,2)-A(1,2)*A(2,0))*invdet; result(1,1) = (A(0,0)*A(2,2)-A(0,2)*A(2,0))*invdet; result(2,1) = -(A(0,0)*A(1,2)-A(1,0)*A(0,2))*invdet; result(0,2) = (A(1,0)*A(2,1)-A(2,0)*A(1,1))*invdet; result(1,2) = -(A(0,0)*A(2,1)-A(2,0)*A(0,1))*invdet; result(2,2) = (A(0,0)*A(1,1)-A(1,0)*A(0,1))*invdet; return true; } virtual int Check() { KRATOS_TRY ProcessInfo& rCurrentProcessInfo = 
mr_model_part.GetProcessInfo(); if (rCurrentProcessInfo.Has(CONVECTION_DIFFUSION_SETTINGS)==false) KRATOS_THROW_ERROR(std::logic_error, "no CONVECTION_DIFFUSION_SETTINGS in model_part", ""); //std::cout << "ConvDiff::Check(). If crashes, check CONVECTION_DIFFUSION_SETTINGS is defined" << std::endl; ConvectionDiffusionSettings::Pointer my_settings = rCurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS); //UNKNOWN VARIABLE if(my_settings->IsDefinedUnknownVariable()==true) { if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetUnknownVariable()) == false) KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Unknown Variable defined but not contained in the model part", ""); } else KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Unknown Variable not defined!", ""); //PROJECTION VARIABLE //used as intermediate variable, is the variable at time n+1 but only accounting for the convective term. if(my_settings->IsDefinedProjectionVariable()==true) { if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetProjectionVariable()) == false) KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Projection Variable defined but not contained in the model part", ""); } else KRATOS_THROW_ERROR(std::logic_error, "No Projection variable assigned for ConvDiff!", ""); //CONVECTION VELOCITY VARIABLE //CURRENTLY WE ARE USING (VELOCITY -MESH_VELOCITY) TO CONVECT, so the ConvectionVariable must not be used: //if(my_settings->IsDefinedConvectionVariable()==true) //{ // if (BaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(my_settings->GetConvectionVariable()) == false) // KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Convection Variable defined but not contained in the model part", ""); //} //else // std::cout << "No Projection variable assigned for ConvDiff. Assuming Convection=0" << std::endl; if(my_settings->IsDefinedConvectionVariable()==true) KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: ConvectionVariable not used. 
Use VelocityVariable instead", ""); //VELOCITY VARIABLE if(my_settings->IsDefinedVelocityVariable()==true) { if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetVelocityVariable()) == false) KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Velocity Variable defined but not contained in the model part", ""); } else KRATOS_THROW_ERROR(std::logic_error, "No Velocity variable assigned for ConvDiff!", ""); if (mr_model_part.NodesBegin()->SolutionStepsDataHas(MEAN_SIZE) == false) KRATOS_THROW_ERROR(std::logic_error, "Add MEAN_SIZE variable to model part!", ""); if (mr_model_part.NodesBegin()->SolutionStepsDataHas(DELTA_SCALAR1) == false) KRATOS_THROW_ERROR(std::logic_error, "Add DELTA_SCALAR1 variable to model part!", ""); return 0; KRATOS_CATCH("") } ModelPart& mr_model_part; int m_nparticles; int mnelems; int moffset; //vector<double> mareas_vector; UNUSED SO COMMENTED int max_nsubsteps; double max_substep_dt; int mmaximum_number_of_particles; std::vector< Convection_Particle > mparticles_vector; //Point<3> int mlast_elem_id; bool modd_timestep; bool mparticle_printing_tool_initialized; unsigned int mfilter_factor; unsigned int mlast_node_id; //ModelPart& mr_particle_model_part; vector<int> mnumber_of_particles_in_elems; vector<int> mnumber_of_particles_in_elems_aux; //vector<ParticlePointerVector*> mpointers_to_particle_pointers_vectors; //pointing to the GetValue of each element vector<ParticlePointerVector> mvector_of_particle_pointers_vectors; typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic; const Variable<double>& mUnknownVar; const Variable<double>& mProjectionVar; const Variable<array_1d<double,3> >& mVelocityVar; const Variable<array_1d<double,3> >& mMeshVelocityVar; }; } // namespace Kratos. #endif // KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED defined
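// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the Kratos utility above): the element
// search relies on the barycentric point-in-triangle test implemented by
// CalculatePosition(). The self-contained C++ snippet below shows the same
// technique with plain structs standing in for the Kratos Geometry/array_1d
// types; Pt, signed_area2 and point_in_triangle are illustrative names only.
// ---------------------------------------------------------------------------
#include <array>
#include <iostream>

struct Pt { double x, y; };

// Twice the signed area of triangle (a, b, c); the 1/2 factor cancels in the ratios.
static double signed_area2(const Pt& a, const Pt& b, const Pt& c) {
    return (b.x - a.x) * (c.y - a.y) - (b.y - a.y) * (c.x - a.x);
}

// Fills N with the barycentric (shape-function) values of p and reports
// whether p lies inside the triangle. As in CalculatePosition(), each N[i]
// is the sub-triangle area opposite node i divided by the total area; the
// N[i] sum to 1, so all N[i] >= 0 already implies all N[i] <= 1.
static bool point_in_triangle(const Pt& p0, const Pt& p1, const Pt& p2,
                              const Pt& p, std::array<double, 3>& N) {
    const double area2 = signed_area2(p0, p1, p2);
    if (area2 == 0.0) return false; // degenerate element (the utility throws here)
    N[0] = signed_area2(p1, p2, p) / area2;
    N[1] = signed_area2(p2, p0, p) / area2;
    N[2] = signed_area2(p0, p1, p) / area2;
    return N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0;
}

int main() {
    std::array<double, 3> N;
    const bool inside = point_in_triangle({0, 0}, {1, 0}, {0, 1}, {0.25, 0.25}, N);
    std::cout << inside << ": " << N[0] << " " << N[1] << " " << N[2] << "\n"; // 1: 0.5 0.25 0.25
    return 0;
}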
PAddOP.h
#ifndef PAddOP #define PAddOP /* * PAddOP.h: * (pointwise) add * * Created on: June 13, 2017 * Author: mszhang */ #include "Eigen/Dense" #include "MyLib.h" #include "Node.h" #include "Graph.h" class PAddNode : public Node { public: vector<PNode> ins; ~PAddNode() { ins.clear(); } public: PAddNode() : Node() { ins.clear(); node_type = "point-add"; } inline void clearValue() { ins.clear(); Node::clearValue(); } public: void forward(Graph *cg, const vector<PNode>& x) { if (x.size() == 0) { std::cout << "empty inputs for add" << std::endl; return; } ins.clear(); for (int i = 0; i < x.size(); i++) { if (x[i]->val.dim == dim) { ins.push_back(x[i]); } else { std::cout << "dim does not match" << std::endl; } } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2, PNode x3) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << std::endl; } if (x3->dim == dim) { ins.push_back(x3); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << std::endl; } if (x3->dim == dim) { ins.push_back(x3); } else { std::cout << "dim does not match" << std::endl; } if (x4->dim == dim) { ins.push_back(x4); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4, PNode x5) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << std::endl; } if (x3->dim == dim) { ins.push_back(x3); } else { std::cout << "dim does not match" << std::endl; } if (x4->dim == dim) { ins.push_back(x4); } else { std::cout << "dim does not match" << std::endl; } if (x5->dim == dim) { ins.push_back(x5); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4, PNode x5, PNode x6) { ins.clear(); if (x1->dim == dim) { ins.push_back(x1); } else { std::cout << "dim does not match" << std::endl; } if (x2->dim == dim) { ins.push_back(x2); } else { std::cout << "dim does not match" << 
std::endl; } if (x3->dim == dim) { ins.push_back(x3); } else { std::cout << "dim does not match" << std::endl; } if (x4->dim == dim) { ins.push_back(x4); } else { std::cout << "dim does not match" << std::endl; } if (x5->dim == dim) { ins.push_back(x5); } else { std::cout << "dim does not match" << std::endl; } if (x6->dim == dim) { ins.push_back(x6); } else { std::cout << "dim does not match" << std::endl; } degree = 0; int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { ins[i]->addParent(this); } cg->addNode(this); } public: inline void compute() { int nSize = ins.size(); val.zero(); for (int i = 0; i < nSize; ++i) { for (int idx = 0; idx < dim; idx++) { val[idx] += ins[i]->val[idx]; } } } void backward() { int nSize = ins.size(); for (int i = 0; i < nSize; ++i) { for (int idx = 0; idx < dim; idx++) { ins[i]->loss[idx] += loss[idx]; } } } public: inline PExecute generate(bool bTrain, dtype cur_drop_factor); // better to rewrite for deep understanding inline bool typeEqual(PNode other) { return Node::typeEqual(other); } }; class PAddExecute : public Execute { public: bool bTrain; public: inline void forward() { int count = batch.size(); //#pragma omp parallel for for (int idx = 0; idx < count; idx++) { batch[idx]->compute(); batch[idx]->forward_drop(bTrain, drop_factor); } } inline void backward() { int count = batch.size(); //#pragma omp parallel for for (int idx = 0; idx < count; idx++) { batch[idx]->backward_drop(); batch[idx]->backward(); } } }; inline PExecute PAddNode::generate(bool bTrain, dtype cur_drop_factor) { PAddExecute* exec = new PAddExecute(); exec->batch.push_back(this); exec->bTrain = bTrain; exec->drop_factor = cur_drop_factor; return exec; } #endif
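// ---------------------------------------------------------------------------
// Illustrative sketch (independent of the Node/Graph machinery above): the
// compute()/backward() pair of PAddNode implements the pointwise-add rule
// y[d] = sum_i x_i[d]; since dy[d]/dx_i[d] = 1, the loss gradient is fanned
// out unchanged to every input. A minimal standalone version:
// ---------------------------------------------------------------------------
#include <cstddef>
#include <iostream>
#include <vector>

// Forward pass: y[d] = sum_i x_i[d]; all inputs must share one dimension.
static std::vector<float> add_forward(const std::vector<std::vector<float>>& xs) {
    std::vector<float> y(xs.front().size(), 0.0f);
    for (const auto& x : xs)
        for (std::size_t d = 0; d < y.size(); ++d) y[d] += x[d];
    return y;
}

// Backward pass: the incoming loss gradient is accumulated unchanged into
// every input's gradient buffer, exactly like PAddNode::backward().
static void add_backward(const std::vector<float>& dy,
                         std::vector<std::vector<float>>& dxs) {
    for (auto& dx : dxs)
        for (std::size_t d = 0; d < dy.size(); ++d) dx[d] += dy[d];
}

int main() {
    std::vector<std::vector<float>> xs = {{1, 2}, {3, 4}};
    std::vector<float> y = add_forward(xs);                    // {4, 6}
    std::vector<std::vector<float>> dxs(2, std::vector<float>(2, 0.0f));
    add_backward({1, 1}, dxs);                                 // every dx becomes {1, 1}
    std::cout << y[0] << " " << y[1] << "\n";
    return 0;
}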
lighting.c
#include "image.h" #include <stdlib.h> #include <assert.h> #include <memory.h> #include <kazmath/vec3.h> static float _occlusion_scale = 1.0f; void heman_lighting_set_occlusion_scale(float s) { _occlusion_scale = s; } heman_image* heman_lighting_compute_normals(heman_image* heightmap) { assert(heightmap->nbands == 1); int width = heightmap->width; int height = heightmap->height; heman_image* result = heman_image_create(width, height, 3); HEMAN_FLOAT invh = 1.0f / height; HEMAN_FLOAT invw = 1.0f / width; int maxx = width - 1; int maxy = height - 1; kmVec3* normals = (kmVec3*) result->data; #pragma omp parallel for for (int y = 0; y < height; y++) { HEMAN_FLOAT v = y * invh; int y1 = MIN(y + 1, maxy); kmVec3 p; kmVec3 px; kmVec3 py; kmVec3* n = normals + y * width; for (int x = 0; x < width; x++, n++) { HEMAN_FLOAT u = x * invw; int x1 = MIN(x + 1, maxx); p.x = u; p.y = v; p.z = *heman_image_texel(heightmap, x, y); px.x = u + invw; px.y = v; px.z = *heman_image_texel(heightmap, x1, y); py.x = u; py.y = v + invh; py.z = *heman_image_texel(heightmap, x, y1); kmVec3Subtract(&px, &px, &p); kmVec3Subtract(&py, &py, &p); kmVec3Cross(n, &px, &py); kmVec3Normalize(n, n); n->y *= -1; } } return result; } heman_image* heman_lighting_apply(heman_image* heightmap, heman_image* albedo, float occlusion, float diffuse, float diffuse_softening, const float* light_position) { assert(heightmap->nbands == 1); int width = heightmap->width; int height = heightmap->height; heman_image* final = heman_image_create(width, height, 3); heman_image* normals = heman_lighting_compute_normals(heightmap); heman_image* occ = heman_lighting_compute_occlusion(heightmap); if (albedo) { assert(albedo->nbands == 3); assert(albedo->width == width); assert(albedo->height == height); } static float default_pos[] = {-0.5f, 0.5f, 1.0f}; if (!light_position) { light_position = default_pos; } kmVec3* colors = (kmVec3*) final->data; HEMAN_FLOAT invgamma = 1.0f / _gamma; kmVec3 L; L.x = light_position[0]; L.y = light_position[1]; L.z = light_position[2]; kmVec3Normalize(&L, &L); #pragma omp parallel for for (int y = 0; y < height; y++) { kmVec3* color = colors + y * width; for (int x = 0; x < width; x++, color++) { kmVec3* N = (kmVec3*) heman_image_texel(normals, x, y); kmVec3Lerp(N, N, &KM_VEC3_POS_Z, diffuse_softening); HEMAN_FLOAT df = 1 - diffuse * (1 - kmClamp(kmVec3Dot(N, &L), 0, 1)); HEMAN_FLOAT of = 1 - occlusion * (1 - *heman_image_texel(occ, x, y)); if (albedo) { *color = *((kmVec3*) heman_image_texel(albedo, x, y)); } else { color->x = color->y = color->z = 1; } color->x = pow(color->x, _gamma); color->y = pow(color->y, _gamma); color->z = pow(color->z, _gamma); kmVec3Scale(color, color, df * of); color->x = pow(color->x, invgamma); color->y = pow(color->y, invgamma); color->z = pow(color->z, invgamma); } } heman_image_destroy(normals); heman_image_destroy(occ); return final; } #define NUM_SCANS (16) #define INV_SCANS (1.0f / 16.0f) static HEMAN_FLOAT azimuth_slope(kmVec3 a, kmVec3 b) { kmVec3 d; kmVec3Subtract(&d, &a, &b); HEMAN_FLOAT x = kmVec3Length(&d); HEMAN_FLOAT y = b.z - a.z; return y / x; } static HEMAN_FLOAT compute_occlusion(kmVec3 thispt, kmVec3 horizonpt) { kmVec3 direction; kmVec3Subtract(&direction, &horizonpt, &thispt); kmVec3Normalize(&direction, &direction); HEMAN_FLOAT dot = kmVec3Dot(&direction, &KM_VEC3_POS_Z); return atan(MAX(dot, 0.0f)) * TWO_OVER_PI; } static void horizon_scan( heman_image* heightmap, heman_image* result, int* startpts, int dx, int dy) { int w = heightmap->width, h = heightmap->height; 
    int sx = SGN(dx), sy = SGN(dy);
    int ax = abs(dx), ay = abs(dy);

    // Generate the start positions for each sweep line. The start positions
    // occur just outside the image boundary.
    int nsweeps = ay * w + ax * h - (ax + ay - 1);
    int* p = startpts;
    for (int x = -ax; x < w - ax; x++) {
        for (int y = -ay; y < h - ay; y++) {
            if (x >= 0 && x < w && y >= 0 && y < h) {
                continue;
            }
            *p++ = (sx < 0) ? (w - x - 1) : x;
            *p++ = (sy < 0) ? (h - y - 1) : y;
        }
    }
    assert(nsweeps == (p - startpts) / 2);

    // Compute the number of steps by doing a mock sweep.
    int pathlen = 0;
    int i = startpts[0], j = startpts[1];
    do {
        i += dx;
        j += dy;
        ++pathlen;
    } while (i >= 0 && i < w && j >= 0 && j < h);

    // Each cell in the grid has a certain width and height. These can be
    // multiplied by row / column indices to get world-space X / Y values,
    // which are in the same coordinate system as the height values.
    HEMAN_FLOAT cellw = _occlusion_scale / MAX(w, h);
    HEMAN_FLOAT cellh = _occlusion_scale / MAX(w, h);

    // Initialize a stack of candidate horizon points, one for each sweep. In a
    // serial implementation we wouldn't need to allocate this much memory, but
    // we're trying to make life easy for multithreading.
    kmVec3* hull_buffer = malloc(sizeof(kmVec3) * pathlen * nsweeps);

    // Finally, perform the actual sweeps. We're careful to touch each pixel
    // exactly once, which makes this embarrassingly threadable.
#pragma omp parallel for
    for (int sweep = 0; sweep < nsweeps; sweep++) {
        kmVec3* convex_hull = hull_buffer + sweep * pathlen;
        int* p = startpts + sweep * 2;
        int i = p[0];
        int j = p[1];
        kmVec3 thispt, horizonpt;
        thispt.x = i * cellw;
        thispt.y = j * cellh;
        thispt.z = *heman_image_texel(heightmap, EDGE(i, w), EDGE(j, h));
        int stack_top = 0;
        convex_hull[0] = thispt;
        i += dx, j += dy;
        while (i >= 0 && i < w && j >= 0 && j < h) {
            thispt.x = i * cellw;
            thispt.y = j * cellh;
            thispt.z = *heman_image_texel(heightmap, i, j);
            while (stack_top > 0) {
                HEMAN_FLOAT s1 = azimuth_slope(thispt, convex_hull[stack_top]);
                HEMAN_FLOAT s2 = azimuth_slope(thispt, convex_hull[stack_top - 1]);
                if (s1 >= s2) {
                    break;
                }
                stack_top--;
            }
            horizonpt = convex_hull[stack_top++];
            assert(stack_top < pathlen);
            convex_hull[stack_top] = thispt;
            HEMAN_FLOAT occlusion = compute_occlusion(thispt, horizonpt);
            *heman_image_texel(result, i, j) += INV_SCANS * occlusion;
            i += dx;
            j += dy;
        }
    }
    free(hull_buffer);
}

heman_image* heman_lighting_compute_occlusion(heman_image* heightmap) {
    assert(heightmap->nbands == 1);
    int width = heightmap->width;
    int height = heightmap->height;
    heman_image* result = heman_image_create(width, height, 1);
    memset(result->data, 0, sizeof(HEMAN_FLOAT) * width * height);

    // Define sixteen 2D vectors, used for the sweep directions.
    const int scans[NUM_SCANS * 2] = {
        1, 0, 0, 1, -1, 0, 0, -1,    // Rook
        1, 1, -1, -1, 1, -1, -1, 1,  // Bishop
        2, 1, 2, -1, -2, 1, -2, -1,
        1, 2, 1, -2, -1, 2, -1, -2   // Knight
    };

    // Allocate memory that will store the starting positions of each sweep.
    int* startpts = malloc(sizeof(int) * 2 * 3 * kmMax(width, height));

    // Make each sweep serially, accumulating the result.
    for (int i = 0; i < NUM_SCANS; i++) {
        int dx = scans[i * 2];
        int dy = scans[i * 2 + 1];
        horizon_scan(heightmap, result, startpts, dx, dy);
    }

    // Invert the occlusion values and make sure they are valid.
    for (int i = 0; i < width * height; i++) {
        result->data[i] = 1.0f - result->data[i];
        assert(result->data[i] >= 0.0 && result->data[i] <= 1.0f);
    }
    free(startpts);
    return result;
}
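// ---------------------------------------------------------------------------
// Illustrative sketch (C++, not part of heman): horizon_scan() above finds,
// for every pixel, the horizon point that most blocks the sky along a sweep
// direction; the convex-hull stack only exists to make that search cheap.
// The brute-force 1-D version below computes the same per-sample quantity,
// using the identical atan(dot) * 2/pi mapping as compute_occlusion(). In the
// real code 16 such sweeps are accumulated with weight INV_SCANS and the
// accumulated value is finally inverted (1 - occ). Names are illustrative.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

// Occlusion of each sample of a 1-D height profile (unit sample spacing) by
// the samples to its left; every candidate horizon point is tested instead
// of keeping a convex-hull stack. 0 = open sky, larger = more blocked.
static std::vector<double> occlusion_1d(const std::vector<double>& h) {
    const double two_over_pi = 2.0 / 3.14159265358979323846;
    std::vector<double> occ(h.size(), 0.0);
    for (std::size_t i = 0; i < h.size(); ++i) {
        double best = 0.0; // largest +Z component of a unit vector toward a horizon point
        for (std::size_t j = 0; j < i; ++j) {
            const double dz = h[j] - h[i];    // how far candidate j rises above sample i
            const double dx = double(i - j);  // horizontal distance
            best = std::max(best, dz / std::sqrt(dz * dz + dx * dx));
        }
        occ[i] = std::atan(best) * two_over_pi; // same mapping as compute_occlusion()
    }
    return occ;
}

int main() {
    const std::vector<double> occ = occlusion_1d({0.0, 0.5, 0.0, 0.0});
    for (double o : occ) std::cout << o << " "; // samples 2 and 3 are shadowed by the bump at 1
    std::cout << "\n";
    return 0;
}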
SystemMatrix.h
/***************************************************************************** * * Copyright (c) 2003-2020 by The University of Queensland * http://www.uq.edu.au * * Primary Business: Queensland, Australia * Licensed under the Apache License, version 2.0 * http://www.apache.org/licenses/LICENSE-2.0 * * Development until 2012 by Earth Systems Science Computational Center (ESSCC) * Development 2012-2013 by School of Earth Sciences * Development from 2014-2017 by Centre for Geoscience Computing (GeoComp) * Development from 2019 by School of Earth and Environmental Sciences ** *****************************************************************************/ /****************************************************************************/ /* Paso: SystemMatrix */ /****************************************************************************/ /* Copyrights by ACcESS Australia 2003,2004,2005,2006 */ /* Author: Lutz Gross, l.gross@uq.edu.au */ /****************************************************************************/ #ifndef __PASO_SYSTEMMATRIX_H__ #define __PASO_SYSTEMMATRIX_H__ #include "SparseMatrix.h" #include "SystemMatrixPattern.h" #include <escript/AbstractSystemMatrix.h> namespace paso { struct Options; template <class T> class SystemMatrix; template <typename T> using SystemMatrix_ptr = boost::shared_ptr<SystemMatrix<T> >; template <typename T> using const_SystemMatrix_ptr = boost::shared_ptr<const SystemMatrix<T> >; typedef int SystemMatrixType; /// this class holds a (distributed) stiffness matrix template <class T> class SystemMatrix : public escript::AbstractSystemMatrix { public: /// default constructor - throws exception. SystemMatrix(); SystemMatrix(SystemMatrixType type, SystemMatrixPattern_ptr pattern, dim_t rowBlockSize, dim_t columnBlockSize, bool patternIsUnrolled, const escript::FunctionSpace& rowFS, const escript::FunctionSpace& colFS); ~SystemMatrix(); /// Nullifies rows and columns in the matrix. /// The rows and columns are marked by positive values in mask_row and /// mask_col. Values on the main diagonal which are marked to set to /// zero by both mask_row and mask_col are set to main_diagonal_value. virtual void nullifyRowsAndCols(escript::Data& mask_row, escript::Data& mask_col, double main_diagonal_value); virtual inline void saveMM(const std::string& filename) const { if (mpi_info->size > 1) { //throw PasoException("SystemMatrix::saveMM: Only single rank supported."); SparseMatrix_ptr<T> merged(mergeSystemMatrix()); if (mpi_info->rank == 0) merged->saveMM(filename.c_str()); } else { mainBlock->saveMM(filename.c_str()); } } virtual inline void saveHB(const std::string& filename) const { if (mpi_info->size > 1) { throw PasoException("SystemMatrix::saveHB: Only single rank supported."); } else if (!(type & MATRIX_FORMAT_CSC)) { throw PasoException("SystemMatrix::saveHB: Only CSC format supported."); } else { mainBlock->saveHB_CSC(filename.c_str()); } } virtual void resetValues(bool preserveSolverData = false); /// Nullifies rows in the matrix. /// The rows are marked by positive values in mask_row. Values on the /// main diagonal which are marked to set to zero by mask_row are set /// to main_diagonal_value. void nullifyRows(double* mask_row, double main_diagonal_value); void add(dim_t, index_t*, dim_t, dim_t, index_t*, dim_t, double*); void makeZeroRowSums(double* left_over); /// copies the col_coupleBlock into row_coupleBlock. /// WARNING: this method uses mpi_requests of the coupler attached to the /// matrix. No reordering on the received columns is performed. 
/// In practice this means that components in /// row_coupleBlock->pattern->index and /// row_coupler->connector->recv->shared /// are ordered by increasing value. /// Note that send and receive row_coupler->connectors are swapping roles. void copyColCoupleBlock(); void copyRemoteCoupleBlock(bool recreatePattern); void fillWithGlobalCoordinates(double f1); void print() const; /// Merges the system matrix which is distributed on several MPI ranks /// into a complete sparse matrix on rank 0. Used by the Merged Solver. SparseMatrix_ptr<T> mergeSystemMatrix() const; void mergeMainAndCouple(index_t** p_ptr, index_t** p_idx, double** p_val) const; void mergeMainAndCouple_CSR_OFFSET0(index_t** p_ptr, index_t** p_idx, double** p_val) const; void mergeMainAndCouple_CSR_OFFSET0_Block(index_t** p_ptr, index_t** p_idx, double** p_val) const; void mergeMainAndCouple_CSC_OFFSET1(index_t** p_ptr, index_t** p_idx, double** p_val) const; void copyMain_CSC_OFFSET1(index_t** p_ptr, index_t** p_idx, double** p_val); void extendedRowsForST(dim_t* degree_ST, index_t* offset_ST, index_t* ST); void applyBalanceInPlace(double* x, bool RHS) const; void applyBalance(double* x_out, const double* x, bool RHS) const; void balance(); double getGlobalSize() const; void setPreconditioner(Options* options); /// Applies the preconditioner. /// This method needs to be called within a parallel region. /// Barrier synchronization is performed before the evaluation to make /// sure that the input vector is available void solvePreconditioner(double* x, double* b); void freePreconditioner(); index_t* borrowMainDiagonalPointer() const; inline void startCollect(const double* in) const { startColCollect(in); } inline double* finishCollect() const { return finishColCollect(); } inline void startColCollect(const double* in) const { col_coupler->startCollect(in); } inline double* finishColCollect() const { return col_coupler->finishCollect(); } inline void startRowCollect(const double* in) { row_coupler->startCollect(in); } inline double* finishRowCollect() { return row_coupler->finishCollect(); } inline dim_t getNumRows() const { return mainBlock->numRows; } inline dim_t getNumCols() const { return mainBlock->numCols; } inline dim_t getTotalNumRows() const { return getNumRows() * row_block_size; } inline dim_t getTotalNumCols() const { return getNumCols() * col_block_size; } inline dim_t getRowOverlap() const { return row_coupler->getNumOverlapComponents(); } inline dim_t getColOverlap() const { return col_coupler->getNumOverlapComponents(); } inline dim_t getGlobalNumRows() const { if (type & MATRIX_FORMAT_CSC) { return pattern->input_distribution->getGlobalNumComponents(); } return pattern->output_distribution->getGlobalNumComponents(); } inline dim_t getGlobalNumCols() const { if (type & MATRIX_FORMAT_CSC) { return pattern->output_distribution->getGlobalNumComponents(); } return pattern->input_distribution->getGlobalNumComponents(); } inline dim_t getGlobalTotalNumRows() const { return getGlobalNumRows() * row_block_size; } inline dim_t getGlobalTotalNumCols() const { return getGlobalNumCols() * col_block_size; } inline double getSparsity() const { return getGlobalSize() / ((double)getGlobalTotalNumRows()*getGlobalTotalNumCols()); } inline dim_t getNumOutput() const { return pattern->getNumOutput(); } inline void copyBlockFromMainDiagonal(double* out) const { mainBlock->copyBlockFromMainDiagonal(out); } inline void copyBlockToMainDiagonal(const double* in) { mainBlock->copyBlockToMainDiagonal(in); } inline void 
copyFromMainDiagonal(double* out) const { mainBlock->copyFromMainDiagonal(out); } inline void copyToMainDiagonal(const double* in) { mainBlock->copyToMainDiagonal(in); } inline void setValues(double value) { mainBlock->setValues(value); col_coupleBlock->setValues(value); row_coupleBlock->setValues(value); is_balanced = false; } inline void rowSum(double* row_sum) const { if ((type & MATRIX_FORMAT_CSC) || (type & MATRIX_FORMAT_OFFSET1)) { throw PasoException("SystemMatrix::rowSum: No normalization " "available for compressed sparse column or index offset 1."); } else { const dim_t nrow = mainBlock->numRows*row_block_size; #pragma omp parallel for for (index_t irow=0; irow<nrow; ++irow) { row_sum[irow]=0.; } mainBlock->addRow_CSR_OFFSET0(row_sum); col_coupleBlock->addRow_CSR_OFFSET0(row_sum); } } void MatrixVector(double alpha, const T* in, double beta, T* out) const; void MatrixVector_CSR_OFFSET0(double alpha, const double* in, double beta, double* out) const; static SystemMatrix_ptr<double> loadMM_toCSR(const char* filename); static SystemMatrix_ptr<double> loadMM_toCSC(const char* filename); static int getSystemMatrixTypeId(int solver, int preconditioner, int package, bool is_complex, bool symmetry, const escript::JMPI& mpi_info); SystemMatrixType type; SystemMatrixPattern_ptr pattern; dim_t logical_row_block_size; dim_t logical_col_block_size; dim_t row_block_size; dim_t col_block_size; dim_t block_size; escript::Distribution_ptr row_distribution; escript::Distribution_ptr col_distribution; escript::JMPI mpi_info; Coupler_ptr<real_t> col_coupler; Coupler_ptr<real_t> row_coupler; /// main block SparseMatrix_ptr<T> mainBlock; /// coupling to neighbouring processors (row - col) SparseMatrix_ptr<T> col_coupleBlock; /// coupling to neighbouring processors (col - row) SparseMatrix_ptr<T> row_coupleBlock; /// coupling of rows-cols on neighbouring processors (may not be valid) SparseMatrix_ptr<T> remote_coupleBlock; bool is_balanced; /// matrix may be balanced by a diagonal matrix D=diagonal(balance_vector) /// if is_balanced is true, the matrix stored is D*A*D where A is the /// original matrix. /// When the system of linear equations is solved we solve D*A*D*y=c. /// So to solve A*x=b one needs to set c=D*b and x=D*y. 
double* balance_vector; /// stores the global ids for all cols in col_coupleBlock mutable index_t* global_id; /// package code controlling the solver pointer mutable index_t solver_package; /// pointer to data needed by a solver void* solver_p; private: virtual void setToSolution(escript::Data& out, escript::Data& in, boost::python::object& options) const; virtual void ypAx(escript::Data& y, escript::Data& x) const; void solve(T* out, T* in, Options* options) const; }; void RHS_loadMM_toCSR(const char* filename, double* b, dim_t size); } // namespace paso #include "Options.h" #include "Solver.h" #include <escript/Data.h> namespace paso { template <> SparseMatrix_ptr<double> PASO_DLL_API SystemMatrix<double>::mergeSystemMatrix() const; template <> SparseMatrix_ptr<cplx_t> PASO_DLL_API SystemMatrix<cplx_t>::mergeSystemMatrix() const; template <> void PASO_DLL_API SystemMatrix<double>::MatrixVector(double alpha, const double* in, double beta, double* out) const; template <> void PASO_DLL_API SystemMatrix<cplx_t>::MatrixVector(double alpha, const cplx_t* in, double beta, cplx_t* out) const; template <> void PASO_DLL_API SystemMatrix<double>::solve(double* out, double* in, Options* options) const; template <> void PASO_DLL_API SystemMatrix<cplx_t>::solve(cplx_t* out, cplx_t* in, Options* options) const; template <class T> SystemMatrix<T>::SystemMatrix() { throw PasoException("SystemMatrix: Illegal to generate default SystemMatrix."); } /// Allocates a SystemMatrix of given type using the given matrix pattern. /// Values are initialized with zero. /// If patternIsUnrolled and type & MATRIX_FORMAT_BLK1, it is assumed /// that the pattern is already unrolled to match the requested block size /// and offsets. Otherwise unrolling and offset adjustment will be performed. template <class T> SystemMatrix<T>::SystemMatrix(SystemMatrixType ntype, SystemMatrixPattern_ptr npattern, dim_t rowBlockSize, dim_t colBlockSize, bool patternIsUnrolled, const escript::FunctionSpace& rowFS, const escript::FunctionSpace& colFS) : escript::AbstractSystemMatrix(rowBlockSize, rowFS, colBlockSize, colFS), type(ntype), logical_row_block_size(rowBlockSize), logical_col_block_size(colBlockSize), is_balanced(false), balance_vector(NULL), global_id(NULL), solver_package(PASO_PASO), solver_p(NULL) { if (patternIsUnrolled) { if ((ntype & MATRIX_FORMAT_OFFSET1) != (npattern->type & MATRIX_FORMAT_OFFSET1)) { throw PasoException("SystemMatrix: requested offset and pattern offset do not match."); } } // do we need to apply unrolling? bool unroll // we don't like non-square blocks = (rowBlockSize != colBlockSize) #ifndef ESYS_HAVE_LAPACK // or any block size bigger than 3 || (colBlockSize > 3) #endif // or if block size one requested and the block size is not 1 || ((ntype & MATRIX_FORMAT_BLK1) && colBlockSize > 1) // or the offsets don't match || ((ntype & MATRIX_FORMAT_OFFSET1) != (npattern->type & MATRIX_FORMAT_OFFSET1)); SystemMatrixType pattern_format_out = (ntype & MATRIX_FORMAT_OFFSET1) ? 
MATRIX_FORMAT_OFFSET1 : MATRIX_FORMAT_DEFAULT; mpi_info = npattern->mpi_info; if (ntype & MATRIX_FORMAT_CSC) { if (unroll) { if (patternIsUnrolled) { pattern=npattern; } else { pattern = npattern->unrollBlocks(pattern_format_out, colBlockSize, rowBlockSize); } row_block_size = 1; col_block_size = 1; } else { pattern = npattern->unrollBlocks(pattern_format_out, 1, 1); row_block_size = rowBlockSize; col_block_size = colBlockSize; } row_distribution = pattern->input_distribution; col_distribution = pattern->output_distribution; } else { if (unroll) { if (patternIsUnrolled) { pattern = npattern; } else { pattern = npattern->unrollBlocks(pattern_format_out, rowBlockSize, colBlockSize); } row_block_size = 1; col_block_size = 1; } else { pattern = npattern->unrollBlocks(pattern_format_out, 1, 1); row_block_size = rowBlockSize; col_block_size = colBlockSize; } row_distribution = pattern->output_distribution; col_distribution = pattern->input_distribution; } if (ntype & MATRIX_FORMAT_DIAGONAL_BLOCK) { block_size = std::min(row_block_size, col_block_size); } else { block_size = row_block_size*col_block_size; } col_coupler.reset(new Coupler<real_t>(pattern->col_connector, col_block_size, mpi_info)); row_coupler.reset(new Coupler<real_t>(pattern->row_connector, row_block_size, mpi_info)); mainBlock.reset(new SparseMatrix<T>(type, pattern->mainPattern, row_block_size, col_block_size, true)); col_coupleBlock.reset(new SparseMatrix<T>(type, pattern->col_couplePattern, row_block_size, col_block_size, true)); row_coupleBlock.reset(new SparseMatrix<T>(type, pattern->row_couplePattern, row_block_size, col_block_size, true)); const dim_t n_norm = std::max(mainBlock->numCols*col_block_size, mainBlock->numRows*row_block_size); balance_vector = new double[n_norm]; #pragma omp parallel for for (dim_t i=0; i<n_norm; ++i) balance_vector[i] = 1.; } // deallocates a SystemMatrix template <class T> SystemMatrix<T>::~SystemMatrix() { solve_free(this); delete[] balance_vector; delete[] global_id; } template <class T> int SystemMatrix<T>::getSystemMatrixTypeId(int solver, int preconditioner, int package, bool is_complex, bool symmetry, const escript::JMPI& mpi_info) { int out = -1; int true_package = Options::getPackage(Options::mapEscriptOption(solver), Options::mapEscriptOption(package), symmetry, mpi_info); switch(true_package) { case PASO_PASO: out = MATRIX_FORMAT_DEFAULT; break; case PASO_MKL: out = MATRIX_FORMAT_BLK1 | MATRIX_FORMAT_OFFSET1; break; case PASO_UMFPACK: if (mpi_info->size > 1) { throw PasoException("The selected solver UMFPACK " "requires CSC format which is not supported with " "more than one rank."); } else { out = MATRIX_FORMAT_CSC | MATRIX_FORMAT_BLK1; } break; case PASO_MUMPS: out = MATRIX_FORMAT_BLK1 | MATRIX_FORMAT_OFFSET1; break; default: throw PasoException("unknown package code"); } if (out > 0 && is_complex) out |= MATRIX_FORMAT_COMPLEX; return out; } template <class T> void SystemMatrix<T>::nullifyRowsAndCols(escript::Data& row_q, escript::Data& col_q, double main_diagonal_value) { if (row_q.isComplex() || col_q.isComplex()) { throw PasoException("SystemMatrix::nullifyRowsAndCols: complex arguments not supported"); } if (col_q.getDataPointSize() != getColumnBlockSize()) { throw PasoException("nullifyRowsAndCols: column block size does not match the number of components of column mask."); } else if (row_q.getDataPointSize() != getRowBlockSize()) { throw PasoException("nullifyRowsAndCols: row block size does not match the number of components of row mask."); } else if 
(col_q.getFunctionSpace() != getColumnFunctionSpace()) { throw PasoException("nullifyRowsAndCols: column function space and function space of column mask don't match."); } else if (row_q.getFunctionSpace() != getRowFunctionSpace()) { throw PasoException("nullifyRowsAndCols: row function space and function space of row mask don't match."); } row_q.expand(); col_q.expand(); row_q.requireWrite(); col_q.requireWrite(); double* mask_row = row_q.getExpandedVectorReference(static_cast<escript::DataTypes::real_t>(0)).data(); double* mask_col = col_q.getExpandedVectorReference(static_cast<escript::DataTypes::real_t>(0)).data(); if (mpi_info->size > 1) { if (type & MATRIX_FORMAT_CSC) { throw PasoException("SystemMatrix::nullifyRowsAndCols: " "CSC is not supported with MPI."); } startColCollect(mask_col); startRowCollect(mask_row); if (col_block_size==1 && row_block_size==1) { mainBlock->nullifyRowsAndCols_CSR_BLK1(mask_row, mask_col, main_diagonal_value); double* remote_values = finishColCollect(); col_coupleBlock->nullifyRowsAndCols_CSR_BLK1(mask_row, remote_values, 0.); remote_values = finishRowCollect(); row_coupleBlock->nullifyRowsAndCols_CSR_BLK1(remote_values, mask_col, 0.); } else { mainBlock->nullifyRowsAndCols_CSR(mask_row, mask_col, main_diagonal_value); double* remote_values = finishColCollect(); col_coupleBlock->nullifyRowsAndCols_CSR(mask_row, remote_values, 0.); remote_values = finishRowCollect(); row_coupleBlock->nullifyRowsAndCols_CSR(remote_values, mask_col, 0.); } } else { if (col_block_size==1 && row_block_size==1) { if (type & MATRIX_FORMAT_CSC) { mainBlock->nullifyRowsAndCols_CSC_BLK1(mask_row, mask_col, main_diagonal_value); } else { mainBlock->nullifyRowsAndCols_CSR_BLK1(mask_row, mask_col, main_diagonal_value); } } else { if (type & MATRIX_FORMAT_CSC) { mainBlock->nullifyRowsAndCols_CSC(mask_row, mask_col, main_diagonal_value); } else { mainBlock->nullifyRowsAndCols_CSR(mask_row, mask_col, main_diagonal_value); } } } } template <class T> void SystemMatrix<T>::resetValues(bool preserveSolverData) { setValues(0.); if (!preserveSolverData) solve_free(this); } template <class T> void SystemMatrix<T>::setToSolution(escript::Data& out, escript::Data& in, boost::python::object& options) const { #if !defined(ESYS_HAVE_MUMPS) if (in.isComplex() || out.isComplex()) { throw PasoException("SystemMatrix::setToSolution: complex arguments not supported."); } #endif options.attr("resetDiagnostics")(); Options paso_options(options); if (out.getDataPointSize() != getColumnBlockSize()) { throw PasoException("solve: column block size does not match the number of components of solution."); } else if (in.getDataPointSize() != getRowBlockSize()) { throw PasoException("solve: row block size does not match the number of components of right hand side."); } else if (out.getFunctionSpace() != getColumnFunctionSpace()) { throw PasoException("solve: column function space and function space of solution don't match."); } else if (in.getFunctionSpace() != getRowFunctionSpace()) { throw PasoException("solve: row function space and function space of right hand side don't match."); } out.expand(); in.expand(); out.requireWrite(); in.requireWrite(); T* out_dp = out.getExpandedVectorReference(static_cast<T>(0)).data(); T* in_dp = in.getExpandedVectorReference(static_cast<T>(0)).data(); solve(out_dp, in_dp, &paso_options); paso_options.updateEscriptDiagnostics(options); } template <class T> void SystemMatrix<T>::ypAx(escript::Data& y, escript::Data& x) const { #if !defined(ESYS_HAVE_MUMPS) if (x.isComplex() || 
y.isComplex()) { throw PasoException("SystemMatrix::ypAx: complex arguments not supported."); } #endif if (x.getDataPointSize() != getColumnBlockSize()) { throw PasoException("matrix vector product: column block size does not match the number of components in input."); } else if (y.getDataPointSize() != getRowBlockSize()) { throw PasoException("matrix vector product: row block size does not match the number of components in output."); } else if (x.getFunctionSpace() != getColumnFunctionSpace()) { throw PasoException("matrix vector product: column function space and function space of input don't match."); } else if (y.getFunctionSpace() != getRowFunctionSpace()) { throw PasoException("matrix vector product: row function space and function space of output don't match."); } x.expand(); y.expand(); x.requireWrite(); y.requireWrite(); T* x_dp = x.getExpandedVectorReference(static_cast<T>(0)).data(); T* y_dp = y.getExpandedVectorReference(static_cast<T>(0)).data(); MatrixVector(1., x_dp, 1., y_dp); } } // namespace paso #endif // __PASO_SYSTEMMATRIX_H__
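// ---------------------------------------------------------------------------
// Illustrative sketch (not part of Paso): the MPI variant of MatrixVector()
// (specialized elsewhere) follows the overlap pattern visible in the class
// interface above -- start the asynchronous halo exchange (startColCollect),
// multiply with mainBlock while messages are in flight, then finish the
// collect and apply col_coupleBlock to the received remote values.
// Csr, spmv_add and matrix_vector below are hypothetical stand-ins, not the
// actual paso::SparseMatrix kernels.
// ---------------------------------------------------------------------------
#include <iostream>
#include <vector>

struct Csr {                    // minimal CSR block
    int nrows;
    std::vector<int> ptr, idx;  // row offsets / column indices
    std::vector<double> val;    // nonzero values
};

// y += alpha * A * x for one block.
static void spmv_add(const Csr& A, double alpha, const double* x, double* y) {
    for (int i = 0; i < A.nrows; ++i)
        for (int k = A.ptr[i]; k < A.ptr[i + 1]; ++k)
            y[i] += alpha * A.val[k] * x[A.idx[k]];
}

// Shape of y = alpha*A*x + beta*y when A is split into mainBlock (locally
// owned columns) and colCoupleBlock (columns owned by neighbour ranks).
// x_remote stands in for what finishColCollect() would return.
static void matrix_vector(const Csr& mainBlock, const Csr& colCoupleBlock,
                          double alpha, const std::vector<double>& x_local,
                          const std::vector<double>& x_remote,
                          double beta, std::vector<double>& y) {
    // 1. startColCollect(x_local) would be posted here (asynchronous halo exchange).
    for (double& yi : y) yi *= beta;                          // 2. scale y ...
    spmv_add(mainBlock, alpha, x_local.data(), y.data());     //    ... and overlap the local product
    // 3. x_remote = finishColCollect() -- wait for the halo values.
    spmv_add(colCoupleBlock, alpha, x_remote.data(), y.data()); // 4. remote contribution
}

int main() {
    // 1x2 logical matrix [2 3]: local column in mainB, remote column in coupleB.
    Csr mainB{1, {0, 1}, {0}, {2.0}};
    Csr coupleB{1, {0, 1}, {0}, {3.0}};
    std::vector<double> y{1.0};
    matrix_vector(mainB, coupleB, 1.0, {4.0}, {5.0}, 1.0, y);
    std::cout << y[0] << "\n"; // 1 + 2*4 + 3*5 = 24
    return 0;
}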
sbessel.h
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file sbessel.h * * \brief Contains implementation of sirius::Spherical_Bessel_functions and sirius::sbessel_approx classes. */ #ifndef __SBESSEL_PW_H__ #define __SBESSEL_PW_H__ #include <gsl/gsl_sf_bessel.h> #include "eigenproblem.h" #include "Unit_cell/unit_cell.h" namespace sirius { /// Spherical Bessel functions \f$ j_{\ell}(q x) \f$ up to lmax. class Spherical_Bessel_functions { private: int lmax_{-1}; double q_{0}; Radial_grid<double> const* rgrid_{nullptr}; std::vector<Spline<double>> sbessel_; public: Spherical_Bessel_functions() { } Spherical_Bessel_functions(int lmax__, Radial_grid<double> const& rgrid__, double q__) : lmax_(lmax__) , q_(q__) , rgrid_(&rgrid__) { assert(q_ >= 0); sbessel_ = std::vector<Spline<double>>(lmax__ + 2); for (int l = 0; l <= lmax__ + 1; l++) { sbessel_[l] = Spline<double>(rgrid__); } std::vector<double> jl(lmax__ + 2); for (int ir = 0; ir < rgrid__.num_points(); ir++) { double t = rgrid__[ir] * q__; gsl_sf_bessel_jl_array(lmax__ + 1, t, &jl[0]); for (int l = 0; l <= lmax__ + 1; l++) { sbessel_[l](ir) = jl[l]; } } for (int l = 0; l <= lmax__ + 1; l++) { sbessel_[l].interpolate(); } } static void sbessel(int lmax__, double t__, double* jl__) { gsl_sf_bessel_jl_array(lmax__, t__, jl__); } static void sbessel_deriv_q(int lmax__, double q__, double x__, double* jl_dq__) { std::vector<double> jl(lmax__ + 2); sbessel(lmax__ + 1, x__ * q__, &jl[0]); for (int l = 0; l <= lmax__; l++) { if (q__ != 0) { jl_dq__[l] = (l / q__) * jl[l] - x__ * jl[l + 1]; } else { if (l == 1) { jl_dq__[l] = x__ / 3; } else { jl_dq__[l] = 0; } } } } Spline<double> const& operator[](int l__) const { assert(l__ <= lmax_); return sbessel_[l__]; } /// Derivative of Bessel function with respect to q. 
/** \f[ * \frac{\partial j_{\ell}(q x)}{\partial q} = \frac{\ell}{q} j_{\ell}(q x) - x j_{\ell+1}(q x) * \f] */ Spline<double> deriv_q(int l__) { assert(l__ <= lmax_); assert(q_ >= 0); Spline<double> s(*rgrid_); if (q_ != 0) { for (int ir = 0; ir < rgrid_->num_points(); ir++) { s(ir) = (l__ / q_) * sbessel_[l__](ir) - (*rgrid_)[ir] * sbessel_[l__ + 1](ir); } } else { if (l__ == 1) { for (int ir = 0; ir < rgrid_->num_points(); ir++) { s(ir) = (*rgrid_)[ir] / 3; } } } s.interpolate(); return std::move(s); } }; class sbessel_approx { private: Unit_cell const& unit_cell_; int lmax_; mdarray<std::vector<double>, 2> qnu_; mdarray<double, 4> coeffs_; int nqnu_max_; static double sbessel_l2norm(double nu, int l, double R) { if (std::abs(nu) < 1e-10) TERMINATE_NOT_IMPLEMENTED; if (l == 0) { return (nu * R * 2 - std::sin(nu * R * 2)) / 4 / std::pow(nu, 3); } else { double jl[l + 2]; gsl_sf_bessel_jl_array(l + 1, R * nu, &jl[0]); return std::pow(R, 3) * (jl[l] * jl[l] - jl[l + 1] * jl[l - 1]) / 2; } } public: sbessel_approx(Unit_cell const& unit_cell__, int lmax__, double const qmin__, double const qmax__, double const eps__) : unit_cell_(unit_cell__), lmax_(lmax__) { PROFILE("sirius::sbessel_approx"); qnu_ = mdarray<std::vector<double>, 2>(lmax_ + 1, unit_cell_.num_atom_types()); for (int l = 0; l <= lmax_; l++) { for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { qnu_(l, iat) = build_approx_freq(qmin__, qmax__, l, unit_cell_.atom_type(iat).mt_radius(), eps__); } } nqnu_max_ = 0; for (int l = 0; l <= lmax_; l++) { for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { nqnu_max_ = std::max(nqnu_max_, static_cast<int>(qnu_(l, iat).size())); } } } void approximate(std::vector<double> const& q__) { PROFILE("sirius::sbessel_approx::approximate"); coeffs_ = mdarray<double, 4>(nqnu_max_, q__.size(), lmax_ + 1, unit_cell_.num_atom_types()); #pragma omp parallel for for (int l = 0; l <= lmax_; l++) { for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { int n = nqnu(l, iat); mdarray<double, 2> A(n, n); for (int iq = 0; iq < n; iq++) { for (int jq = 0; jq <= iq; jq++) { A(jq, iq) = A(iq, jq) = overlap(qnu_(l, iat)[jq], qnu_(l, iat)[iq], l, unit_cell_.atom_type(iat).mt_radius()); } for (int j = 0; j < (int)q__.size(); j++) { if (std::abs(q__[j]) < 1e-12) { coeffs_(iq, j, l, iat) = 0; } else { coeffs_(iq, j, l, iat) = overlap(qnu_(l, iat)[iq], q__[j], l, unit_cell_.atom_type(iat).mt_radius()); } } } linalg<CPU>::gesv(n, (int)q__.size(), A.at<CPU>(), A.ld(), &coeffs_(0, 0, l, iat), coeffs_.ld()); } } } inline double qnu(int const iq, int const l, int const iat) { return qnu_(l, iat)[iq]; } inline int nqnu(int const l, int const iat) { return static_cast<int>(qnu_(l, iat).size()); } inline int nqnu_max() { return nqnu_max_; } inline double coeff(int const iq, int const j, int const l, int const iat) { return coeffs_(iq, j, l, iat); } // \int_0^{R} j(nu1 * r) * j(nu2 * r) * r^2 dr // this integral can be computed analytically static double overlap(double nu1__, double nu2__, int l__, double R__) { if (std::abs(nu1__) < 1e-10 || std::abs(nu2__) < 1e-10) TERMINATE_NOT_IMPLEMENTED; if (std::abs(nu1__ - nu2__) < 1e-12) { if (l__ == 0) { return (nu2__ * R__ * 2 - std::sin(nu2__ * R__ * 2)) / 4 / std::pow(nu2__, 3); } else { double jl[l__ + 2]; gsl_sf_bessel_jl_array(l__ + 1, R__ * nu2__, &jl[0]); return std::pow(R__, 3) * (jl[l__] * jl[l__] - jl[l__ + 1] * jl[l__ - 1]) / 2; } } else { if (l__ == 0) { return (nu2__ * std::cos(nu2__ * R__) * std::sin(nu1__ * R__) - nu1__ * std::cos(nu1__ * R__) 
* std::sin(nu2__ * R__)) / (std::pow(nu1__, 3) * nu2__ - nu1__ * std::pow(nu2__, 3)); } else { double j1[l__ + 2]; gsl_sf_bessel_jl_array(l__ + 1, R__ * nu1__, &j1[0]); double j2[l__ + 2]; gsl_sf_bessel_jl_array(l__ + 1, R__ * nu2__, &j2[0]); return std::pow(R__, 2) * (nu2__ * j2[l__ - 1] * j1[l__] - nu1__ * j1[l__ - 1] * j2[l__]) / (std::pow(nu1__, 2) - std::pow(nu2__, 2)); } } } std::vector<double> build_approx_freq(double const qmin__, double const qmax__, int const l__, double const R__, double const eps__) { std::vector<double> qnu; double min_val; int n = 2; do { n++; qnu.resize(n); for (int i = 0; i < n; i++) qnu[i] = qmin__ + (qmax__ - qmin__) * i / (n - 1); dmatrix<double_complex> ovlp(n, n); for (int i = 0; i < n; i++) { for (int j = 0; j <= i; j++) { double o = overlap(qnu[j], qnu[i], l__, R__); ovlp(j, i) = o / sbessel_l2norm(qnu[i], l__, R__) / sbessel_l2norm(qnu[j], l__, R__); } } std::vector<double> eval(n); dmatrix<double_complex> z(n, n); Eigensolver_lapack<double_complex> solver; solver.solve(n, ovlp, &eval[0], z); min_val = eval[0]; } while (min_val > eps__); return qnu; } }; class Spherical_Bessel_approximant { private: int lmax_; double R_; /// List of Bessel function scaling factors for each angular momentum. std::vector< std::vector<double> > qnu_; //mdarray<double, 4> coeffs_; int nqnu_max_; static double sbessel_l2norm(double nu, int l, double R) { if (std::abs(nu) < 1e-10) { if (l == 0) return std::pow(R, 3) / 3.0; return 0; } if (l == 0) { return (nu * R * 2 - std::sin(nu * R * 2)) / 4 / std::pow(nu, 3); } else { double jl[l + 2]; gsl_sf_bessel_jl_array(l + 1, R * nu, &jl[0]); return std::pow(R, 3) * (jl[l] * jl[l] - jl[l + 1] * jl[l - 1]) / 2; } } public: Spherical_Bessel_approximant(int lmax__, double R__, double const qmin__, double const qmax__, double const eps__) : lmax_(lmax__), R_(R__) { PROFILE("sirius::Spherical_Bessel_approximant"); qnu_ = std::vector< std::vector<double> >(lmax_ + 1); #pragma omp parallel for for (int l = 0; l <= lmax_; l++) qnu_[l] = build_approx_freq(qmin__, qmax__, l, R_, eps__); nqnu_max_ = 0; for (int l = 0; l <= lmax_; l++) nqnu_max_ = std::max(nqnu_max_, nqnu(l)); } std::vector<double> approximate(int l__, double nu__) { int n = nqnu(l__); std::vector<double> x(n); matrix<double> A(n, n); for (int iq = 0; iq < n; iq++) { for (int jq = 0; jq <= iq; jq++) { A(jq, iq) = A(iq, jq) = overlap(qnu(jq, l__), qnu(iq, l__), l__, R_); } x[iq] = overlap(qnu(iq, l__), nu__, l__, R_); } linalg<CPU>::gesv(n, 1, A.at<CPU>(), A.ld(), &x[0], n); return x; } void approximate(std::vector<double> const& q__) { //runtime::Timer t("sirius::sbessel_approx::approximate"); //coeffs_ = mdarray<double, 4>(nqnu_max_, q__.size(), lmax_ + 1, unit_cell_.num_atom_types()); // //#pragma omp parallel for //for (int l = 0; l <= lmax_; l++) //{ // for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) // { // int n = nqnu(l, iat); // mdarray<double, 2> A(n, n); // for (int iq = 0; iq < n; iq++) // { // for (int jq = 0; jq <= iq; jq++) // { // A(jq, iq) = A(iq, jq) = overlap(qnu_(l, iat)[jq], qnu_(l, iat)[iq], l, // unit_cell_.atom_type(iat).mt_radius()); // } // for (int j = 0; j < (int)q__.size(); j++) // { // if (std::abs(q__[j]) < 1e-12) // { // coeffs_(iq, j, l, iat) = 0; // } // else // { // coeffs_(iq, j, l, iat) = overlap(qnu_(l, iat)[iq], q__[j], l, // unit_cell_.atom_type(iat).mt_radius()); // } // } // } // linalg<CPU>::gesv(n, (int)q__.size(), A.at<CPU>(), A.ld(), &coeffs_(0, 0, l, iat), coeffs_.ld()); // } //} } inline double qnu(int const iq, int 
const l) const { return qnu_[l][iq]; } inline int nqnu(int const l) const { return static_cast<int>(qnu_[l].size()); } inline int nqnu_max() const { return nqnu_max_; } //inline double coeff(int const iq, int const j, int const l, int const iat) //{ // return coeffs_(iq, j, l, iat); //} // // \int_0^{R} j(nu1 * r) * j(nu2 * r) * r^2 dr // this integral can be computed analytically static double overlap(double nu1__, double nu2__, int l__, double R__) { if (std::abs(nu1__) < 1e-10 && std::abs(nu2__) < 1e-10 && l__ == 0) return std::pow(R__, 3) / 3.0; if ((std::abs(nu1__) < 1e-10 || std::abs(nu2__) < 1e-10) && l__ > 0) return 0; if ((std::abs(nu1__) < 1e-10 || std::abs(nu2__) < 1e-10) && l__ == 0) { double nu = std::max(nu1__, nu2__); double nuR = nu * R__; return (std::sin(nuR) - nuR * std::cos(nuR)) / std::pow(nu, 3); } if (std::abs(nu1__ - nu2__) < 1e-12) { if (l__ == 0) { return (nu2__ * R__ * 2 - std::sin(nu2__ * R__ * 2)) / 4 / std::pow(nu2__, 3); } else { double jl[l__ + 2]; gsl_sf_bessel_jl_array(l__ + 1, R__ * nu2__, &jl[0]); return std::pow(R__, 3) * (jl[l__] * jl[l__] - jl[l__ + 1] * jl[l__ - 1]) / 2; } } else { if (l__ == 0) { return (nu2__ * std::cos(nu2__ * R__) * std::sin(nu1__ * R__) - nu1__ * std::cos(nu1__ * R__) * std::sin(nu2__ * R__)) / (std::pow(nu1__, 3) * nu2__ - nu1__ * std::pow(nu2__, 3)); } else { double j1[l__ + 2]; gsl_sf_bessel_jl_array(l__ + 1, R__ * nu1__, &j1[0]); double j2[l__ + 2]; gsl_sf_bessel_jl_array(l__ + 1, R__ * nu2__, &j2[0]); return std::pow(R__, 2) * (nu2__ * j2[l__ - 1] * j1[l__] - nu1__ * j1[l__ - 1] * j2[l__]) / (std::pow(nu1__, 2) - std::pow(nu2__, 2)); } } TERMINATE("this is wrong"); return -1; } std::vector<double> build_approx_freq(double const qmin__, double const qmax__, int const l__, double const R__, double const eps__) { std::vector<double> qnu; double min_val; int n = 2; do { n++; qnu.resize(n); for (int i = 0; i < n; i++) qnu[i] = qmin__ + (qmax__ - qmin__) * i / (n - 1); dmatrix<double> ovlp(n, n); for (int i = 0; i < n; i++) { for (int j = 0; j <= i; j++) { double o = overlap(qnu[j], qnu[i], l__, R__); ovlp(j, i) = o / sbessel_l2norm(qnu[i], l__, R__) / sbessel_l2norm(qnu[j], l__, R__); } } std::vector<double> eval(n); dmatrix<double> z(n, n); Eigensolver_lapack<double> solver; solver.solve(n, ovlp, &eval[0], z); min_val = eval[0]; } while (min_val > eps__); return qnu; } }; class Spherical_Bessel_approximant2 { private: int lmax_; double R_; /// List of Bessel function scaling factors for each angular momentum. 
std::vector<double> qnu_; static double sbessel_l2norm(double nu, int l, double R) { if (std::abs(nu) < 1e-10) { if (l == 0) return std::pow(R, 3) / 3.0; return 0; } if (l == 0) { return (nu * R * 2 - std::sin(nu * R * 2)) / 4 / std::pow(nu, 3); } else { double jl[l + 2]; gsl_sf_bessel_jl_array(l + 1, R * nu, &jl[0]); return std::pow(R, 3) * (jl[l] * jl[l] - jl[l + 1] * jl[l - 1]) / 2; } } public: Spherical_Bessel_approximant2(int lmax__, double R__, double const qmin__, double const qmax__, int nq__) : lmax_(lmax__), R_(R__) { PROFILE("sirius::Spherical_Bessel_approximant"); int nq = nfreq(qmin__, qmax__, 0, R__, 1e-12); qnu_.resize(nq); for (int i = 0; i < nq; i++) qnu_[i] = qmin__ + (qmax__ - qmin__) * i / (nq - 1); } std::vector<double> approximate(int l__, double nu__) { int n = nqnu(); std::vector<double> x(n); matrix<double> A(n, n); for (int iq = 0; iq < n; iq++) { for (int jq = 0; jq <= iq; jq++) { A(jq, iq) = A(iq, jq) = overlap(qnu(jq), qnu(iq), l__, R_); } x[iq] = overlap(qnu(iq), nu__, l__, R_); } linalg<CPU>::gesv(n, 1, A.at<CPU>(), A.ld(), &x[0], n); return x; } inline double qnu(int const iq) const { return qnu_[iq]; } inline int nqnu() const { return static_cast<int>(qnu_.size()); } // \int_0^{R} j(nu1 * r) * j(nu2 * r) * r^2 dr // this integral can be computed analytically static double overlap(double nu1__, double nu2__, int l__, double R__) { if (std::abs(nu1__) < 1e-10 && std::abs(nu2__) < 1e-10 && l__ == 0) return std::pow(R__, 3) / 3.0; if ((std::abs(nu1__) < 1e-10 || std::abs(nu2__) < 1e-10) && l__ > 0) return 0; if ((std::abs(nu1__) < 1e-10 || std::abs(nu2__) < 1e-10) && l__ == 0) { double nu = std::max(nu1__, nu2__); double nuR = nu * R__; return (std::sin(nuR) - nuR * std::cos(nuR)) / std::pow(nu, 3); } if (std::abs(nu1__ - nu2__) < 1e-12) { if (l__ == 0) { return (nu2__ * R__ * 2 - std::sin(nu2__ * R__ * 2)) / 4 / std::pow(nu2__, 3); } else { std::vector<double> jl(l__ + 2); gsl_sf_bessel_jl_array(l__ + 1, R__ * nu2__, &jl[0]); return std::pow(R__, 3) * (jl[l__] * jl[l__] - jl[l__ + 1] * jl[l__ - 1]) / 2; } } else { if (l__ == 0) { return (nu2__ * std::cos(nu2__ * R__) * std::sin(nu1__ * R__) - nu1__ * std::cos(nu1__ * R__) * std::sin(nu2__ * R__)) / (std::pow(nu1__, 3) * nu2__ - nu1__ * std::pow(nu2__, 3)); } else { std::vector<double> j1(l__ + 2); gsl_sf_bessel_jl_array(l__ + 1, R__ * nu1__, &j1[0]); std::vector<double> j2(l__ + 2); gsl_sf_bessel_jl_array(l__ + 1, R__ * nu2__, &j2[0]); return std::pow(R__, 2) * (nu2__ * j2[l__ - 1] * j1[l__] - nu1__ * j1[l__ - 1] * j2[l__]) / (std::pow(nu1__, 2) - std::pow(nu2__, 2)); } } TERMINATE("this is wrong"); return -1; } int nfreq(double const qmin__, double const qmax__, int const l__, double const R__, double const eps__) { std::vector<double> qnu; double min_val; int n = 2; do { n++; qnu.resize(n); for (int i = 0; i < n; i++) qnu[i] = qmin__ + (qmax__ - qmin__) * i / (n - 1); dmatrix<double> ovlp(n, n); for (int i = 0; i < n; i++) { for (int j = 0; j <= i; j++) { double o = overlap(qnu[j], qnu[i], l__, R__); ovlp(j, i) = o / sbessel_l2norm(qnu[i], l__, R__) / sbessel_l2norm(qnu[j], l__, R__); } } std::vector<double> eval(n); dmatrix<double> z(n, n); Eigensolver_lapack<double> solver; solver.solve(n, ovlp, &eval[0], z); min_val = eval[0]; if (n > 100) return 100; } while (min_val > eps__); return n; } }; }; #endif
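The closed-form overlap integral int_0^R j_l(nu1 r) j_l(nu2 r) r^2 dr implemented (three times) above is easy to sanity-check numerically. Below is a minimal standalone cross-check against trapezoidal quadrature, using the same GSL routines the header already uses (link with -lgsl -lgslcblas -lm); the probe values l, nu1, nu2, R are arbitrary.

#include <stdio.h>
#include <math.h>
#include <gsl/gsl_sf_bessel.h>

int main(void)
{
    const int l = 2;
    const double nu1 = 1.3, nu2 = 2.7, R = 2.0;

    /* Closed form: same expression as overlap() for nu1 != nu2, l > 0. */
    double ja[l + 2], jb[l + 2];
    gsl_sf_bessel_jl_array(l + 1, R * nu1, ja);
    gsl_sf_bessel_jl_array(l + 1, R * nu2, jb);
    double exact = R * R * (nu2 * jb[l - 1] * ja[l] - nu1 * ja[l - 1] * jb[l])
                 / (nu1 * nu1 - nu2 * nu2);

    /* Brute-force trapezoidal quadrature of the same integral. */
    const int n = 200000;
    double h = R / n, sum = 0.0;
    for (int i = 1; i < n; i++) {
        double r = i * h;
        sum += gsl_sf_bessel_jl(l, nu1 * r) * gsl_sf_bessel_jl(l, nu2 * r) * r * r;
    }
    /* The integrand vanishes at r = 0; add half of the r = R endpoint. */
    sum = (sum + 0.5 * gsl_sf_bessel_jl(l, nu1 * R) * gsl_sf_bessel_jl(l, nu2 * R) * R * R) * h;

    printf("closed form: %.12f\nquadrature : %.12f\n", exact, sum);
    return 0;
}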
truedep1-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // This one has race condition due to true dependence #include <stdlib.h> #include <stdio.h> int main(int argc, char* argv[]) { int i; int len=100; if (argc>1) len = atoi(argv[1]); int a[len]; for (i=0;i<len;i++) a[i]=i; #pragma omp parallel for for (i=0;i<len-1;i++) a[i+1]=a[i]+1; return 0; }
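The loop above is racy on purpose: iteration i writes a[i+1] while iteration i+1 reads it (a true, i.e. flow, dependence), so the `#pragma omp parallel for` is incorrect as written. Purely for illustration (this is not part of the benchmark), the recurrence closed-forms away given the initialization a[i] = i: sequentially a[i+1] = a[i] + 1 always stores i + 1, so an equivalent loop with no loop-carried dependence is:

/* Illustrative only: same final array contents as the sequential loop,
 * assuming a[i] == i on entry, but each iteration writes a distinct
 * element, so the parallel for is now safe. */
void closed_form_version(int len, int *a)
{
  int i;
#pragma omp parallel for
  for (i = 0; i < len - 1; i++)
    a[i + 1] = i + 1;
}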
yuv_to_rgb2.c
/* * YUV to RGB convert * * Copyright (C) 2019 Hiroshi Kuwagata <kgt9221@gamil.com> */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #define ROTATE0 0x00000000 #define ROTATE90 0x00000001 #define ROTATE180 0x00000002 #define ROTATE270 0x00000003 #define FLIP 0x00000080 #ifdef ENABLE_NEON #if defined(__ARM_NEON) || defined(__ARM_NEON__) #include <arm_neon.h> #else /* defined(__ARM_NEON) || defined(__ARM_NEON__) */ #error "ARM NEON instruction is not supported." #endif /* defined(__ARM_NEON) || defined(__ARM_NEON__) */ #endif /* defined(ENABLE_NEON) */ #ifdef ENABLE_SSE2 #if defined(__SSE2__) #if defined(_MSC_VER) #include <intrin.h> #elif defined(__GNUC__) #include <x86intrin.h> #endif /* defined(*) */ #else /* defined(__SSE2__) */ #error "SSE2 instruction is not supported." #endif /* defined(__SSE2__) */ #endif /* defined(ENABLE_SSE2) */ #define SATURATE8(x) (uint8_t)(((x) < 0)? 0:(((x) > 255)? 255:(x))) struct dest_info { uint8_t* b0; uint8_t* g0; uint8_t* r0; uint8_t* b1; uint8_t* g1; uint8_t* r1; uint8_t* b2; uint8_t* g2; uint8_t* r2; uint8_t* b3; uint8_t* g3; uint8_t* r3; }; static inline void set0(uint8_t*base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + (y * (wd * 3)); di->r0 = p + 0; di->g0 = p + 1; di->b0 = p + 2; di->r1 = p + 3; di->g1 = p + 4; di->b1 = p + 5; p += (wd *3); di->r2 = p + 0; di->g2 = p + 1; di->b2 = p + 2; di->r3 = p + 3; di->g3 = p + 4; di->b3 = p + 5; } static inline void set0f(uint8_t*base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + ((y + 1) * (wd * 3)); di->r0 = p - 6; di->g0 = p - 5; di->b0 = p - 4; di->r1 = p - 3; di->g1 = p - 2; di->b1 = p - 1; p += (wd * 3); di->r2 = p - 6; di->g2 = p - 5; di->b2 = p - 4; di->r3 = p - 3; di->g3 = p - 2; di->b3 = p - 1; } static inline void inc0(int wd, int ht, struct dest_info* di) { di->b0 += 6; di->g0 += 6; di->r0 += 6; di->b1 += 6; di->g1 += 6; di->r1 += 6; di->b2 += 6; di->g2 += 6; di->r2 += 6; di->b3 += 6; di->g3 += 6; di->r3 += 6; } static inline void set90(uint8_t*base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + ((ht - y) * 3); di->r0 = p - 3; di->g0 = p - 2; di->b0 = p - 1; di->r2 = p - 6; di->g2 = p - 5; di->b2 = p - 4; p += (ht * 3); di->r1 = p - 3; di->g1 = p - 2; di->b1 = p - 1; di->r3 = p - 6; di->g3 = p - 5; di->b3 = p - 4; } static inline void set90f(uint8_t*base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + (y * 3); di->r0 = p + 0; di->g0 = p + 1; di->b0 = p + 2; di->r2 = p + 3; di->g2 = p + 4; di->b2 = p + 5; p += (ht * 3); di->r1 = p + 0; di->g1 = p + 1; di->b1 = p + 2; di->r3 = p + 3; di->g3 = p + 4; di->b3 = p + 5; } static inline void inc90(int wd, int ht, struct dest_info* di) { int st; st = ht * 6; di->b0 += st; di->g0 += st; di->r0 += st; di->b1 += st; di->g1 += st; di->r1 += st; di->b2 += st; di->g2 += st; di->r2 += st; di->b3 += st; di->g3 += st; di->r3 += st; } static inline void set180(uint8_t*base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + ((ht - y) * (wd * 3)); di->r0 = p - 3; di->g0 = p - 2; di->b0 = p - 1; di->r2 = p - 6; di->g2 = p - 5; di->b2 = p - 4; p -= (wd * 3); di->r1 = p - 3; di->g1 = p - 2; di->b1 = p - 1; di->r3 = p - 6; di->g3 = p - 5; di->b3 = p - 4; } static inline void set180f(uint8_t*base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + ((ht - (y + 1)) * (wd * 3)); di->r0 = p + 0; di->g0 = p + 1; di->b0 = p + 2; di->r2 = p + 3; di->g2 = p + 4; di->b2 = p + 5; p -= (wd * 3); di->r1 = p + 0; di->g1 = p + 1; di->b1 = 
p + 2; di->r3 = p + 3; di->g3 = p + 4; di->b3 = p + 5; } static void inc180(int wd, int ht, struct dest_info* di) { di->b0 -= 6; di->g0 -= 6; di->r0 -= 6; di->b1 -= 6; di->g1 -= 6; di->r1 -= 6; di->b2 -= 6; di->g2 -= 6; di->r2 -= 6; di->b3 -= 6; di->g3 -= 6; di->r3 -= 6; } static void set270(uint8_t* base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + (((ht * (wd - 1)) + y) * 3); di->r0 = p + 0; di->g0 = p + 1; di->b0 = p + 2; di->r2 = p + 3; di->g2 = p + 4; di->b2 = p + 5; p -= (ht * 3); di->r1 = p + 0; di->g1 = p + 1; di->b1 = p + 2; di->r3 = p + 3; di->g3 = p + 4; di->b3 = p + 5; } static void set270f(uint8_t* base, int wd, int ht, int y, struct dest_info* di) { uint8_t* p; p = base + (((ht * wd) - y) * 3); di->r0 = p - 3; di->g0 = p - 2; di->b0 = p - 1; di->r2 = p - 6; di->g2 = p - 5; di->b2 = p - 4; p -= (ht * 3); di->r1 = p - 3; di->g1 = p - 2; di->b1 = p - 1; di->r3 = p - 6; di->g3 = p - 5; di->b3 = p - 4; } static void inc270(int wd, int ht, struct dest_info* di) { int st; st = ht * 6; di->b0 -= st; di->g0 -= st; di->r0 -= st; di->b1 -= st; di->g1 -= st; di->r1 -= st; di->b2 -= st; di->g2 -= st; di->r2 -= st; di->b3 -= st; di->g3 -= st; di->r3 -= st; } #ifdef ENABLE_NEON /* * Each 2x2 block of pixels is processed as one unit. * The register lane assignment for the pixels is: * * 0 1 * 2 3 * * The YUV to RGB conversion formulas are: * * R = (1.164f * (y - 16)) + (1.596f * (v - 128)) * G = (1.164f * (y - 16)) - (0.813f * (v - 128)) - (0.391f * (u - 128)) * B = (1.164f * (y - 16)) + (2.018f * (u - 128)) * * To gain speed from integer-only arithmetic, these are implemented as: * * R = ((1192 * (y - 16)) + (1634 * (v - 128))) >> 10 * G = ((1192 * (y - 16)) - ( 833 * (v - 128)) - (400 * (u - 128))) >> 10 * B = ((1192 * (y - 16)) + (2066 * (u - 128))) >> 10 * */ static inline void conv(uint8_t* y1, uint8_t* y2, uint8_t* u, uint8_t* v, int32x4_t c16, int32x4_t min, int32x4_t max, struct dest_info* di) { int32x4_t tl = vdupq_n_s32(0); // temporary for load; defined value so vsetq_lane_s32 never reads indeterminate lanes int32x4_t vy; int32x4_t vr; int32x4_t vg; int32x4_t vb; /* * Y */ tl = vsetq_lane_s32(y1[0], tl, 0); tl = vsetq_lane_s32(y1[1], tl, 1); tl = vsetq_lane_s32(y2[0], tl, 2); tl = vsetq_lane_s32(y2[1], tl, 3); tl = vsubq_s32(tl, c16); vy = vmulq_n_s32(tl, 1192); /* * U */ tl = vmovq_n_s32(u[0] - 128); vg = vmlsq_n_s32(vy, tl, 400); vb = vmlaq_n_s32(vy, tl, 2066); /* * V */ tl = vmovq_n_s32(v[0] - 128); vr = vmlaq_n_s32(vy, tl, 1634); vg = vmlsq_n_s32(vg, tl, 833); /* * undo the fixed-point scaling and saturate to [0, 255] */ vr = vshrq_n_s32(vr, 10); vr = vmaxq_s32(vr, min); vr = vminq_s32(vr, max); vg = vshrq_n_s32(vg, 10); vg = vmaxq_s32(vg, min); vg = vminq_s32(vg, max); vb = vshrq_n_s32(vb, 10); vb = vmaxq_s32(vb, min); vb = vminq_s32(vb, max); /* * output RGB pixels */ *(di->r0) = vgetq_lane_s32(vr, 0); *(di->g0) = vgetq_lane_s32(vg, 0); *(di->b0) = vgetq_lane_s32(vb, 0); *(di->r1) = vgetq_lane_s32(vr, 1); *(di->g1) = vgetq_lane_s32(vg, 1); *(di->b1) = vgetq_lane_s32(vb, 1); *(di->r2) = vgetq_lane_s32(vr, 2); *(di->g2) = vgetq_lane_s32(vg, 2); *(di->b2) = vgetq_lane_s32(vb, 2); *(di->r3) = vgetq_lane_s32(vr, 3); *(di->g3) = vgetq_lane_s32(vg, 3); *(di->b3) = vgetq_lane_s32(vb, 3); } void i420_to_rgb_0(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set0(_d, wd, ht, i, &di); for
(j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc0(wd, ht, &di); } } } void i420_to_rgb_0f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set0f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc180(wd, ht, &di); } } } void i420_to_rgb_90(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set90(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc90(wd, ht, &di); } } } void i420_to_rgb_90f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; struct dest_info di; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set90f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc90(wd, ht, &di); } } } void i420_to_rgb_180(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set180(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc180(wd, ht, &di); } } } void i420_to_rgb_180f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = 
_v + ((i / 2) * (wd / 2)); set180f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc0(wd, ht, &di); } } } void i420_to_rgb_270(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set270(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc270(wd, ht, &di); } } } void i420_to_rgb_270f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; int32x4_t c16; int32x4_t min; int32x4_t max; c16 = vmovq_n_s32(16); min = vmovq_n_s32(0); max = vmovq_n_s32(255); #pragma omp parallel for private(j) shared(c16,min,max) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set270f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, c16, min, max, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointer */ inc270(wd, ht, &di); } } } #else /* !defined(ENABLE_NEON) */ /* * Each 2x2 block of pixels is processed as one unit. * The YUV to RGB conversion formulas are: * * R = (1.164f * (y - 16)) + (1.596f * (v - 128)) * G = (1.164f * (y - 16)) - (0.813f * (v - 128)) - (0.391f * (u - 128)) * B = (1.164f * (y - 16)) + (2.018f * (u - 128)) * * To gain speed from integer-only arithmetic, these are implemented as: * * R = ((1192 * (y - 16)) + (1634 * (v - 128))) >> 10 * G = ((1192 * (y - 16)) - ( 833 * (v - 128)) - (400 * (u - 128))) >> 10 * B = ((1192 * (y - 16)) + (2066 * (u - 128))) >> 10 */ static inline void conv(uint8_t* y1, uint8_t* y2, uint8_t* u, uint8_t* v, struct dest_info* di) { int c; int d; int e; int r0; int g0; int b0; int r; int g; int b; d = ((int)u[0]) - 128; e = ((int)v[0]) - 128; r0 = (e * 1634); g0 = (d * 400) + (e * 833); b0 = (d * 2066); /* * 0,0 */ c = (((int)y1[0]) - 16) * 1192; r = (c + r0) >> 10; g = (c - g0) >> 10; b = (c + b0) >> 10; *(di->r0) = SATURATE8(r); *(di->g0) = SATURATE8(g); *(di->b0) = SATURATE8(b); /* * 0,1 */ c = (((int)y1[1]) - 16) * 1192; r = (c + r0) >> 10; g = (c - g0) >> 10; b = (c + b0) >> 10; *(di->r1) = SATURATE8(r); *(di->g1) = SATURATE8(g); *(di->b1) = SATURATE8(b); /* * 1,0 */ c = (((int)y2[0]) - 16) * 1192; r = (c + r0) >> 10; g = (c - g0) >> 10; b = (c + b0) >> 10; *(di->r2) = SATURATE8(r); *(di->g2) = SATURATE8(g); *(di->b2) = SATURATE8(b); /* * 1,1 */ c = (((int)y2[1]) - 16) * 1192; r = (c + r0) >> 10; g = (c - g0) >> 10; b = (c + b0) >> 10; *(di->r3) = SATURATE8(r); *(di->g3) = SATURATE8(g); *(di->b3) = SATURATE8(b); } void i420_to_rgb_0(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2));
set0(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc0(wd, ht, &di); } } } void i420_to_rgb_0f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set0f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc180(wd, ht, &di); } } } void i420_to_rgb_90(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set90(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc90(wd, ht, &di); } } } void i420_to_rgb_90f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set90f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc90(wd, ht, &di); } } } void i420_to_rgb_180(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set180(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc180(wd, ht, &di); } } } void i420_to_rgb_180f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set180f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc0(wd, ht, &di); } } } void i420_to_rgb_270(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set270(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers 
*/ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc270(wd, ht, &di); } } } void i420_to_rgb_270f(uint8_t* _y, uint8_t* _u, uint8_t* _v, int wd, int ht, uint8_t* _d) { int i; int j; #pragma omp parallel for private(j) for (i = 0; i < ht; i += 2) { uint8_t* y1; uint8_t* y2; uint8_t* u; uint8_t* v; struct dest_info di; y1 = _y + (i * wd); y2 = y1 + wd; u = _u + ((i / 2) * (wd / 2)); v = _v + ((i / 2) * (wd / 2)); set270f(_d, wd, ht, i, &di); for (j = 0; j < wd; j += 2) { /* * do convert */ conv(y1, y2, u, v, &di); /* * increase source pointers */ y1 += 2; y2 += 2; u += 1; v += 1; /* * increase destination pointers */ inc270(wd, ht, &di); } } } #endif /* defined(ENABLE_NEON) */
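A quick standalone probe of the fixed-point constants used above: the Q10 coefficients (1192, 1634, 833, 400, 2066) are round(coef * 1024) of the floating-point factors, so the integer path should track the float formula to within about one code value before saturation. A minimal check for the R channel follows; the probe ranges are illustrative, and (like the converter itself) it relies on arithmetic right shift of negative ints.

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

int main(void)
{
    int worst = 0;
    /* Sweep the nominal video ranges for Y and V. */
    for (int y = 16; y <= 235; y++) {
        for (int v = 16; v <= 240; v++) {
            double rf = 1.164 * (y - 16) + 1.596 * (v - 128);
            int ri = ((1192 * (y - 16)) + (1634 * (v - 128))) >> 10;
            int d = abs(ri - (int)lround(rf));
            if (d > worst) worst = d;
        }
    }
    printf("max |fixed - float| for the R channel: %d\n", worst);
    return 0;
}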
impact_acc.c
/* impact_acc.c A Basilisk script to model the impact of a droplet of water impacting onto a moving plate. The domain is set to be in an accelerating frame with the plate, so an additional body force is added. */ #include "parameters.h" // Includes all defined parameters #include "axi.h" // Axisymmetric coordinates #include "navier-stokes/centered.h" // To solve the Navier-Stokes #include "two-phase.h" // Implements two-phase flow #include "view.h" // Creating movies using bview #include "tension.h" // Surface tension of droplet #include "tag.h" // For removing small droplets #include <omp.h> // For openMP parallel /* Computational constants derived from parameters */ double MIN_CELL_SIZE; // Size of the smallest cell double DROP_REFINED_WIDTH; // Width of the refined area around the droplet double PLATE_REFINED_WIDTH; // Width of the refined area around the plate double DROP_CENTRE; // Initial centre of the droplet double IMPACT_TIME; // Theoretical time of impact double MAX_TIME; // Maximum time to run the simulation for /* Global variables */ double start_wall_time; // Time the simulation was started double end_wall_time; // Time the simulation finished int gfs_output_no = 1; // Records how many GFS files have been outputted int plate_output_no = 1; // Records how many plate data files there have been int interface_output_no = 1; // Records how many interface files there have been // Stores time the interface was outputted char interface_time_filename[80] = "interface_times.txt"; double pinch_off_time = 0.; // Time pinch-off of the entrapped bubble occurs double drop_thresh = 1e-4; // Remove droplets threshold /* Force averaging */ double *forces_array; // Forces of the previous timesteps double force_avg; // Average force double current_force; // Current force on plate /* Plate position variables */ double s_previous = 0.; // Value of s at previous timestep double s_current = 0.; // Value of s at current timestep double s_next; // Values of s at next timestep double ds_dt; // First time derivative of s double d2s_dt2; // Second time derivative of s /* Boundary conditions */ // Conditions for entry from above u.n[right] = neumann(0.); // Free flow condition p[right] = dirichlet(0.); // 0 pressure far from surface // Conditions far from the droplet in the radial direction u.n[top] = neumann(0.); // Allows outflow through boundary u.t[top] = dirichlet(0.); // Stationary vertical flow p[top] = dirichlet(0.); // 0 pressure far from surface // Conditions on surface u.n[left] = dirichlet(0.); // No flow through surface u.t[left] = dirichlet(0.); // No slip at surface void remove_droplets_region(struct RemoveDroplets p,\ double ignore_region_x_limit, double ignore_region_y_limit); /* INITIALISATION EVENTS These events initialse the problem */ int main() { /* Main function to set up the simulation */ /* Create the computational domain */ init_grid(1 << MINLEVEL); // Create grid according to the minimum level size(BOX_WIDTH); // Size of the domain /* Set physical constants */ rho1 = 1.; // Density of water phase rho2 = RHO_R; // Density of air phase mu1 = 1. / REYNOLDS; // Viscosity of water phase mu2 = mu1 * MU_R; // Viscosity of air phase f.sigma = 1. 
/ WEBER; // Surface tension at interface /* Derived constants */ MIN_CELL_SIZE = BOX_WIDTH / pow(2, MAXLEVEL); // Size of the smallest cell DROP_REFINED_WIDTH = 0.05; // Refined region around droplet PLATE_REFINED_WIDTH \ = PLATE_REFINE_NO * MIN_CELL_SIZE; // Refined region width around plate DROP_CENTRE = INITIAL_DROP_HEIGHT + DROP_RADIUS; IMPACT_TIME = INITIAL_DROP_HEIGHT / (-DROP_VEL); /* Maximum time is shortly after Wagner theory would predict the turnover point reaches the radius of the droplet */ double wagner_max_time = 2.0 * (IMPACT_TIME + 1. / 3.); MAX_TIME = min(HARD_MAX_TIME, wagner_max_time); /* Initialises interface time file */ FILE* interface_time_file = fopen(interface_time_filename, "w"); fclose(interface_time_file); /* Allocates the forces array and zeroes every entry */ forces_array = malloc(sizeof(double) * AVG_FORCE_NO); for (int j = 0; j < AVG_FORCE_NO; j++) forces_array[j] = 0; run(); // Runs the simulation } event init(t = 0) { /* Initialises the flow as a spherical droplet falling downwards */ // Records the wall time start_wall_time = omp_get_wtime(); /* Refines around the droplet */ refine(sq(x - DROP_CENTRE) + sq(y) < sq(DROP_RADIUS + DROP_REFINED_WIDTH) \ && sq(x - DROP_CENTRE) + sq(y) > sq(DROP_RADIUS - DROP_REFINED_WIDTH) \ && level < MAXLEVEL); /* Initialises the droplet volume fraction */ fraction(f, -sq(x - DROP_CENTRE) - sq(y) + sq(DROP_RADIUS)); /* Initialise the droplet velocity downwards */ foreach() { u.x[] = DROP_VEL * f[]; } boundary ((scalar *){u}); } /* CALCULATION AND OUTPUT EVENTS These events calculate statistics (such as force) and output data about the system, but do not alter it */ event forces(i++) { /* Calculates the force felt on the plate */ // Initialises the current_force variable current_force = 0; // Iterates over the solid boundary foreach_boundary(left, reduction(+:current_force)) { if (y < PLATE_WIDTH) { // Viscosity average in the cell above the plate double avg_mu = f[] * (mu1 - mu2) + mu2; // Viscous stress in the cell above the plate double viscous_stress = \ - 2 * avg_mu * (u.x[1, 0] - u.x[]) / Delta; // Adds the contribution to the force using the trapezium rule current_force += y * Delta * (p[] + viscous_stress); } } // Integrates force over the angular part current_force = 2 * pi * current_force; /* Force averaging. Calculates the average force over the last AVG_FORCE_NO timesteps */ force_avg = 0; // Initialise to be zero /* Loops over the forces_array, discarding the oldest element and adding the newest */ #pragma omp critical for (int j = 0; j < AVG_FORCE_NO - 1; j++) { // Shifts the items one back forces_array[j] = forces_array[j + 1]; // Increments the average force_avg += forces_array[j]; } // Adds the current force value to the array and force average forces_array[AVG_FORCE_NO - 1] = current_force; force_avg += forces_array[AVG_FORCE_NO - 1]; // Calculates the force average force_avg = force_avg / ((double) AVG_FORCE_NO); } event plate_derivatives(i++) { /* Calculates the derivatives of the plate position */ /* Calculates force term. If we are before FORCE_DELAY_TIME, we apply no force, else we apply the averaged force, force_avg */ double force_term; if (t < FORCE_DELAY_TIME) { force_term = 0.; } else { force_term = force_avg; } /* Solves the ODE for the updated plate position and acceleration, with the averaged force */ s_next = (dt * dt * force_term \ + (2. * ALPHA - dt * dt * GAMMA) * s_current \ - (ALPHA - dt * BETA / 2.)
* s_previous) \ / (ALPHA + dt * BETA / 2.); /* Updates values of s and its derivatives */ ds_dt = (s_next - s_previous) / (2. * dt); d2s_dt2 = (s_next - 2 * s_current + s_previous) / (dt * dt); s_previous = s_current; s_current = s_next; /* OVERRIDE: CONSTANT ACCELERATION */ if (CONST_ACC) { if (t < IMPACT_TIME) { d2s_dt2 = 0.; ds_dt = 0.; s_current = 0.; } else { d2s_dt2 = PLATE_ACC; ds_dt = d2s_dt2 * (t - IMPACT_TIME); s_current = 0.5 * d2s_dt2 * (t - IMPACT_TIME) * (t - IMPACT_TIME); } } } event output_data (t += PLATE_OUTPUT_TIMESTEP) { /* Outputs data about the flow */ if ((t >= START_OUTPUT_TIME) && (t <= END_OUTPUT_TIME)) { char plate_output_filename[80]; sprintf(plate_output_filename, "plate_output_%d.txt", plate_output_no); FILE *plate_output_file = fopen(plate_output_filename, "w"); // Adds the time to the first line of the file fprintf(plate_output_file, "t = %g\n", t); // Iterates over the solid boundary foreach_boundary(left) { if (y < PLATE_WIDTH) { // Viscosity average in the cell above the plate double avg_mu = f[] * (mu1 - mu2) + mu2; // Viscous stress in the cell above the plate double viscous_stress = \ - 2 * avg_mu * (u.x[1, 0] - u.x[]) / Delta; /* Plate output */ fprintf(plate_output_file, "y = %g, x = %g, p = %g, strss = %g, f = %g\n",\ y, x, p[], viscous_stress, f[]); } } // Close plate output file fclose(plate_output_file); plate_output_no++; // Increments output number fprintf(stderr, \ "t = %.8f, v = %.8f, F = %.8f, F_avg = %g, s = %g, ds_dt = %g, d2s_dt2 = %g\n", \ t, 2 * pi * statsf(f).sum, current_force, force_avg, s_current, ds_dt, d2s_dt2); } } event output_interface (t += INTERFACE_OUTPUT_TIMESTEP) { /* Outputs the interface locations of the droplet */ if ((t >= START_OUTPUT_TIME) && (t <= END_OUTPUT_TIME)) { // Creates text file to save output to char interface_filename[80]; sprintf(interface_filename, "interface_%d.txt", interface_output_no); FILE *interface_file = fopen(interface_filename, "w"); // Outputs the interface locations and closes the file output_facets(f, interface_file); fclose(interface_file); // Appends the interface time file with the time and plate position (0) FILE *interface_time_file = fopen(interface_time_filename, "a"); fprintf(interface_time_file, "%d, %g, %g\n", \ interface_output_no, t, 0.); fclose(interface_time_file); interface_output_no++; } } event gfs_output (t += GFS_OUTPUT_TIMESTEP) { /* Saves a gfs file */ if ((t >= START_OUTPUT_TIME) && (t <= END_OUTPUT_TIME)) { char gfs_filename[80]; sprintf(gfs_filename, "gfs_output_%d.gfs", gfs_output_no); output_gfs(file = gfs_filename); gfs_output_no++; } } event movies (t += 0.001) { /* Produces movies using bview */ if (MOVIES) { // Creates a string with the time to put on the plots char time_str[80]; sprintf(time_str, "t = %g\n", t); // RC Changed so that the view is zoomed in on the target region // Can leave general view as well, but for debugging this is more informative // Set up bview box view (width = 1024, height = 1024, fov = 5.0, ty = -0.1, \ quat = {0, 0, -0.707, 0.707}); /* Movie of the volume fraction of the droplet */ clear(); draw_vof("f", lw = 2); squares("f", linear = true, spread = -1, map = cool_warm); // RC - minor changes here and beyond mirror ({0,1}) { draw_vof("f", lw = 2); squares("f", linear = true, spread = -1, map = cool_warm); } draw_string(time_str, pos=1, lc= { 0, 0, 0 }, lw=2); save ("tracer.mp4"); /* Movie of the vertical velocity */ clear(); draw_vof("f", lw = 2); squares("u.x", linear = true, spread = -1, map = cool_warm); mirror ({0,1}) { draw_vof("f", lw = 2); squares("u.x", linear = true, spread = -1, map = cool_warm); } draw_string(time_str, pos=1, lc= { 0, 0, 0 }, lw=2); save ("vertical_vel.mp4"); /* Movie of the horizontal velocity */ clear(); draw_vof("f", lw = 2); squares("u.y", linear = true, spread = -1, map = cool_warm); mirror ({0,1}) { draw_vof("f", lw = 2); squares("u.y", linear = true, spread = -1, map = cool_warm); } draw_string(time_str, pos=1, lc= { 0, 0, 0 }, lw=2); save ("horizontal_vel.mp4"); /* Movie of the pressure */ clear(); draw_vof("f", lw = 2); squares("p", linear = true, spread = -1, map = cool_warm); mirror ({0,1}) { draw_vof("f", lw = 2); squares("p", linear = true, spread = -1, map = cool_warm); } draw_string(time_str, pos=1, lc= { 0, 0, 0 }, lw=2); save ("pressure.mp4"); } } /* ALTERING EVENTS These events alter the flow in some way, through refinement, adding boundary conditions, adding acceleration terms and removal. */ event refinement (i++) { /* Refines the grid where appropriate */ /* Adapts with respect to velocities and volume fraction */ adapt_wavelet ({u.x, u.y, f}, (double[]){1e-2, 1e-2, 1e-4}, minlevel = MINLEVEL, maxlevel = MAXLEVEL); /* Refines above the plate */ refine((y < PLATE_WIDTH) && (x <= PLATE_REFINED_WIDTH) \ && level < MAXLEVEL); /* Refines box around the origin for entrapped bubble */ refine((y < 0.05 * DROP_RADIUS) && (x < 0.05 * DROP_RADIUS) \ && level < MAXLEVEL); } event moving_plate (i++) { /* Moves the plate as a function of the force on it */ /* Updates velocity BCs */ u.t[top] = dirichlet(ds_dt); u.n[left] = y < PLATE_WIDTH ? dirichlet(0.) : dirichlet(ds_dt); boundary ((scalar *){u}); // Redefine boundary conditions for u } event acceleration (i++) { /* Adds acceleration due to gravity and the moving plate at each time step */ face vector av = a; // Acceleration at each face /* Adds acceleration due to gravity and the plate */ foreach_face(x){ av.x[] += d2s_dt2 - 1./sq(FR); } } event small_droplet_removal (i++) { /* Removes any small droplets or bubbles that have formed and are smaller than a specific size */ /* Minimum diameter (in cells) a droplet/bubble has to be, else it will be removed */ int drop_min_cell_width = 16; /* Region to ignore */ double ignore_region_x_limit = 0.02; double ignore_region_y_limit = 0.02; /* Counts the number of bubbles there are */ scalar bubbles[]; foreach() { bubbles[] = 1. - f[] > drop_thresh; } int bubble_no = tag(bubbles); /* Determines if we are before or after the pinch-off time */ if (pinch_off_time == 0.)
{ /* The first time the bubble number is above 1, we define it to be the pinch off time */ if (bubble_no > 1) { pinch_off_time = t; } } else if (t >= pinch_off_time + REMOVAL_DELAY) { /* If we are a certain time after the pinch-off time, remove drops and bubbles below the specified minimum size */ struct RemoveDroplets remove_struct; remove_struct.f = f; remove_struct.minsize = drop_min_cell_width; remove_struct.threshold = drop_thresh; remove_struct.bubbles = false; // Remove droplets remove_droplets_region(remove_struct, ignore_region_x_limit, ignore_region_y_limit); // Remove bubbles remove_struct.bubbles = true; remove_droplets_region(remove_struct, ignore_region_x_limit, ignore_region_y_limit); } // // Remove any bubbles left in the ignore region // foreach() { // if (x < ignore_region_x_limit && y < ignore_region_y_limit) { // f[] = 1.; // } // } } /* END */ event end (t = MAX_TIME) { /* Ends the simulation */ end_wall_time = omp_get_wtime(); // Records the time of finish free(forces_array); fprintf(stderr, "Finished after %g seconds\n", \ end_wall_time - start_wall_time); } /* FUNCTION DECLARATIONS */ /* Alternative remove_droplets definitions */ void remove_droplets_region(struct RemoveDroplets p,\ double ignore_region_x_limit, double ignore_region_y_limit) { scalar d[], f = p.f; double threshold = p.threshold ? p.threshold : 1e-4; foreach() { d[] = (p.bubbles ? 1. - f[] : f[]) > threshold; } int n = tag (d), size[n], keep_tags[n]; for (int i = 0; i < n; i++) { size[i] = 0; keep_tags[i] = 1; } foreach_leaf() { if (d[] > 0) { int j = ((int) d[]) - 1; size[j]++; if ((x < ignore_region_x_limit) && (y < ignore_region_y_limit)) { keep_tags[j] = 0; } } } #if _MPI MPI_Allreduce (MPI_IN_PLACE, size, n, MPI_INT, MPI_SUM, MPI_COMM_WORLD); #endif int minsize = pow (p.minsize ? p.minsize : 3, dimension); foreach() { int j = ((int) d[]) - 1; if (d[] > 0 && size[j] < minsize && keep_tags[j] == 1) f[] = p.bubbles; } boundary ({f}); }
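The plate update in plate_derivatives() is the central-difference discretization of the damped oscillator ALPHA*s'' + BETA*s' + GAMMA*s = F: substituting s'' ~ (s_next - 2 s_current + s_previous)/dt^2 and s' ~ (s_next - s_previous)/(2 dt) and solving for s_next reproduces the expression used in the event. A minimal sketch of that step in isolation (the function name is illustrative):

/* One central-difference step of alpha s'' + beta s' + gamma s = force.
 * Multiplying the discretized ODE by dt^2 and collecting s_next gives
 * exactly the update used in plate_derivatives(). */
double plate_step(double alpha, double beta, double gamma,
                  double force, double dt, double s_prev, double s_cur)
{
  return (dt * dt * force
          + (2. * alpha - dt * dt * gamma) * s_cur
          - (alpha - dt * beta / 2.) * s_prev)
         / (alpha + dt * beta / 2.);
}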
time_dgetrf-task.c
/** * * @generated d Tue Jan 7 11:45:24 2014 * **/ #define _TYPE double #define _PREC double #define _LAMCH LAPACKE_dlamch_work #define _NAME "PLASMA_dgetrf_Tile" /* See Lawn 41 page 120 */ #define _FMULS FMULS_GETRF(M, N) #define _FADDS FADDS_GETRF(M, N) #include "./timing.inc" static double RunTest(real_Double_t *t_, struct user_parameters* params) { double t; int64_t N = params->matrix_size; int64_t NB = params->blocksize; int check = params->check; double check_res = 0; /* Allocate Data */ PLASMA_desc *descA = NULL; double* ptr = malloc(N * N * sizeof(double)); PLASMA_Desc_Create(&descA, ptr, PlasmaRealDouble, NB, NB, NB*NB, N, N, 0, 0, N, N); int* piv = (int*)malloc(N * sizeof(int)); #pragma omp parallel #pragma omp master plasma_pdpltmg_quark(*descA, 3456); /* Save AT in lapack layout for check */ double *A = NULL; if(check) { A = (double*)malloc(N * N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descA, (void*)A, N); } START_TIMING(); #pragma omp parallel #pragma omp master plasma_pdgetrf_rectil_quark(*descA, piv); STOP_TIMING(); /* Check the solution */ if ( check ) { PLASMA_desc *descB = NULL; double* ptrB = (double*)malloc(N * sizeof(double)); PLASMA_Desc_Create(&descB, ptrB, PlasmaRealDouble, NB, NB, NB*NB, N, 1, 0, 0, N, 1); plasma_pdpltmg_seq(*descB, 7732 ); double* b = (double*)malloc(N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descB, (void*)b, N); PLASMA_dgetrs_Tile( PlasmaNoTrans, descA, piv, descB ); double* x = (double*)malloc(N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descB, (void*)x, N); check_res = d_check_solution(N, N, 1, A, N, b, x, N); PASTE_CODE_FREE_MATRIX( descB ); free(A); free(b); free(x); } PASTE_CODE_FREE_MATRIX( descA ); free( piv ); return check_res; }
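d_check_solution() above reduces the solve to a single residual number; LAPACK-style timing harnesses conventionally use a scaled backward-error residual of the form ||b - A x|| / (||A|| ||x|| N eps), accepted when it is O(1). The stand-in below illustrates that style of check under that assumption; it is not PLASMA's implementation, and the name is hypothetical.

#include <math.h>
#include <float.h>

/* Scaled infinity-norm residual for A x = b, A stored column-major
 * with leading dimension N, as in LAPACK. */
double scaled_residual(int N, const double* A, const double* x, const double* b)
{
    double rmax = 0., anorm = 0., xnorm = 0.;
    for (int i = 0; i < N; i++) {
        double r = b[i], rowsum = 0.;
        for (int j = 0; j < N; j++) {
            r -= A[i + j * N] * x[j];      /* accumulate row i of b - A x */
            rowsum += fabs(A[i + j * N]);  /* row sum for ||A||_inf */
        }
        if (fabs(r) > rmax) rmax = fabs(r);
        if (rowsum > anorm) anorm = rowsum;
        if (fabs(x[i]) > xnorm) xnorm = fabs(x[i]);
    }
    return rmax / (anorm * xnorm * N * DBL_EPSILON);
}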
GB_binop__lor_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lor_int64) // A.*B function (eWiseMult): GB (_AemultB_08__lor_int64) // A.*B function (eWiseMult): GB (_AemultB_02__lor_int64) // A.*B function (eWiseMult): GB (_AemultB_04__lor_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_int64) // A*D function (colscale): GB (_AxD__lor_int64) // D*A function (rowscale): GB (_DxB__lor_int64) // C+=B function (dense accum): GB (_Cdense_accumB__lor_int64) // C+=b function (dense accum): GB (_Cdense_accumb__lor_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_int64) // C=scalar+B GB (_bind1st__lor_int64) // C=scalar+B' GB (_bind1st_tran__lor_int64) // C=A+scalar GB (_bind2nd__lor_int64) // C=A'+scalar GB (_bind2nd_tran__lor_int64) // C type: int64_t // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 0 // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) || (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_INT64 || GxB_NO_LOR_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lor_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lor_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lor_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lor_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lor_int64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lor_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; beta_scalar = 
(*((int64_t *) beta_scalar_in)) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lor_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lor_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lor_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lor_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) || (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lor_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) || (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) || (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__lor_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t 
*restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) || (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lor_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
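// Appendix (not part of the generated file above): a minimal standalone
// sketch of what the GB_BINOP macro computes for LOR_INT64, using the same
// OpenMP loop shape as GB (_bind2nd__lor_int64). The name lor_int64_demo is
// hypothetical and only for illustration.
#include <stdint.h>
#include <stdio.h>

static void lor_int64_demo (int64_t *Cx, const int64_t *Ax,
    const int64_t *Bx, int64_t n, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < n ; p++)
    {
        // cij = ((aij != 0) || (bij != 0)): logical OR over int64 inputs
        Cx [p] = ((Ax [p] != 0) || (Bx [p] != 0)) ;
    }
}

int main (void)
{
    int64_t A [4] = { 0, 3, 0, -7 }, B [4] = { 0, 0, 2, 5 }, C [4] ;
    lor_int64_demo (C, A, B, 4, 2) ;
    for (int p = 0 ; p < 4 ; p++) printf ("%lld\n", (long long) C [p]) ;
    // prints 0, 1, 1, 1
    return (0) ;
}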
3d25pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) {
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[]) {
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=2*Nt-2;t1++) {
        lbp=ceild(t1+2,2);
        ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(ceild(t1-8,12),ceild(4*t2-Nz-11,24));t3<=min(min(floord(4*Nt+Ny-9,24),floord(2*t1+Ny-3,24)),floord(4*t2+Ny-9,24));t3++) {
            for (t4=max(max(ceild(t1-1020,1024),ceild(4*t2-Nz-2035,2048)),ceild(24*t3-Ny-2035,2048));t4<=min(min(min(floord(4*Nt+Nx-9,2048),floord(2*t1+Nx-3,2048)),floord(4*t2+Nx-9,2048)),floord(24*t3+Nx+11,2048));t4++) {
              for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
                for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
                    lbv=max(2048*t4,4*t5+4);
                    ubv=min(2048*t4+2047,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] *
(A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
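/*
 * Reference sketch (not from the original benchmark): the untiled loop nest
 * that the CLooG-generated code above is a time-tiled version of. One update
 * applies coef[0] to the center point and coef[1..12] to the +/-1..4
 * neighbors along z, y, and x (25 points in total), writing into the other
 * time plane of the double-buffered array A.
 */
void stencil_3d25pt_naive(int Nt, int Nz, int Ny, int Nx,
                          double ****A, double ****coef)
{
  for (int t = 0; t < Nt; t++) {
    for (int i = 4; i < Nz - 4; i++) {
      for (int j = 4; j < Ny - 4; j++) {
        for (int k = 4; k < Nx - 4; k++) {
          double v = coef[0][i][j][k] * A[t%2][i][j][k];
          for (int r = 1; r <= 4; r++) {
            // coefficients come in (z, y, x) triples per radius r
            v += coef[3*r-2][i][j][k] * (A[t%2][i-r][j][k] + A[t%2][i+r][j][k]);
            v += coef[3*r-1][i][j][k] * (A[t%2][i][j-r][k] + A[t%2][i][j+r][k]);
            v += coef[3*r  ][i][j][k] * (A[t%2][i][j][k-r] + A[t%2][i][j][k+r]);
          }
          A[(t+1)%2][i][j][k] = v;
        }
      }
    }
  }
}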
rt_dttmqr.c
#include "runtime.h" void RT_CORE_dttmqr(Quark *quark, Quark_Task_Flags *task_flags, PLASMA_enum side, PLASMA_enum trans, int m1, int n1, int m2, int n2, int k, int ib, int nb, double *A1, int lda1, double *A2, int lda2, const double *V, int ldv, const double *T, int ldt) { plasma_context_t *plasma; plasma = plasma_context_self(); if (plasma->runtime == PLASMA_QUARK) { QUARK_CORE_dttmqr( quark, task_flags, side, trans, m1, n1, m2, n2, k, ib, nb, A1, lda1, A2, lda2, V, ldv, T, ldt); } else if (plasma->runtime == PLASMA_OMPSS) { double *WORK = malloc(ib*nb*sizeof(double)); int ldwork; #pragma omp target device (smp) copy_deps #pragma omp task inout([nb*nb]A1, [nb*nb]A2) in([nb*nb]V, [ib*nb]T) label (dttmqr) CORE_dttmqr(side, trans, m1, n1, m2, n2, k, ib, A1, lda1, A2, lda2, V, ldv, T, ldt, WORK, ldwork); } }
zkboo_prove.c
/* Name: zkboo_prove.c Author: Tan Teik Guan Description: Prove function for ZKBoo for baseline comparison. Modified from MPC_SHA256.c */ /* ============================================================================ Name : MPC_SHA256.c Author : Sobuno Version : 0.1 Description : MPC SHA256 for one block only ============================================================================ */ #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include "zkboo_shared.h" #include "omp.h" #define CH(e,f,g) ((e & f) ^ ((~e) & g)) int totalRandom = 0; int totalSha = 0; int totalSS = 0; int totalHash = 0; int NUM_ROUNDS = 100; uint32_t rand32() { uint32_t x; x = rand() & 0xff; x |= (rand() & 0xff) << 8; x |= (rand() & 0xff) << 16; x |= (rand() & 0xff) << 24; return x; } void printbits(uint32_t n) { if (n) { printbits(n >> 1); printf("%d", n & 1); } } void mpc_XOR(uint32_t x[3], uint32_t y[3], uint32_t z[3]) { z[0] = x[0] ^ y[0]; z[1] = x[1] ^ y[1]; z[2] = x[2] ^ y[2]; } void mpc_AND(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint32_t t[3] = { 0 }; t[0] = (x[0] & y[1]) ^ (x[1] & y[0]) ^ (x[0] & y[0]) ^ r[0] ^ r[1]; t[1] = (x[1] & y[2]) ^ (x[2] & y[1]) ^ (x[1] & y[1]) ^ r[1] ^ r[2]; t[2] = (x[2] & y[0]) ^ (x[0] & y[2]) ^ (x[2] & y[2]) ^ r[2] ^ r[0]; z[0] = t[0]; z[1] = t[1]; z[2] = t[2]; views[0].y[*countY] = z[0]; views[1].y[*countY] = z[1]; views[2].y[*countY] = z[2]; (*countY)++; } void mpc_NEGATE(uint32_t x[3], uint32_t z[3]) { z[0] = ~x[0]; z[1] = ~x[1]; z[2] = ~x[2]; } void mpc_ADD(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t c[3] = { 0 }; uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint8_t a[3], b[3]; uint8_t t; for(int i=0;i<31;i++) { a[0]=GETBIT(x[0]^c[0],i); a[1]=GETBIT(x[1]^c[1],i); a[2]=GETBIT(x[2]^c[2],i); b[0]=GETBIT(y[0]^c[0],i); b[1]=GETBIT(y[1]^c[1],i); b[2]=GETBIT(y[2]^c[2],i); t = (a[0]&b[1]) ^ (a[1]&b[0]) ^ GETBIT(r[1],i); SETBIT(c[0],i+1, t ^ (a[0]&b[0]) ^ GETBIT(c[0],i) ^ GETBIT(r[0],i)); t = (a[1]&b[2]) ^ (a[2]&b[1]) ^ GETBIT(r[2],i); SETBIT(c[1],i+1, t ^ (a[1]&b[1]) ^ GETBIT(c[1],i) ^ GETBIT(r[1],i)); t = (a[2]&b[0]) ^ (a[0]&b[2]) ^ GETBIT(r[0],i); SETBIT(c[2],i+1, t ^ (a[2]&b[2]) ^ GETBIT(c[2],i) ^ GETBIT(r[2],i)); } z[0]=x[0]^y[0]^c[0]; z[1]=x[1]^y[1]^c[1]; z[2]=x[2]^y[2]^c[2]; views[0].y[*countY] = c[0]; views[1].y[*countY] = c[1]; views[2].y[*countY] = c[2]; *countY += 1; } void mpc_ADDK(uint32_t x[3], uint32_t y, uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t c[3] = { 0 }; uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint8_t a[3], b[3]; uint8_t t; for(int i=0;i<31;i++) { a[0]=GETBIT(x[0]^c[0],i); a[1]=GETBIT(x[1]^c[1],i); a[2]=GETBIT(x[2]^c[2],i); b[0]=GETBIT(y^c[0],i); b[1]=GETBIT(y^c[1],i); b[2]=GETBIT(y^c[2],i); t = (a[0]&b[1]) ^ (a[1]&b[0]) ^ GETBIT(r[1],i); SETBIT(c[0],i+1, t ^ (a[0]&b[0]) ^ GETBIT(c[0],i) ^ GETBIT(r[0],i)); t = (a[1]&b[2]) ^ (a[2]&b[1]) ^ GETBIT(r[2],i); SETBIT(c[1],i+1, t ^ (a[1]&b[1]) ^ 
GETBIT(c[1],i) ^ GETBIT(r[1],i)); t = (a[2]&b[0]) ^ (a[0]&b[2]) ^ GETBIT(r[0],i); SETBIT(c[2],i+1, t ^ (a[2]&b[2]) ^ GETBIT(c[2],i) ^ GETBIT(r[2],i)); } z[0]=x[0]^y^c[0]; z[1]=x[1]^y^c[1]; z[2]=x[2]^y^c[2]; views[0].y[*countY] = c[0]; views[1].y[*countY] = c[1]; views[2].y[*countY] = c[2]; *countY += 1; } int sha256(unsigned char* result, unsigned char* input, int numBits) { uint32_t hA[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; if (numBits > 447) { printf("Input too long, aborting!"); return -1; } int chars = numBits >> 3; unsigned char* chunk = calloc(64, 1); //512 bits memcpy(chunk, input, chars); chunk[chars] = 0x80; //Last 8 chars used for storing length of input without padding, in big-endian. //Since we only care for one block, we are safe with just using last 9 bits and 0'ing the rest //chunk[60] = numBits >> 24; //chunk[61] = numBits >> 16; chunk[62] = numBits >> 8; chunk[63] = numBits; uint32_t w[64]; int i; for (i = 0; i < 16; i++) { w[i] = (chunk[i * 4] << 24) | (chunk[i * 4 + 1] << 16) | (chunk[i * 4 + 2] << 8) | chunk[i * 4 + 3]; } uint32_t s0, s1; for (i = 16; i < 64; i++) { s0 = RIGHTROTATE(w[i - 15], 7) ^ RIGHTROTATE(w[i - 15], 18) ^ (w[i - 15] >> 3); s1 = RIGHTROTATE(w[i - 2], 17) ^ RIGHTROTATE(w[i - 2], 19) ^ (w[i - 2] >> 10); w[i] = w[i - 16] + s0 + w[i - 7] + s1; } uint32_t a, b, c, d, e, f, g, h, temp1, temp2, maj; a = hA[0]; b = hA[1]; c = hA[2]; d = hA[3]; e = hA[4]; f = hA[5]; g = hA[6]; h = hA[7]; for (i = 0; i < 64; i++) { s1 = RIGHTROTATE(e,6) ^ RIGHTROTATE(e, 11) ^ RIGHTROTATE(e, 25); temp1 = h + s1 + CH(e, f, g) + k[i] + w[i]; s0 = RIGHTROTATE(a,2) ^ RIGHTROTATE(a, 13) ^ RIGHTROTATE(a, 22); maj = (a & (b ^ c)) ^ (b & c); temp2 = s0 + maj; h = g; g = f; f = e; e = d + temp1; d = c; c = b; b = a; a = temp1 + temp2; } hA[0] += a; hA[1] += b; hA[2] += c; hA[3] += d; hA[4] += e; hA[5] += f; hA[6] += g; hA[7] += h; for (i = 0; i < 8; i++) { result[i * 4] = (hA[i] >> 24); result[i * 4 + 1] = (hA[i] >> 16); result[i * 4 + 2] = (hA[i] >> 8); result[i * 4 + 3] = hA[i]; } return 0; } void mpc_RIGHTROTATE(uint32_t x[], int i, uint32_t z[]) { z[0] = RIGHTROTATE(x[0], i); z[1] = RIGHTROTATE(x[1], i); z[2] = RIGHTROTATE(x[2], i); } void mpc_RIGHTSHIFT(uint32_t x[3], int i, uint32_t z[3]) { z[0] = x[0] >> i; z[1] = x[1] >> i; z[2] = x[2] >> i; } void mpc_MAJ(uint32_t a[], uint32_t b[3], uint32_t c[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t t0[3]; uint32_t t1[3]; mpc_XOR(a, b, t0); mpc_XOR(a, c, t1); mpc_AND(t0, t1, z, randomness, randCount, views, countY); mpc_XOR(z, a, z); } void mpc_CH(uint32_t e[], uint32_t f[3], uint32_t g[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t t0[3]; //e & (f^g) ^ g mpc_XOR(f,g,t0); mpc_AND(e,t0,t0, randomness, randCount, views, countY); mpc_XOR(t0,g,z); } int mpc_sha256(unsigned char* results[3], unsigned char* inputs[3], unsigned char * addMsg, int numBits, unsigned char *randomness[3], int* randCount, View views[3], int* countY) { if (numBits > 447) { printf("Input too long, aborting!"); return -1; } int chars = numBits >> 3; unsigned char* chunks[3]; uint32_t w[64][3]; uint32_t msg[MSG_SIZE/4]; if (addMsg) { for (int j=0;j<(numBits/32);j++) { msg[j] = (addMsg[j*4]<<24) | (addMsg[j*4+1]<<16) | (addMsg[j*4+2] << 8) | (addMsg[j*4+3]); } } for (int i =0; i<64;i++) { w[i][0]=w[i][1]=w[i][2] = 0; } for (int i = 0; i < 3; i++) { chunks[i] = calloc(64, 1); //512 bits 
memcpy(chunks[i], inputs[i], chars); chunks[i][chars] = 0x80; //Last 8 chars used for storing length of input without padding, in big-endian. //Since we only care for one block, we are safe with just using last 9 bits and 0'ing the rest //chunk[60] = numBits >> 24; //chunk[61] = numBits >> 16; chunks[i][62] = numBits >> 8; chunks[i][63] = numBits; memcpy(views[i].x, chunks[i], 64); for (int j = 0; j < 16; j++) { w[j][i] = (chunks[i][j * 4] << 24) | (chunks[i][j * 4 + 1] << 16) | (chunks[i][j * 4 + 2] << 8) | chunks[i][j * 4 + 3]; } free(chunks[i]); } if (addMsg) { for (int j=0;j<(MSG_SIZE/4);j++) { mpc_ADDK(w[j], msg[j], w[j], randomness, randCount, views, countY); } } uint32_t s0[3], s1[3]; uint32_t t0[3], t1[3]; for (int j = 16; j < 64; j++) { //s0[i] = RIGHTROTATE(w[i][j-15],7) ^ RIGHTROTATE(w[i][j-15],18) ^ (w[i][j-15] >> 3); mpc_RIGHTROTATE(w[j-15], 7, t0); mpc_RIGHTROTATE(w[j-15], 18, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTSHIFT(w[j-15], 3, t1); mpc_XOR(t0, t1, s0); //s1[i] = RIGHTROTATE(w[i][j-2],17) ^ RIGHTROTATE(w[i][j-2],19) ^ (w[i][j-2] >> 10); mpc_RIGHTROTATE(w[j-2], 17, t0); mpc_RIGHTROTATE(w[j-2], 19, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTSHIFT(w[j-2], 10, t1); mpc_XOR(t0, t1, s1); //w[i][j] = w[i][j-16]+s0[i]+w[i][j-7]+s1[i]; mpc_ADD(w[j-16], s0, t1, randomness, randCount, views, countY); mpc_ADD(w[j-7], t1, t1, randomness, randCount, views, countY); mpc_ADD(t1, s1, w[j], randomness, randCount, views, countY); } uint32_t a[3] = { hA[0],hA[0],hA[0] }; uint32_t b[3] = { hA[1],hA[1],hA[1] }; uint32_t c[3] = { hA[2],hA[2],hA[2] }; uint32_t d[3] = { hA[3],hA[3],hA[3] }; uint32_t e[3] = { hA[4],hA[4],hA[4] }; uint32_t f[3] = { hA[5],hA[5],hA[5] }; uint32_t g[3] = { hA[6],hA[6],hA[6] }; uint32_t h[3] = { hA[7],hA[7],hA[7] }; uint32_t temp1[3], temp2[3], maj[3]; for (int i = 0; i < 64; i++) { //s1 = RIGHTROTATE(e,6) ^ RIGHTROTATE(e,11) ^ RIGHTROTATE(e,25); mpc_RIGHTROTATE(e, 6, t0); mpc_RIGHTROTATE(e, 11, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTROTATE(e, 25, t1); mpc_XOR(t0, t1, s1); //ch = (e & f) ^ ((~e) & g); //temp1 = h + s1 + CH(e,f,g) + k[i]+w[i]; //t0 = h + s1 mpc_ADD(h, s1, t0, randomness, randCount, views, countY); mpc_CH(e, f, g, t1, randomness, randCount, views, countY); //t1 = t0 + t1 (h+s1+ch) mpc_ADD(t0, t1, t1, randomness, randCount, views, countY); mpc_ADDK(t1, k[i], t1, randomness, randCount, views, countY); mpc_ADD(t1, w[i], temp1, randomness, randCount, views, countY); //s0 = RIGHTROTATE(a,2) ^ RIGHTROTATE(a,13) ^ RIGHTROTATE(a,22); mpc_RIGHTROTATE(a, 2, t0); mpc_RIGHTROTATE(a, 13, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTROTATE(a, 22, t1); mpc_XOR(t0, t1, s0); mpc_MAJ(a, b, c, maj, randomness, randCount, views, countY); //temp2 = s0+maj; mpc_ADD(s0, maj, temp2, randomness, randCount, views, countY); memcpy(h, g, sizeof(uint32_t) * 3); memcpy(g, f, sizeof(uint32_t) * 3); memcpy(f, e, sizeof(uint32_t) * 3); //e = d+temp1; mpc_ADD(d, temp1, e, randomness, randCount, views, countY); memcpy(d, c, sizeof(uint32_t) * 3); memcpy(c, b, sizeof(uint32_t) * 3); memcpy(b, a, sizeof(uint32_t) * 3); //a = temp1+temp2; mpc_ADD(temp1, temp2, a, randomness, randCount, views, countY); } uint32_t hHa[8][3] = { { hA[0],hA[0],hA[0] }, { hA[1],hA[1],hA[1] }, { hA[2],hA[2],hA[2] }, { hA[3],hA[3],hA[3] }, { hA[4],hA[4],hA[4] }, { hA[5],hA[5],hA[5] }, { hA[6],hA[6],hA[6] }, { hA[7],hA[7],hA[7] } }; mpc_ADD(hHa[0], a, hHa[0], randomness, randCount, views, countY); mpc_ADD(hHa[1], b, hHa[1], randomness, randCount, views, countY); mpc_ADD(hHa[2], c, hHa[2], randomness, randCount, views, countY); 
mpc_ADD(hHa[3], d, hHa[3], randomness, randCount, views, countY); mpc_ADD(hHa[4], e, hHa[4], randomness, randCount, views, countY); mpc_ADD(hHa[5], f, hHa[5], randomness, randCount, views, countY); mpc_ADD(hHa[6], g, hHa[6], randomness, randCount, views, countY); mpc_ADD(hHa[7], h, hHa[7], randomness, randCount, views, countY); for (int i = 0; i < 8; i++) { mpc_RIGHTSHIFT(hHa[i], 24, t0); results[0][i * 4] = t0[0]; results[1][i * 4] = t0[1]; results[2][i * 4] = t0[2]; mpc_RIGHTSHIFT(hHa[i], 16, t0); results[0][i * 4 + 1] = t0[0]; results[1][i * 4 + 1] = t0[1]; results[2][i * 4 + 1] = t0[2]; mpc_RIGHTSHIFT(hHa[i], 8, t0); results[0][i * 4 + 2] = t0[0]; results[1][i * 4 + 2] = t0[1]; results[2][i * 4 + 2] = t0[2]; results[0][i * 4 + 3] = hHa[i][0]; results[1][i * 4 + 3] = hHa[i][1]; results[2][i * 4 + 3] = hHa[i][2]; } return 0; } int writeToFile(char filename[], void* data, int size, int numItems) { FILE *file; file = fopen(filename, "wb"); if (!file) { printf("Unable to open file!"); return 1; } fwrite(data, size, numItems, file); fclose(file); return 0; } int secretShare(unsigned char* input, int numBytes, unsigned char output[3][numBytes]) { if(RAND_bytes(output[0], numBytes) != 1) { printf("RAND_bytes failed crypto, aborting\n"); } if(RAND_bytes(output[1], numBytes) != 1) { printf("RAND_bytes failed crypto, aborting\n"); } for (int j = 0; j < numBytes; j++) { output[2][j] = input[j] ^ output[0][j] ^ output[1][j]; } return 0; } a commit(int numBytes, unsigned char shares[3][numBytes], unsigned char *randomness[3], unsigned char rs[3][4], View views[3]) { unsigned char* inputs[3]; inputs[0] = shares[0]; inputs[1] = shares[1]; inputs[2] = shares[2]; unsigned char* hashes[3]; hashes[0] = malloc(32); hashes[1] = malloc(32); hashes[2] = malloc(32); int* randCount = calloc(1, sizeof(int)); int* countY = calloc(1, sizeof(int)); *countY = 0; mpc_sha256(hashes, inputs, NULL, numBytes * 8, randomness, randCount, views, countY); //Explicitly add y to view free(randCount); for(int i = 0; i<8; i++) { views[0].y[*countY] = (hashes[0][i * 4] << 24) | (hashes[0][i * 4 + 1] << 16) | (hashes[0][i * 4 + 2] << 8) | hashes[0][i * 4 + 3]; views[1].y[*countY] = (hashes[1][i * 4] << 24) | (hashes[1][i * 4 + 1] << 16) | (hashes[1][i * 4 + 2] << 8) | hashes[1][i * 4 + 3]; views[2].y[*countY] = (hashes[2][i * 4] << 24) | (hashes[2][i * 4 + 1] << 16) | (hashes[2][i * 4 + 2] << 8) | hashes[2][i * 4 + 3]; *countY += 1; } free(countY); free(hashes[0]); free(hashes[1]); free(hashes[2]); uint32_t* result1 = malloc(32); output(views[0], result1); uint32_t* result2 = malloc(32); output(views[1], result2); uint32_t* result3 = malloc(32); output(views[2], result3); a a; memcpy(a.yp[0], result1, 32); memcpy(a.yp[1], result2, 32); memcpy(a.yp[2], result3, 32); free(result1); free(result2); free(result3); return a; } z prove(int e, unsigned char keys[3][16], unsigned char rs[3][4], View views[3]) { z z; memcpy(z.ke, keys[e], 16); memcpy(z.ke1, keys[(e + 1) % 3], 16); z.ve = views[e]; z.ve1 = views[(e + 1) % 3]; memcpy(z.re, rs[e],4); memcpy(z.re1, rs[(e + 1) % 3],4); return z; } int main(int argc, char * argv[]) { setbuf(stdout, NULL); srand((unsigned) time(NULL)); init_EVP(); openmp_thread_setup(); char CHALLENGE[MSG_SIZE+1]; //55 is max length as we only support 447 bits = 55.875 bytes // if (argc != 3) { printf("Usage: %s <number of rounds (e.g. 
20, 40, 60, 80, 100)> <challenge (Max %d char)>\n",argv[0],MSG_SIZE);
		return -1;
	}
	NUM_ROUNDS = atoi(argv[1]);

	unsigned char garbage[4];
	if(RAND_bytes(garbage, 4) != 1) {
		printf("RAND_bytes failed crypto, aborting\n");
		return 0;
	}
	memset(CHALLENGE,0,MSG_SIZE+1);
	strncpy(CHALLENGE,argv[2],MSG_SIZE);
	int i = strlen(CHALLENGE);
	printf("Challenge length: %d\n", i);
	printf("Iterations of SHA: %d\n", NUM_ROUNDS);

	unsigned char input[MSG_SIZE];
	memset(input,0,sizeof(input));
	for(int j = 0; j<i; j++) {
		input[j] = CHALLENGE[j];
	}

	struct timeval begin, delta;
	gettimeofday(&begin,NULL);

	unsigned char rs[NUM_ROUNDS][3][4];
	unsigned char keys[NUM_ROUNDS][3][16];
	a as[NUM_ROUNDS];
	View localViews[NUM_ROUNDS][3];
	int totalCrypto = 0;
	z* zs = NULL;	// initialized so the per-iteration free below is safe

	for(int loops=0;loops<100;loops++) {
		//Generating keys
		if(RAND_bytes((unsigned char *) keys, NUM_ROUNDS*3*16) != 1) {
			printf("RAND_bytes failed crypto, aborting\n");
			return 0;
		}
		if(RAND_bytes((unsigned char *)rs, NUM_ROUNDS*3*4) != 1) {
			printf("RAND_bytes failed crypto, aborting\n");
			return 0;
		}

		//Sharing secrets
		unsigned char shares[NUM_ROUNDS][3][i];
		if(RAND_bytes((unsigned char *)shares, NUM_ROUNDS*3*i) != 1) {
			printf("RAND_bytes failed crypto, aborting\n");
			return 0;
		}
		#pragma omp parallel for
		for(int k=0; k<NUM_ROUNDS; k++) {
			for (int j = 0; j < i; j++) {
				shares[k][2][j] = input[j] ^ shares[k][0][j] ^ shares[k][1][j];
			}
		}

		//Generating randomness
		unsigned char *randomness[NUM_ROUNDS][3];
		#pragma omp parallel for
		for(int k=0; k<(NUM_ROUNDS); k++) {
			for(int j = 0; j<3; j++) {
				randomness[k][j] = malloc(2912*sizeof(unsigned char));
				getAllRandomness(keys[k][j], randomness[k][j]);
			}
		}

		//Running MPC-SHA2
		#pragma omp parallel for
		for(int k=0; k<NUM_ROUNDS; k++) {
			as[k] = commit(i, shares[k], randomness[k], rs[k], localViews[k]);
			for(int j=0; j<3; j++) {
				free(randomness[k][j]);
			}
		}

		//Committing
		#pragma omp parallel for
		for(int k=0; k<(NUM_ROUNDS); k++) {
			unsigned char hash1[SHA256_DIGEST_LENGTH];
			memset(hash1,0,sizeof(hash1));
			H(keys[k][0], localViews[k][0], rs[k][0], hash1);
			memcpy(as[k].h[0], &hash1, 32);
			H(keys[k][1], localViews[k][1], rs[k][1], hash1);
			memcpy(as[k].h[1], &hash1, 32);
			H(keys[k][2], localViews[k][2], rs[k][2], hash1);
			memcpy(as[k].h[2], &hash1, 32);
		}

		//Generating E
		int es[NUM_ROUNDS];
		uint32_t finalHash[8];
		for (int j = 0; j < 8; j++) {
			finalHash[j] = as[0].yp[0][j]^as[0].yp[1][j]^as[0].yp[2][j];
		}
		printf("output H(Challenge) = ");
		for (int i = 0; i< 8;i++) {
			printf("%08X",finalHash[i]);	// %08X: print each 32-bit word in full
		}
		printf("\n");
		H3(finalHash, as, NUM_ROUNDS, es);

		//Packing Z
		free(zs);	// release the previous iteration's proofs (was leaked before)
		zs = malloc(sizeof(z)*NUM_ROUNDS);
		#pragma omp parallel for
		for(int i = 0; i<(NUM_ROUNDS); i++) {
			zs[i] = prove(es[i],keys[i],rs[i], localViews[i]);
		}
	}

	gettimeofday(&delta,NULL);
	unsigned long inMilli = (delta.tv_sec - begin.tv_sec)*1000000 + (delta.tv_usec - begin.tv_usec);
	inMilli /= 1000;

	//Writing to file
	FILE *file;
	char outputFile[3*sizeof(int) + 8];
	sprintf(outputFile, "out%i.bin", NUM_ROUNDS);
	file = fopen(outputFile, "wb");
	if (!file) {
		printf("Unable to open file!");
		return 1;
	}
	fwrite(as, sizeof(a), NUM_ROUNDS, file);
	fwrite(zs, sizeof(z), NUM_ROUNDS, file);
	fclose(file);
	free(zs);

	printf("Total time taken for 100 loops: %lu milliseconds\n",inMilli);
	printf("Time taken for 1 loop: %lu milliseconds\n",inMilli/100);
	printf("\n");
	printf("Proof output to file %s\n", outputFile);

	openmp_thread_cleanup();
	cleanup_EVP();
	return EXIT_SUCCESS;
}
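/*
 * Sanity-check sketch (not part of the prover): the (2,3)-decomposition used
 * by mpc_AND above reconstructs the plain AND. XORing the three output
 * shares cancels every r[i] term and leaves the product of the XORed inputs.
 * Standalone; uses rand() instead of the keyed random tapes for brevity.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	srand(1);
	uint32_t x = 0xDEADBEEF, y = 0x12345678;
	// split x and y into three XOR shares
	uint32_t xs[3] = { (uint32_t) rand(), (uint32_t) rand(), 0 };
	uint32_t ys[3] = { (uint32_t) rand(), (uint32_t) rand(), 0 };
	xs[2] = x ^ xs[0] ^ xs[1];
	ys[2] = y ^ ys[0] ^ ys[1];
	uint32_t r[3] = { (uint32_t) rand(), (uint32_t) rand(), (uint32_t) rand() };
	uint32_t z[3];
	for (int i = 0; i < 3; i++) {
		int j = (i + 1) % 3;	// same share arithmetic as mpc_AND
		z[i] = (xs[i] & ys[j]) ^ (xs[j] & ys[i]) ^ (xs[i] & ys[i]) ^ r[i] ^ r[j];
	}
	printf("x&y      = %08X\n", x & y);
	printf("z0^z1^z2 = %08X\n", z[0] ^ z[1] ^ z[2]);	// identical
	return 0;
}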
dnn.c
//------------------------------------------------------------------------------
// LAGraph/Test/DNN/dnn: run all neural networks from http://graphchallenge.org
//------------------------------------------------------------------------------

/*
    LAGraph:  graph algorithms based on GraphBLAS

    Copyright 2019 LAGraph Contributors.

    (see Contributors.txt for a full list of Contributors; see
    ContributionInstructions.txt for information on how you can Contribute to
    this project).

    All Rights Reserved.

    NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
    CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
    AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
    PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
    THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
    RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.

    Released under a BSD license, please see the LICENSE file distributed with
    this Software or contact permission@sei.cmu.edu for full terms.

    Created, in part, with funding and support from the United States
    Government.  (see Acknowledgments.txt file).

    This program includes and/or can make use of certain third party source
    code, object code, documentation and other files ("Third Party Software").
    See LICENSE file for more details.
*/

//------------------------------------------------------------------------------

// LAGraph/Test/DNN/dnn: test for LAGraph_dnn.  Contributed by Tim Davis,
// Texas A&M University.

// Usage: ./build/dnn nproblems

// nproblems is the # of test problems to solve.  If not present, it defaults
// to 12 (run all 12 DNN's).  The problems are solved in order from small to
// big.  The Makefile just runs the first and smallest problem.

// NOTE: this test currently uses many GxB_* extensions in
// SuiteSparse:GraphBLAS.  It optionally uses OpenMP.

#define LAGRAPH_EXPERIMENTAL_ASK_BEFORE_BENCHMARKING
#include <LAGraph.h>

#define LAGRAPH_FREE_ALL ;

int main (int argc, char **argv)
{

    //--------------------------------------------------------------------------
    // start LAGraph and GraphBLAS
    //--------------------------------------------------------------------------

    GrB_Info info ;
    LAGRAPH_OK (LAGraph_init ( )) ;

    //--------------------------------------------------------------------------
    // problem size definitions
    //--------------------------------------------------------------------------

    // The 12 problems and their sizes are hard-coded below.

    // It would be better to define these from the input files, but the problem
    // data files are not formatted in a way that makes this easy to do.  A
    // Matrix Market file format would be better (which can specify the type
    // and size of each matrix), with the addition of a problem specification
    // file that defines each of the 12 problems to solve.

    // Each problem is defined by a set of files in the DNN_DATA directory,
    // which can be obtained from http://graphchallenge.org .  The simplest way
    // to redefine the location of the data files is to make ./dnn_data a
    // symbolic link, and leave DNN_DATA unchanged.  The .gitignore file will
    // prevent dnn_data from syncing to github, so you could also simply change
    // ./dnn_data to a true directory and place all files there.  Or, change
    // the DNN_DATA macro to point to your data files.

    #define DNN_DATA "./dnn_data"

    // Each of the 12 problems is defined by the # of neurons at each layer, N
    // = (1024, 4096, 16384, 65536), and the # of layers, L = (120, 480, or
    // 1920).
Each problem has the same number of features (F = 60000). The // input files for a given problem (N,L) are as follows: // Input feature vectors: an F-by-N sparse matrix // ./dnn_data/MNIST/sparse-images-(N).tsv // Neural network layers, for i = 1 to L, each an N-by-N sparse matrix: // ./dnn_data/DNN/neuron(N)/n(N)-l(i).tsv // True categories, a list of integers, one per line: // ./dnn_data/DNN/neuron(N)-l(L)-categories.tsv // The Bias vectors are defined with the single scalar, neuralNetBias[ ], // with one scalar for each value of N. This scalar is used to construct // the diagonal Bias matrices for each layer. All the layers share the // same matrix, but they are treated as different matrices here. In a more // general problem, the Bias matrices would differ for each layer and // perhaps for each neuron. As a result, this test is not permitted to // exploit the fact that all neurons are biased the same way. // Note that for a given number of neurons, N, each of the 3 problems for // different layers shares the same weight matrices for the first layers. // That is, the first 120 layers of the (1024,480) problem are the same as // the 120 layers of the (1024,120) problem. This is not exploited in // LAGraph_dnn, but it is exploited here, simply to reduce the time to load // the problems. int len = 1024 ; char filename [len] ; #define NMAXLAYERS 3 int maxLayers [NMAXLAYERS] = { 120, 480, 1920 } ; // #define NMAXNEURONS 1 // int Nneurons [NMAXNEURONS] = { 65536 } ; // double neuralNetBias [NMAXNEURONS] = { -0.45 } ; #define NMAXNEURONS 4 int Nneurons [NMAXNEURONS] = { 1024, 4096, 16384, 65536 } ; double neuralNetBias [NMAXNEURONS] = { -0.3, -0.35, -0.4, -0.45 } ; int nfeatures = 60000 ; GrB_Matrix Y0 = NULL, Y = NULL, W [65536], Bias [65536] ; GrB_Vector TrueCategories = NULL, Categories = NULL, C = NULL ; for (int layer = 0 ; layer < 65536 ; layer++) { W [layer] = NULL ; Bias [layer] = NULL ; } #undef LAGRAPH_FREE_ALL #define LAGRAPH_FREE_ALL \ { \ GrB_free (&TrueCategories) ; \ GrB_free (&Categories) ; \ GrB_free (&C) ; \ GrB_free (&Y) ; \ GrB_free (&Y0) ; \ for (int layer = 0 ; layer < 65536 ; layer++) \ { \ GrB_free (& (W [layer])) ; \ GrB_free (& (Bias [layer])) ; \ } \ } // select the type. GrB_FP32 is faster and passes all the tests. 
    // GrB_Type type = GrB_FP64 ;
    GrB_Type type = GrB_FP32 ;

    printf ("type: ") ;
    if (type == GrB_FP64) printf ("double\n") ;
    else printf ("float\n") ;

    // get the max # of threads that can be used
    int nthreads_max ;
    LAGRAPH_OK (GxB_get (GxB_NTHREADS, &nthreads_max)) ;
    printf ("max # of nthreads: %d\n", nthreads_max) ;

    #define NNTHREADS 12
    int nthreads_list [NNTHREADS] =
        { 1, 2, 4, 8, 16, 20, 32, 40, 64, 128, 160, 256 } ;

    // #define NNTHREADS 1
    // int nthreads_list [NNTHREADS] = { 40 } ;

    // determine the # of problems to solve
    int nproblems = NMAXNEURONS * NMAXLAYERS ;
    if (argc > 1)
    {
        sscanf (argv [1], "%d", &nproblems) ;
    }
    printf ("# of problems to solve: %d\n", nproblems) ;
    int problem = 0 ;

    //--------------------------------------------------------------------------
    // run all problems
    //--------------------------------------------------------------------------

    for (int kn = 0 ; kn < NMAXNEURONS ; kn++)
    {

        //----------------------------------------------------------------------
        // check if this problem is to be solved
        //----------------------------------------------------------------------

        if (problem > nproblems) continue ;

        //----------------------------------------------------------------------
        // get the number of neurons and the neural net bias
        //----------------------------------------------------------------------

        double tic [2] ;
        LAGraph_tic (tic) ;
        int nneurons = Nneurons [kn] ;
        double b = neuralNetBias [kn] ;
        printf ("\n# neurons: %d bias: %g\n", nneurons, b) ;

        //----------------------------------------------------------------------
        // read in the initial feature vectors
        //----------------------------------------------------------------------

        sprintf (filename, "%s/MNIST/sparse-images-%d.tsv", DNN_DATA, nneurons);
        FILE *f = fopen (filename, "r") ;
        if (!f)
        {
            printf ("cannot open %s\n", filename) ;
            abort ( ) ;
        }
        LAGRAPH_OK (LAGraph_tsvread (&Y0, f, type, nfeatures, nneurons)) ;
        fclose (f) ;
        double t = LAGraph_toc (tic) ;
        // cast needed: nfeatures is an int, PRIu64 expects a 64-bit value
        printf ("# features: %" PRIu64 " read time: %g sec\n",
            (uint64_t) nfeatures, t) ;
        GrB_Index nvals ;
        LAGRAPH_OK (GrB_Matrix_nvals (&nvals, Y0)) ;
        printf ("# entries in Y0: %g million\n", (double) nvals / 1e6) ;
        fflush (stdout) ;

        //----------------------------------------------------------------------
        // run each problem size (for all #'s of layers)
        //----------------------------------------------------------------------

        for (int kl = 0 ; kl < NMAXLAYERS ; kl++)
        {

            //------------------------------------------------------------------
            // check if this problem is to be solved
            //------------------------------------------------------------------

            problem++ ;
            if (problem > nproblems) continue ;

            //------------------------------------------------------------------
            // get the number of layers in this neural net
            //------------------------------------------------------------------

            int nlayers = maxLayers [kl] ;
            printf ("\n--------------------------------------"
                "neurons per layer: %d layers: %d\n", nneurons, nlayers) ;

            //------------------------------------------------------------------
            // read in the layers in parallel
            //------------------------------------------------------------------

            LAGraph_tic (tic) ;
            int first_layer = (kl == 0) ?
0 : maxLayers [kl-1] ;
            bool ok = true ;

            // assume the I/O system can handle 2-way parallelism
            #pragma omp parallel for schedule(dynamic,1) reduction(&&:ok) \
                num_threads (2)
            for (int layer = first_layer ; layer < nlayers ; layer++)
            {
                // read the neuron layer: W [layer]
                char my_filename [1024] ;
                sprintf (my_filename, "%s/DNN/neuron%d/n%d-l%d.tsv", DNN_DATA,
                    nneurons, nneurons, layer+1) ;
                FILE *my_file = fopen (my_filename, "r") ;
                bool my_ok = true ;
                if (!my_file)
                {
                    printf ("cannot open %s\n", my_filename) ;
                    // record the failure before skipping this layer; the
                    // original code set only my_ok here, which the continue
                    // below never folded into the reduction
                    ok = false ;
                    continue ;
                }
                GrB_Info my_info = LAGraph_tsvread (&(W [layer]), my_file,
                    type, nneurons, nneurons) ;
                fclose (my_file) ;
                my_ok = my_ok && (my_info == GrB_SUCCESS) ;

                // construct the bias matrix: Bias [layer].  Note that all Bias
                // matrices are the same for all layers, and all diagonal
                // entries are also the same, but this test must not exploit
                // that fact.
                my_info = GrB_Matrix_new (&(Bias [layer]), type, nneurons,
                    nneurons) ;
                my_ok = my_ok && (my_info == GrB_SUCCESS) ;
                for (int i = 0 ; i < nneurons ; i++)
                {
                    my_info = GrB_Matrix_setElement (Bias [layer], b, i, i) ;
                    my_ok = my_ok && (my_info == GrB_SUCCESS) ;
                }
                GrB_Index ignore ;
                my_info = GrB_Matrix_nvals (&ignore, Bias [layer]) ;
                my_ok = my_ok && (my_info == GrB_SUCCESS) ;
                ok = ok && my_ok ;
            }
            if (!ok)
            {
                printf ("neural read failure\n") ;
                abort ( ) ;
            }
            t = LAGraph_toc (tic) ;
            printf ("read net time %g sec\n", t) ;

            double nedges = 0 ;
            for (int layer = 0 ; layer < nlayers ; layer++)
            {
                GrB_Index nvals ;
                LAGRAPH_OK (GrB_Matrix_nvals (&nvals, W [layer])) ;
                nedges += nvals ;
            }
            printf ("# edges in all layers: %g million\n\n",
                (double) nedges / 1e6) ;
            fflush (stdout) ;

            // read TrueCategories as a boolean nfeatures-by-1 vector
            LAGRAPH_OK (GrB_Vector_new (&TrueCategories, GrB_BOOL,
                nfeatures)) ;
            sprintf (filename, "%s/DNN/neuron%d-l%d-categories.tsv", DNN_DATA,
                nneurons, nlayers) ;
            f = fopen (filename, "r") ;
            bool check_result = (f != NULL) ;
            if (check_result)
            {
                while (1)
                {
                    int category ;
                    if (fscanf (f, "%d\n", &category) == EOF) break ;
                    LAGRAPH_OK (GrB_Vector_setElement (TrueCategories,
                        (bool) true, category-1)) ;
                }
                fclose (f) ;
            }
            else
            {
                printf ("cannot open %s\n", filename) ;
            }

            //------------------------------------------------------------------
            // solve the problem with 1, 2, 4, ..., nthreads_max threads
            //------------------------------------------------------------------

            double t1 = 0, tcheck = 0 ;
            GrB_Index final_ynvals ;

            for (int kth = 0 ; kth < NNTHREADS ; kth++)
            {

                //--------------------------------------------------------------
                // set the # of threads to use
                //--------------------------------------------------------------

                int nthreads = nthreads_list [kth] ;
                if (nthreads > nthreads_max) break ;
                LAGRAPH_OK (GxB_set (GxB_NTHREADS, nthreads)) ;
                printf ("nthreads %2d: ", nthreads) ;
                fflush (stdout) ;

                //--------------------------------------------------------------
                // solve the problem
                //--------------------------------------------------------------

                LAGraph_tic (tic) ;
                LAGRAPH_OK (LAGraph_dnn (&Y, W, Bias, nlayers, Y0)) ;
                t = LAGraph_toc (tic) ;
                printf ("solution time %12.2f sec", t) ;

                if (nthreads == 1)
                {
                    t1 = t ;
                }
                else
                {
                    printf (" speedup %8.2f", t1/t) ;
                }

                //--------------------------------------------------------------
                // check the result
                //--------------------------------------------------------------

                // this is so fast, it's hardly worth timing ...
LAGraph_tic (tic) ; LAGRAPH_OK (GrB_Matrix_nvals (&final_ynvals, Y)) ; // C = sum (Y) LAGRAPH_OK (GrB_Vector_new (&C, type, nfeatures)) ; LAGRAPH_OK (GrB_reduce (C, NULL, NULL, GrB_PLUS_FP64, Y, NULL)); // Categories = pattern of C LAGRAPH_OK (GrB_Vector_new (&Categories, GrB_BOOL, nfeatures)) ; LAGRAPH_OK (GrB_apply (Categories, NULL, NULL, GxB_ONE_BOOL, C, NULL)) ; // write out Categories, as a 1-based file sprintf (filename, "my_neuron%d-l%d-categories_threads%d.tsv", nneurons, nlayers, nthreads) ; FILE *ff = fopen (filename, "w") ; for (int i = 0 ; i < nfeatures ; i++) { bool c = false ; LAGRAPH_OK (GrB_Vector_extractElement (&c, Categories, i)) ; if (c) fprintf (ff, "%d\n", i + 1) ; } fclose (ff) ; if (check_result) { // check if Categories and TrueCategories are the same bool isequal ; LAGRAPH_OK (LAGraph_Vector_isequal (&isequal, TrueCategories, Categories, NULL)) ; if (!isequal) { // GxB_print (TrueCategories, 3) ; // GxB_print (Categories, 3) ; printf ("test failure!\n") ; // LAGRAPH_FREE_ALL ; // abort ( ) ; } else { printf (" test passed") ; } } printf ("\n") ; GrB_free (&Categories) ; GrB_free (&C) ; GrB_free (&Y) ; tcheck = LAGraph_toc (tic) ; } printf ("\n# entries in final Y: %g million\n", (double) final_ynvals / 1e6) ; printf ("check time: %g sec\n", tcheck) ; LAGRAPH_OK (GxB_set (GxB_NTHREADS, nthreads_max)) ; } //---------------------------------------------------------------------- // free the problem //---------------------------------------------------------------------- LAGRAPH_FREE_ALL ; } //-------------------------------------------------------------------------- // finalize LAGraph and GraphBLAS //-------------------------------------------------------------------------- LAGRAPH_OK (LAGraph_finalize ( )) ; printf ("all tests passed\n") ; return (GrB_SUCCESS) ; }
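/*
 * Conceptual sketch (assumption, not LAGraph_dnn's GraphBLAS formulation):
 * the per-layer update the sparse code evaluates for each feature row y,
 * shown here in dense form as y = ReLU (y * W + bias). The diagonal Bias
 * matrix adds the scalar b to each produced entry; the GraphChallenge
 * reference additionally caps activations (e.g. at 32), omitted here.
 */
static void dnn_layer_dense (float *y, const float *W, float bias, int n)
{
    float t [n] ;       // C99 VLA: holds y * W for one feature row
    for (int j = 0 ; j < n ; j++)
    {
        t [j] = 0 ;
        for (int i = 0 ; i < n ; i++) t [j] += y [i] * W [i*n + j] ;
    }
    for (int j = 0 ; j < n ; j++)
    {
        float v = t [j] + bias ;        // the diagonal Bias matrix adds b
        y [j] = (v > 0) ? v : 0 ;       // ReLU
    }
}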
GB_unop__isfinite_bool_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__isfinite_bool_fp32) // op(A') function: GB (_unop_tran__isfinite_bool_fp32) // C type: bool // A type: float // cast: float cij = (aij) // unaryop: cij = isfinite (aij) #define GB_ATYPE \ float #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = isfinite (x) ; // casting #define GB_CAST(z, aij) \ float z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = (aij) ; \ Cx [pC] = isfinite (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__isfinite_bool_fp32) ( bool *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = (aij) ; Cx [p] = isfinite (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = (aij) ; Cx [p] = isfinite (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__isfinite_bool_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
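// Illustrative sketch (not part of the generated kernel above): what the
// GB_CAST_OP macro computes per entry, checked against a few edge values.
#include <math.h>
#include <stdbool.h>
#include <stdio.h>

int main (void)
{
    float Ax [4] = { 0.0f, -3.5f, INFINITY, NAN } ;
    bool Cx [4] ;
    for (int p = 0 ; p < 4 ; p++)
    {
        float z = Ax [p] ;          // the cast is the identity here (fp32 -> fp32)
        Cx [p] = isfinite (z) ;     // bool result: finite -> 1, inf/nan -> 0
    }
    for (int p = 0 ; p < 4 ; p++) printf ("%d\n", (int) Cx [p]) ;
    // prints 1, 1, 0, 0
    return (0) ;
}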
GB_unop__identity_fc32_uint64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_fc32_uint64 // op(A') function: GB_unop_tran__identity_fc32_uint64 // C type: GxB_FC32_t // A type: uint64_t // cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0) // unaryop: cij = aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_fc32_uint64 ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const uint64_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint64_t aij = Ax [p] ; GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint64_t aij = Ax [p] ; GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fc32_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
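// Illustrative sketch (not part of the generated kernel above): the typecast
// the GB_CAST macro performs, written with the C99/C11 complex type that
// GxB_FC32_t corresponds to on C11 compilers (float complex). CMPLXF is the
// standard C11 analogue of GxB_CMPLXF; older compilers can use
// (float) aij + 0.0f * I instead.
#include <complex.h>
#include <stdint.h>
#include <stdio.h>

int main (void)
{
    uint64_t aij = 42 ;
    float complex z = CMPLXF ((float) aij, 0) ;     // real part from the value,
                                                    // imaginary part zero
    printf ("(%g, %g)\n", crealf (z), cimagf (z)) ; // prints (42, 0)
    return (0) ;
}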
Example_lock_owner.1.c
/*
 * @@name: lock_owner.1c
 * @@type: C
 * @@compilable: yes
 * @@linkable: yes
 * @@expect: success
 */
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>

int main()
{
  int x;
  omp_lock_t lck;

  omp_init_lock (&lck);
  omp_set_lock (&lck);
  x = 0;

  #pragma omp parallel shared (x)
  {
    #pragma omp master
    {
      x = x + 1;
      /* The master region runs on the initial thread, the same thread that
         acquired the lock above, so the lock is released by its owner. */
      omp_unset_lock (&lck);
    }

    /* Some more stuff. */
  }

  omp_destroy_lock (&lck);
  return 0;
}
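/*
 * Companion sketch (not from the OpenMP examples document): other threads
 * block in omp_set_lock() until the initial thread releases the lock, so
 * the lock can order work between the master thread and the rest of the
 * team.
 */
#include <stdio.h>
#include <omp.h>

int main()
{
  omp_lock_t lck;
  int x = 0;

  omp_init_lock (&lck);
  omp_set_lock (&lck);          /* held by the initial thread */

  #pragma omp parallel shared (x)
  {
    #pragma omp master
    {
      x = 42;                   /* published before the release */
      omp_unset_lock (&lck);
    }
    omp_set_lock (&lck);        /* each thread waits its turn */
    printf ("thread %d sees x = %d\n", omp_get_thread_num (), x);
    omp_unset_lock (&lck);
  }

  omp_destroy_lock (&lck);
  return 0;
}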
fourier.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                 FFFFF   OOO   U   U  RRRR   IIIII  EEEEE  RRRR              %
%                 F      O   O  U   U  R   R    I    E      R   R             %
%                 FFF    O   O  U   U  RRRR     I    EEE    RRRR              %
%                 F      O   O  U   U  R R      I    E      R R               %
%                 F       OOO    UUU   R  R   IIIII  EEEEE  R  R              %
%                                                                             %
%                                                                             %
%                MagickCore Discrete Fourier Transform Methods                %
%                                                                             %
%                              Software Design                                %
%                                Sean Burke                                   %
%                               Fred Weinhaus                                 %
%                                John Cristy                                  %
%                                 July 2009                                   %
%                                                                             %
%                                                                             %
%  Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    http://www.imagemagick.org/script/license.php                            %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/fourier.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z)  (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z)  (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z)  (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z)  (z[0])
#endif
#endif

/*
  Typedef declarations.
*/
typedef struct _FourierInfo
{
  ChannelType
    channel;

  MagickBooleanType
    modulus;

  size_t
    width,
    height;

  ssize_t
    center;
} FourierInfo;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F o r w a r d F o u r i e r T r a n s f o r m I m a g e                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ForwardFourierTransformImage() implements the discrete Fourier transform
%  (DFT) of the image either as a magnitude / phase or real / imaginary image
%  pair.
%
%  The format of the ForwardFourierTransformImage method is:
%
%      Image *ForwardFourierTransformImage(const Image *image,
%        const MagickBooleanType modulus,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulus: if true, return the transform as a magnitude / phase pair,
%      otherwise as a real / imaginary image pair.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(MAGICKCORE_FFTW_DELEGATE)

static MagickBooleanType RollFourier(const size_t width,const size_t height,
  const ssize_t x_offset,const ssize_t y_offset,double *fourier)
{
  double
    *roll;

  register ssize_t
    i,
    x;

  ssize_t
    u,
    v,
    y;

  /*
    Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
  */
  roll=(double *) AcquireQuantumMemory((size_t) height,width*sizeof(*roll));
  if (roll == (double *) NULL)
    return(MagickFalse);
  i=0L;
  for (y=0L; y < (ssize_t) height; y++)
  {
    if (y_offset < 0L)
      v=((y+y_offset) < 0L) ?
y+y_offset+(ssize_t) height : y+y_offset; else v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height : y+y_offset; for (x=0L; x < (ssize_t) width; x++) { if (x_offset < 0L) u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset; else u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width : x+x_offset; roll[v*width+u]=fourier[i++]; } } (void) CopyMagickMemory(fourier,roll,height*width*sizeof(*roll)); roll=(double *) RelinquishMagickMemory(roll); return(MagickTrue); } static MagickBooleanType ForwardQuadrantSwap(const size_t width, const size_t height,double *source,double *destination) { MagickBooleanType status; register ssize_t x; ssize_t center, y; /* Swap quadrants. */ center=(ssize_t) floor((double) width/2L)+1L; status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,source); if (status == MagickFalse) return(MagickFalse); for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L-1L); x++) destination[width*y+x+width/2L]=source[center*y+x]; for (y=1; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L-1L); x++) destination[width*(height-y)+width/2L-x-1L]=source[center*y+x+1L]; for (x=0L; x < (ssize_t) (width/2L); x++) destination[-x+width/2L-1L]=destination[x+width/2L+1L]; return(MagickTrue); } static void CorrectPhaseLHS(const size_t width,const size_t height, double *fourier) { register ssize_t x; ssize_t y; for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) fourier[y*width+x]*=(-1.0); } static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info, Image *image,double *magnitude,double *phase,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *magnitude_source, *phase_source; Image *magnitude_image, *phase_image; MagickBooleanType status; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; ssize_t i, y; magnitude_image=GetFirstImageInList(image); phase_image=GetNextImageInList(image); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",image->filename); return(MagickFalse); } /* Create "Fourier Transform" image from constituent arrays. 
*/ magnitude_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height,fourier_info->width*sizeof(*magnitude_source)); if (magnitude_source == (double *) NULL) return(MagickFalse); (void) ResetMagickMemory(magnitude_source,0,fourier_info->height* fourier_info->width*sizeof(*magnitude_source)); phase_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*phase_source)); if (phase_source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); return(MagickFalse); } status=ForwardQuadrantSwap(fourier_info->height,fourier_info->height, magnitude,magnitude_source); if (status != MagickFalse) status=ForwardQuadrantSwap(fourier_info->height,fourier_info->height,phase, phase_source); CorrectPhaseLHS(fourier_info->height,fourier_info->height,phase_source); if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_source[i]/=(2.0*MagickPI); phase_source[i]+=0.5; i++; } } magnitude_view=AcquireCacheView(magnitude_image); phase_view=AcquireCacheView(phase_image); i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->height,1UL, exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(magnitude_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q,ClampToQuantum(QuantumRange* magnitude_source[i])); break; } case GreenChannel: { SetPixelGreen(q,ClampToQuantum(QuantumRange* magnitude_source[i])); break; } case BlueChannel: { SetPixelBlue(q,ClampToQuantum(QuantumRange* magnitude_source[i])); break; } case OpacityChannel: { SetPixelOpacity(q,ClampToQuantum(QuantumRange* magnitude_source[i])); break; } case IndexChannel: { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange* magnitude_source[i])); break; } case GrayChannels: { SetPixelGray(q,ClampToQuantum(QuantumRange* magnitude_source[i])); break; } } i++; q++; } status=SyncCacheViewAuthenticPixels(magnitude_view,exception); if (status == MagickFalse) break; } i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->height,1UL, exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(phase_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q,ClampToQuantum(QuantumRange* phase_source[i])); break; } case GreenChannel: { SetPixelGreen(q,ClampToQuantum(QuantumRange* phase_source[i])); break; } case BlueChannel: { SetPixelBlue(q,ClampToQuantum(QuantumRange* phase_source[i])); break; } case OpacityChannel: { SetPixelOpacity(q,ClampToQuantum(QuantumRange* phase_source[i])); break; } case IndexChannel: { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange* phase_source[i])); break; } case GrayChannels: { SetPixelGray(q,ClampToQuantum(QuantumRange*phase_source[i])); break; } } i++; q++; } status=SyncCacheViewAuthenticPixels(phase_view,exception); if (status == MagickFalse) break; } phase_view=DestroyCacheView(phase_view); magnitude_view=DestroyCacheView(magnitude_view); phase_source=(double *) RelinquishMagickMemory(phase_source); magnitude_source=(double *) 
RelinquishMagickMemory(magnitude_source); return(status); } static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info, const Image *image,double *magnitude,double *phase,ExceptionInfo *exception) { CacheView *image_view; double n, *source; fftw_complex *fourier; fftw_plan fftw_r2c_plan; register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t i, x; ssize_t y; /* Generate the forward Fourier transform. */ source=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*source)); if (source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } ResetMagickMemory(source,0,fourier_info->height*fourier_info->width* sizeof(*source)); i=0L; image_view=AcquireCacheView(image); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { source[i]=QuantumScale*GetPixelRed(p); break; } case GreenChannel: { source[i]=QuantumScale*GetPixelGreen(p); break; } case BlueChannel: { source[i]=QuantumScale*GetPixelBlue(p); break; } case OpacityChannel: { source[i]=QuantumScale*GetPixelOpacity(p); break; } case IndexChannel: { source[i]=QuantumScale*GetPixelIndex(indexes+x); break; } case GrayChannels: { source[i]=QuantumScale*GetPixelGray(p); break; } } i++; p++; } } image_view=DestroyCacheView(image_view); fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->center*sizeof(*fourier)); if (fourier == (fftw_complex *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); source=(double *) RelinquishMagickMemory(source); return(MagickFalse); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ForwardFourierTransform) #endif fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->width, source,fourier,FFTW_ESTIMATE); fftw_execute(fftw_r2c_plan); fftw_destroy_plan(fftw_r2c_plan); source=(double *) RelinquishMagickMemory(source); /* Normalize Fourier transform. */ n=(double) fourier_info->width*(double) fourier_info->width; i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier[i]/=n; #else fourier[i][0]/=n; fourier[i][1]/=n; #endif i++; } /* Generate magnitude and phase (or real and imaginary). 
*/ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { magnitude[i]=cabs(fourier[i]); phase[i]=carg(fourier[i]); i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { magnitude[i]=creal(fourier[i]); phase[i]=cimag(fourier[i]); i++; } fourier=(fftw_complex *) RelinquishMagickMemory(fourier); return(MagickTrue); } static MagickBooleanType ForwardFourierTransformChannel(const Image *image, const ChannelType channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { double *magnitude, *phase; fftw_complex *fourier; FourierInfo fourier_info; MagickBooleanType status; size_t extent; fourier_info.width=image->columns; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || ((image->rows % 2) != 0)) { extent=image->columns < image->rows ? image->rows : image->columns; fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) floor((double) fourier_info.width/2.0)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*magnitude)); if (magnitude == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } phase=(double *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*phase)); if (phase == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); magnitude=(double *) RelinquishMagickMemory(magnitude); return(MagickFalse); } fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*fourier)); if (fourier == (fftw_complex *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(MagickFalse); } status=ForwardFourierTransform(&fourier_info,image,magnitude,phase,exception); if (status != MagickFalse) status=ForwardFourier(&fourier_info,fourier_image,magnitude,phase, exception); fourier=(fftw_complex *) RelinquishMagickMemory(fourier); phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(status); } #endif MagickExport Image *ForwardFourierTransformImage(const Image *image, const MagickBooleanType modulus,ExceptionInfo *exception) { Image *fourier_image; fourier_image=NewImageList(); #if !defined(MAGICKCORE_FFTW_DELEGATE) (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", image->filename); #else { Image *magnitude_image; size_t extent, width; width=image->columns; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || ((image->rows % 2) != 0)) { extent=image->columns < image->rows ? image->rows : image->columns; width=(extent & 0x01) == 1 ? 
extent+1UL : extent; } magnitude_image=CloneImage(image,width,width,MagickFalse,exception); if (magnitude_image != (Image *) NULL) { Image *phase_image; magnitude_image->storage_class=DirectClass; magnitude_image->depth=32UL; phase_image=CloneImage(image,width,width,MagickFalse,exception); if (phase_image == (Image *) NULL) magnitude_image=DestroyImage(magnitude_image); else { MagickBooleanType is_gray, status; phase_image->storage_class=DirectClass; phase_image->depth=32UL; AppendImageToList(&fourier_image,magnitude_image); AppendImageToList(&fourier_image,phase_image); status=MagickTrue; is_gray=IsGrayImage(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=ForwardFourierTransformChannel(image, GrayChannels,modulus,fourier_image,exception); else thread_status=ForwardFourierTransformChannel(image, RedChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, GreenChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, BlueChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->matte != MagickFalse) thread_status=ForwardFourierTransformChannel(image, OpacityChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->colorspace == CMYKColorspace) thread_status=ForwardFourierTransformChannel(image, IndexChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImageList(fourier_image); fftw_cleanup(); } } } #endif return(fourier_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v e r s e F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InverseFourierTransformImage() implements the inverse discrete Fourier % transform (DFT) of the image either as a magnitude / phase or real / % imaginary image pair. % % The format of the InverseFourierTransformImage method is: % % Image *InverseFourierTransformImage(const Image *magnitude_image, % const Image *phase_image,const MagickBooleanType modulus, % ExceptionInfo *exception) % % A description of each parameter follows: % % o magnitude_image: the magnitude or real image. % % o phase_image: the phase or imaginary image. % % o modulus: if true, return transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. 
% */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType InverseQuadrantSwap(const size_t width, const size_t height,const double *source,double *destination) { register ssize_t x; ssize_t center, y; /* Swap quadrants. */ center=(ssize_t) floor((double) width/2.0)+1L; for (y=1L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L+1L); x++) destination[center*(height-y)-x+width/2L]=source[y*width+x]; for (y=0L; y < (ssize_t) height; y++) destination[center*y]=source[y*width+width/2L]; for (x=0L; x < center; x++) destination[x]=source[center-x-1L]; return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination)); } static MagickBooleanType InverseFourier(FourierInfo *fourier_info, const Image *magnitude_image,const Image *phase_image,fftw_complex *fourier, ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *magnitude, *phase, *magnitude_source, *phase_source; MagickBooleanType status; register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t i, x; ssize_t y; /* Inverse fourier - read image and break down into a double array. */ magnitude_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height,fourier_info->width*sizeof(*magnitude_source)); if (magnitude_source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } phase_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*phase_source)); if (phase_source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); return(MagickFalse); } i=0L; magnitude_view=AcquireCacheView(magnitude_image); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(magnitude_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { magnitude_source[i]=QuantumScale*GetPixelRed(p); break; } case GreenChannel: { magnitude_source[i]=QuantumScale*GetPixelGreen(p); break; } case BlueChannel: { magnitude_source[i]=QuantumScale*GetPixelBlue(p); break; } case OpacityChannel: { magnitude_source[i]=QuantumScale*GetPixelOpacity(p); break; } case IndexChannel: { magnitude_source[i]=QuantumScale*GetPixelIndex(indexes+x); break; } case GrayChannels: { magnitude_source[i]=QuantumScale*GetPixelGray(p); break; } } i++; p++; } } i=0L; phase_view=AcquireCacheView(phase_image); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(phase_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { phase_source[i]=QuantumScale*GetPixelRed(p); break; } case GreenChannel: { phase_source[i]=QuantumScale*GetPixelGreen(p); break; } case BlueChannel: { phase_source[i]=QuantumScale*GetPixelBlue(p); break; } case OpacityChannel: { phase_source[i]=QuantumScale*GetPixelOpacity(p); break; } case IndexChannel: { phase_source[i]=QuantumScale*GetPixelIndex(indexes+x); break; } case GrayChannels: { 
phase_source[i]=QuantumScale*GetPixelGray(p); break; } } i++; p++; } } if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_source[i]-=0.5; phase_source[i]*=(2.0*MagickPI); i++; } } magnitude_view=DestroyCacheView(magnitude_view); phase_view=DestroyCacheView(phase_view); magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->center*sizeof(*magnitude)); if (magnitude == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); phase_source=(double *) RelinquishMagickMemory(phase_source); return(MagickFalse); } status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, magnitude_source,magnitude); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); phase=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*phase)); if (phase == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); phase_source=(double *) RelinquishMagickMemory(phase_source); return(MagickFalse); } CorrectPhaseLHS(fourier_info->width,fourier_info->width,phase_source); if (status != MagickFalse) status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, phase_source,phase); phase_source=(double *) RelinquishMagickMemory(phase_source); /* Merge two sets. */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier[i]=magnitude[i]*cos(phase[i])+I*magnitude[i]*sin(phase[i]); #else fourier[i][0]=magnitude[i]*cos(phase[i]); fourier[i][1]=magnitude[i]*sin(phase[i]); #endif i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier[i]=magnitude[i]+I*phase[i]; #else fourier[i][0]=magnitude[i]; fourier[i][1]=phase[i]; #endif i++; } phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(status); } static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info, fftw_complex *fourier,Image *image,ExceptionInfo *exception) { CacheView *image_view; double *source; fftw_plan fftw_c2r_plan; register IndexPacket *indexes; register PixelPacket *q; register ssize_t i, x; ssize_t y; source=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*source)); if (source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_InverseFourierTransform) #endif { fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height, fourier,source,FFTW_ESTIMATE); fftw_execute(fftw_c2r_plan); fftw_destroy_plan(fftw_c2r_plan); } i=0L; image_view=AcquireCacheView(image); for (y=0L; y < (ssize_t) fourier_info->height; y++) { if (y >= (ssize_t) image->rows) break; q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width > image->columns ? 
image->columns : fourier_info->width,1UL,exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q,ClampToQuantum(QuantumRange*source[i])); break; } case GreenChannel: { SetPixelGreen(q,ClampToQuantum(QuantumRange*source[i])); break; } case BlueChannel: { SetPixelBlue(q,ClampToQuantum(QuantumRange*source[i])); break; } case OpacityChannel: { SetPixelOpacity(q,ClampToQuantum(QuantumRange*source[i])); break; } case IndexChannel: { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange* source[i])); break; } case GrayChannels: { SetPixelGray(q,ClampToQuantum(QuantumRange*source[i])); break; } } i++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } image_view=DestroyCacheView(image_view); source=(double *) RelinquishMagickMemory(source); return(MagickTrue); } static MagickBooleanType InverseFourierTransformChannel( const Image *magnitude_image,const Image *phase_image, const ChannelType channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { double *magnitude, *phase; fftw_complex *fourier; FourierInfo fourier_info; MagickBooleanType status; size_t extent; fourier_info.width=magnitude_image->columns; if ((magnitude_image->columns != magnitude_image->rows) || ((magnitude_image->columns % 2) != 0) || ((magnitude_image->rows % 2) != 0)) { extent=magnitude_image->columns < magnitude_image->rows ? magnitude_image->rows : magnitude_image->columns; fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) floor((double) fourier_info.width/2.0)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*magnitude)); if (magnitude == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } phase=(double *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*phase)); if (phase == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); magnitude=(double *) RelinquishMagickMemory(magnitude); return(MagickFalse); } fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*fourier)); if (fourier == (fftw_complex *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(MagickFalse); } status=InverseFourier(&fourier_info,magnitude_image,phase_image,fourier, exception); if (status != MagickFalse) status=InverseFourierTransform(&fourier_info,fourier,fourier_image, exception); fourier=(fftw_complex *) RelinquishMagickMemory(fourier); phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(status); } #endif MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image, const Image *phase_image,const MagickBooleanType modulus, ExceptionInfo *exception) { Image *fourier_image; assert(magnitude_image != (Image *) NULL); 
assert(magnitude_image->signature == MagickSignature); if (magnitude_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", magnitude_image->filename); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",magnitude_image->filename); return((Image *) NULL); } #if !defined(MAGICKCORE_FFTW_DELEGATE) fourier_image=(Image *) NULL; (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", magnitude_image->filename); #else { fourier_image=CloneImage(magnitude_image,magnitude_image->columns, magnitude_image->rows,MagickFalse,exception); if (fourier_image != (Image *) NULL) { MagickBooleanType is_gray, status; status=MagickTrue; is_gray=IsGrayImage(magnitude_image,exception); if (is_gray != MagickFalse) is_gray=IsGrayImage(phase_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GrayChannels,modulus,fourier_image,exception); else thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,RedChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GreenChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BlueChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->matte != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,OpacityChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->colorspace == CMYKColorspace) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,IndexChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImage(fourier_image); } fftw_cleanup(); } #endif return(fourier_image); }
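For orientation, a hypothetical driver for the two public entry points defined in this file. The MagickCore setup calls and the "input.png" filename are illustrative assumptions; the transform signatures are taken from the definitions above.

#include <stdio.h>
#include <magick/MagickCore.h>

int main(int argc, char **argv)
{
  MagickCoreGenesis(argv[0], MagickFalse);
  ExceptionInfo *exception = AcquireExceptionInfo();
  ImageInfo *info = CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(info->filename, "input.png", MaxTextExtent);
  Image *image = ReadImage(info, exception);
  if (image != (Image *) NULL)
    {
      /* modulus=MagickTrue: magnitude/phase rather than real/imaginary. */
      Image *fourier = ForwardFourierTransformImage(image, MagickTrue,
        exception);
      if (fourier != (Image *) NULL)
        {
          Image *magnitude = GetFirstImageInList(fourier);
          Image *phase = GetNextImageInList(magnitude);
          Image *restored = InverseFourierTransformImage(magnitude, phase,
            MagickTrue, exception);
          if (restored != (Image *) NULL)
            restored = DestroyImage(restored);
          fourier = DestroyImageList(fourier);
        }
      image = DestroyImage(image);
    }
  info = DestroyImageInfo(info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return 0;
}

With modulus set to MagickTrue, the returned list holds a magnitude/phase pair, matching the AppendImageToList order inside ForwardFourierTransformImage.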
GB_subassign_04.c
//------------------------------------------------------------------------------ // GB_subassign_04: C(I,J) += A ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Method 04: C(I,J) += A ; using S // M: NULL // Mask_comp: false // C_replace: false // accum: present // A: matrix // S: constructed // C: not bitmap: use GB_bitmap_assign instead // A: any sparsity structure. #include "GB_subassign_methods.h" GrB_Info GB_subassign_04 ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t ni, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nj, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], const GrB_BinaryOp accum, const GrB_Matrix A, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A //-------------------------------------------------------------------------- // S = C(I,J) //-------------------------------------------------------------------------- GB_EMPTY_TASKLIST ; GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ; //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GB_MATRIX_WAIT_IF_JUMBLED (A) ; GB_GET_C ; // C must not be bitmap GB_GET_A ; GB_GET_S ; GB_GET_ACCUM ; //-------------------------------------------------------------------------- // Method 04: C(I,J) += A ; using S //-------------------------------------------------------------------------- // Time: Close to Optimal. Every entry in A must be visited, and the // corresponding entry in S must then be found. Time for this phase is // Omega(nnz(A)), but S has already been constructed, in Omega(nnz(S)) // time. This method simply traverses all of A+S (like GB_add for // computing A+S), the same as Method 02. Time taken is O(nnz(A)+nnz(S)). // The only difference is that the traversal of A+S can terminate if A is // exhausted. Entries in S but not A do not actually require any work // (unlike Method 02, which must visit all entries in A+S). // Method 02 and Method 04 are somewhat similar. They differ on how C is // modified when the entry is present in S but not A. // TODO: phase2 of Method 02 and 04 are identical and could be // done in a single function. // Compare with Method 16, which computes C(I,J)<!M> += A, using S. 
//-------------------------------------------------------------------------- // Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20) //-------------------------------------------------------------------------- if (A_is_bitmap) { // all of IxJ must be examined GB_SUBASSIGN_IXJ_SLICE ; } else { // traverse all A+S GB_SUBASSIGN_TWO_SLICE (A, S) ; } //-------------------------------------------------------------------------- // phase 1: create zombies, update entries, and count pending tuples //-------------------------------------------------------------------------- if (A_is_bitmap) { //---------------------------------------------------------------------- // phase1: A is bitmap //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iA_start:iA_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iA_start) ; int64_t pA_start = j * Avlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j) //-------------------------------------------------------------- for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t pA = pA_start + iA ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ; bool Afound = Ab [pA] ; if (Sfound && !Afound) { // ----[C . 1] or [X . 1]------------------------------- // S (i,j) is present but A (i,j) is not // [C . 1]: action: ( C ): no change, with accum // [X . 1]: action: ( X ): still a zombie GB_NEXT (S) ; } else if (!Sfound && Afound) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. 
A 1]: action: ( insert ) task_pending++ ; } else if (Sfound && Afound) { // ----[C A 1] or [X A 1]------------------------------- // both S (i,j) and A (i,j) present // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_withaccum_C_A_1_matrix ; GB_NEXT (S) ; } } } GB_PHASE1_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase1: A is hypersparse, sparse, or full //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE1 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get A(:,j) and S(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and A(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iA = GBI (Ai, pA, Avlen) ; if (iS < iA) { // ----[C . 1] or [X . 1]------------------------------- // S (i,j) is present but A (i,j) is not // [C . 1]: action: ( C ): no change, with accum // [X . 1]: action: ( X ): still a zombie GB_NEXT (S) ; } else if (iA < iS) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) task_pending++ ; GB_NEXT (A) ; } else { // ----[C A 1] or [X A 1]------------------------------- // both S (i,j) and A (i,j) present // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_withaccum_C_A_1_matrix ; GB_NEXT (S) ; GB_NEXT (A) ; } } // ignore the remainder of S (:,j) // List A (:,j) has entries. List S (:,j) exhausted. 
task_pending += (pA_end - pA) ; } GB_PHASE1_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // phase 2: insert pending tuples //-------------------------------------------------------------------------- GB_PENDING_CUMSUM ; if (A_is_bitmap) { //---------------------------------------------------------------------- // phase2: A is bitmap //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iA_start:iA_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iA_start) ; int64_t pA_start = j * Avlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t pA = pA_start + iA ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ; bool Afound = Ab [pA] ; if (!Sfound && Afound) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. 
A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } else if (Sfound) { // S (i,j) present GB_NEXT (S) ; } } } GB_PHASE2_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase2: A is hypersparse, sparse, or full //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE2 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get A(:,j) and S(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and A(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iA = GBI (Ai, pA, Avlen) ; if (iS < iA) { GB_NEXT (S) ; } else if (iA < iS) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } else { GB_NEXT (S) ; GB_NEXT (A) ; } } // ignore the remainder of S (:,j) // while list A (:,j) has entries. List S (:,j) exhausted. while (pA < pA_end) { // ----[. A 1]---------------------------------------------- // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iA = GBI (Ai, pA, Avlen) ; int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } } GB_PHASE2_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // finalize the matrix and return result //-------------------------------------------------------------------------- GB_SUBASSIGN_WRAPUP ; }
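Phases 1 and 2 above are both instances of a two-pointer merge over the sorted index lists of S(:,j) and A(:,j). A stripped-down sketch of that pattern, with hypothetical names and none of the GraphBLAS macros:

#include <stdio.h>

/* Walk sorted index lists S and A once, classifying each position as
   S-only, A-only, or both -- mirroring the [C . 1] / [. A 1] / [C A 1]
   cases in the comments above. */
static void merge_S_A(const int *Si, int Sn, const int *Ai, int An)
{
  int pS = 0, pA = 0;
  while (pS < Sn && pA < An)
  {
    if (Si[pS] < Ai[pA])
    { printf("S-only i=%d (no change, with accum)\n", Si[pS]); pS++; }
    else if (Ai[pA] < Si[pS])
    { printf("A-only i=%d (insert pending tuple)\n", Ai[pA]); pA++; }
    else
    { printf("both   i=%d (apply accum)\n", Si[pS]); pS++; pA++; }
  }
  /* The remainder of S needs no work; the remainder of A becomes
     pending tuples. */
  for ( ; pA < An ; pA++)
    printf("A-only i=%d (insert pending tuple)\n", Ai[pA]);
}

int main(void)
{
  const int S [ ] = {0, 2, 5, 7}, A [ ] = {2, 3, 7, 9, 11};
  merge_S_A(S, 4, A, 5);
  return 0;
}

The early exit once A is exhausted is exactly what distinguishes Method 04 from Method 02: trailing S-only entries require no work at all.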
3d7pt.c
/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* All four problem-size arguments are required; reading them only when
   * present would leave the sizes uninitialized. */
  if (argc < 5) {
    fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1]) + 2;
  Ny = atoi(argv[2]) + 2;
  Nz = atoi(argv[3]) + 2;
  Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 32;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize both buffers, including the boundary planes the stencil reads
  // srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[1][i][j][k] = A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta *
              (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
               A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (causes performance degradation, so left disabled)
  /*
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
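The scop region above is intentionally serial (the benchmark is meant to be tiled by source-to-source tools such as PLUTO), and only the time loop carries a dependence. Under that assumption, a hedged OpenMP variant of the spatial sweep, reusing the allocation from the benchmark above, might look like this:

/* Hypothetical parallel variant of the 7-point sweep: the t loop stays
   serial (step t+1 reads the buffer written at step t), while the
   independent i/j/k iterations are shared across threads. */
static void sweep_parallel(double ****A, int Nt, int Nz, int Ny, int Nx,
                           double alpha, double beta)
{
  for (int t = 0; t < Nt - 1; t++) {
#pragma omp parallel for collapse(2) schedule(static)
    for (int i = 1; i < Nz - 1; i++) {
      for (int j = 1; j < Ny - 1; j++) {
        for (int k = 1; k < Nx - 1; k++) {
          A[(t + 1) % 2][i][j][k] = alpha * A[t % 2][i][j][k]
            + beta * (A[t % 2][i - 1][j][k] + A[t % 2][i][j - 1][k]
                    + A[t % 2][i][j][k - 1] + A[t % 2][i + 1][j][k]
                    + A[t % 2][i][j + 1][k] + A[t % 2][i][j][k + 1]);
        }
      }
    }
  }
}

The (t+1)%2 / t%2 double buffering already makes the spatial iterations independent of one another, so no further privatization is needed.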
max_active_levels_serialized.c
// RUN: %libomp-compile-and-run | FileCheck %s // RUN: %libomp-compile-and-run | %sort-threads | FileCheck --check-prefix=THREADS %s // REQUIRES: ompt // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7 #include "callback.h" #include <omp.h> int main() { omp_set_nested(1); omp_set_max_active_levels(1); #pragma omp parallel num_threads(2) { print_ids(0); print_ids(1); #pragma omp parallel num_threads(2) { print_ids(0); print_ids(1); print_ids(2); } print_fuzzy_address(1); } print_fuzzy_address(2); // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released' // THREADS: 0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: parallel_data initially not null // CHECK-NOT: 0: task_data initially not null // CHECK-NOT: 0: thread_data initially not null // THREADS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER:[0-9]+]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=2, codeptr_ra=[[NESTED_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[NESTED_RETURN_ADDRESS]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], 
task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=2, codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[THREAD_ID]]: fuzzy_address={{.*}}[[NESTED_RETURN_ADDRESS]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] return 0; }
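A standalone sketch of the serialization this test exercises; the expected values noted in the comment are my reading of the spec, not output captured from the test. With omp_set_max_active_levels(1), the nested region still counts as a level but runs on a team of one:

#include <stdio.h>
#include <omp.h>

int main(void)
{
  omp_set_max_active_levels(1);
  #pragma omp parallel num_threads(2)
  {
    #pragma omp parallel num_threads(2)
    {
      /* Expected: two lines of "level=2 active_level=1 inner_team=1",
         one per outer thread, because the inner regions are serialized. */
      #pragma omp single
      printf("level=%d active_level=%d inner_team=%d\n",
             omp_get_level(), omp_get_active_level(),
             omp_get_num_threads());
    }
  }
  return 0;
}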
host_as_target.c
// Check that specifying device as omp_get_initial_device(): // - Doesn't cause the runtime to fail. // - Offloads code to the host. // - Doesn't transfer data. In this case, just check that neither host data nor // default device data are affected by the specified transfers. // - Works whether it's specified directly or as the default device. // RUN: %libomptarget-compile-run-and-check-generic // amdgpu does not have a working printf definition // XFAIL: amdgcn-amd-amdhsa // XFAIL: amdgcn-amd-amdhsa-newDriver #include <stdio.h> #include <omp.h> static void check(char *X, int Dev) { printf(" host X = %c\n", *X); #pragma omp target device(Dev) printf("device X = %c\n", *X); } #define CHECK_DATA() check(&X, DevDefault) int main(void) { int DevDefault = omp_get_default_device(); int DevInit = omp_get_initial_device(); //-------------------------------------------------- // Initialize data on the host and default device. //-------------------------------------------------- // CHECK: host X = h // CHECK-NEXT: device X = d char X = 'd'; #pragma omp target enter data map(to:X) X = 'h'; CHECK_DATA(); //-------------------------------------------------- // Check behavior when specifying host directly. //-------------------------------------------------- // CHECK-NEXT: omp_is_initial_device() = 1 // CHECK-NEXT: host X = h // CHECK-NEXT: device X = d #pragma omp target device(DevInit) map(always,tofrom:X) printf("omp_is_initial_device() = %d\n", omp_is_initial_device()); CHECK_DATA(); // CHECK-NEXT: omp_is_initial_device() = 1 // CHECK-NEXT: host X = h // CHECK-NEXT: device X = d #pragma omp target teams device(DevInit) num_teams(1) map(always,tofrom:X) printf("omp_is_initial_device() = %d\n", omp_is_initial_device()); CHECK_DATA(); // Check that __kmpc_push_target_tripcount_mapper doesn't fail. I'm not sure // how to check that it actually pushes to the initial device. #pragma omp target teams device(DevInit) num_teams(1) #pragma omp distribute for (int i = 0; i < 2; ++i) ; // CHECK-NEXT: host X = h // CHECK-NEXT: device X = d #pragma omp target data device(DevInit) map(always,tofrom:X) ; CHECK_DATA(); // CHECK-NEXT: host X = h // CHECK-NEXT: device X = d #pragma omp target enter data device(DevInit) map(always,to:X) ; CHECK_DATA(); // CHECK-NEXT: host X = h // CHECK-NEXT: device X = d #pragma omp target exit data device(DevInit) map(always,from:X) ; CHECK_DATA(); // CHECK-NEXT: host X = h // CHECK-NEXT: device X = d #pragma omp target update device(DevInit) to(X) ; CHECK_DATA(); // CHECK-NEXT: host X = h // CHECK-NEXT: device X = d #pragma omp target update device(DevInit) from(X) ; CHECK_DATA(); //-------------------------------------------------- // Check behavior when device defaults to host. //-------------------------------------------------- omp_set_default_device(DevInit); // CHECK-NEXT: omp_is_initial_device() = 1 // CHECK-NEXT: host X = h // CHECK-NEXT: device X = d #pragma omp target map(always,tofrom:X) printf("omp_is_initial_device() = %d\n", omp_is_initial_device()); CHECK_DATA(); // CHECK-NEXT: omp_is_initial_device() = 1 // CHECK-NEXT: host X = h // CHECK-NEXT: device X = d #pragma omp target teams num_teams(1) map(always,tofrom:X) printf("omp_is_initial_device() = %d\n", omp_is_initial_device()); CHECK_DATA(); // Check that __kmpc_push_target_tripcount_mapper doesn't fail. I'm not sure // how to check that it actually pushes to the initial device. 
#pragma omp target teams num_teams(1) #pragma omp distribute for (int i = 0; i < 2; ++i) ; // CHECK-NEXT: host X = h // CHECK-NEXT: device X = d #pragma omp target data map(always,tofrom:X) ; CHECK_DATA(); // CHECK-NEXT: host X = h // CHECK-NEXT: device X = d #pragma omp target enter data map(always,to:X) ; CHECK_DATA(); // CHECK-NEXT: host X = h // CHECK-NEXT: device X = d #pragma omp target exit data map(always,from:X) ; CHECK_DATA(); // CHECK-NEXT: host X = h // CHECK-NEXT: device X = d #pragma omp target update to(X) ; CHECK_DATA(); // CHECK-NEXT: host X = h // CHECK-NEXT: device X = d #pragma omp target update from(X) ; CHECK_DATA(); return 0; }
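The device memory routines offer another way to treat omp_get_initial_device() as a first-class device. This sketch is not part of the test above, but the routines are standard OpenMP 4.5 API; on the initial device the allocation is ordinary host memory.

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(void)
{
  int host_dev = omp_get_initial_device();
  int x = 42, y = 0;
  int *p = (int *) omp_target_alloc(sizeof(int), host_dev);
  if (p == NULL)
    return EXIT_FAILURE;
  /* host -> "device" (both sides are the initial device here) */
  omp_target_memcpy(p, &x, sizeof(int), 0, 0, host_dev, host_dev);
  /* "device" -> host */
  omp_target_memcpy(&y, p, sizeof(int), 0, 0, host_dev, host_dev);
  printf("y = %d\n", y); /* expect 42 */
  omp_target_free(p, host_dev);
  return 0;
}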
TSDFVoxelGridImpl.h
// ----------------------------------------------------------------------------
// -                        Open3D: www.open3d.org                            -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------

#include <atomic>
#include <cmath>

#include "open3d/core/Dispatch.h"
#include "open3d/core/Dtype.h"
#include "open3d/core/MemoryManager.h"
#include "open3d/core/SizeVector.h"
#include "open3d/core/Tensor.h"
#include "open3d/t/geometry/Utility.h"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/geometry/kernel/TSDFVoxel.h"
#include "open3d/t/geometry/kernel/TSDFVoxelGrid.h"
#include "open3d/utility/Console.h"
#include "open3d/utility/Timer.h"

namespace open3d {
namespace t {
namespace geometry {
namespace kernel {
namespace tsdf {

#if defined(__CUDACC__)
void IntegrateCUDA
#else
void IntegrateCPU
#endif
        (const core::Tensor& depth,
         const core::Tensor& color,
         const core::Tensor& indices,
         const core::Tensor& block_keys,
         core::Tensor& block_values,
         // Transforms
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         // Parameters
         int64_t resolution,
         float voxel_size,
         float sdf_trunc,
         float depth_scale,
         float depth_max) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    TransformIndexer transform_indexer(intrinsics, extrinsics, voxel_size);

    // Real data indexers
    NDArrayIndexer depth_indexer(depth, 2);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);

    // Optional color integration
    NDArrayIndexer color_indexer;
    bool integrate_color = false;
    if (color.NumElements() != 0) {
        color_indexer = NDArrayIndexer(color, 2);
        integrate_color = true;
    }

    // Plain arrays that do not require indexers
    const int* indices_ptr = indices.GetDataPtr<int>();

    int64_t n = indices.GetLength() * resolution3;

#if defined(__CUDACC__)
    core::kernel::CUDALauncher launcher;
#else
    core::kernel::CPULauncher launcher;
#endif

    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(
                                                        int64_t workload_idx) {
                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int block_idx = indices_ptr[workload_idx / resolution3];
                    int voxel_idx = workload_idx % resolution3;

                    /// Coordinate transform
                    // block_idx -> (x_block, y_block, z_block)
                    int* block_key_ptr =
                            block_keys_indexer.GetDataPtr<int>(block_idx);
                    int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                    int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                    int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                    // coordinate in world (in voxel)
                    int64_t x = (xb * resolution + xv);
                    int64_t y = (yb * resolution + yv);
                    int64_t z = (zb * resolution + zv);

                    // coordinate in camera (in voxel -> in meter)
                    float xc, yc, zc, u, v;
                    transform_indexer.RigidTransform(
                            static_cast<float>(x), static_cast<float>(y),
                            static_cast<float>(z), &xc, &yc, &zc);

                    // coordinate in image (in pixel)
                    transform_indexer.Project(xc, yc, zc, &u, &v);
                    if (!depth_indexer.InBoundary(u, v)) {
                        return;
                    }

                    // Associate image workload and compute SDF and TSDF.
                    float depth = *depth_indexer.GetDataPtr<float>(
                                          static_cast<int64_t>(u),
                                          static_cast<int64_t>(v)) /
                                  depth_scale;

                    float sdf = (depth - zc);
                    if (depth <= 0 || depth > depth_max || zc <= 0 ||
                        sdf < -sdf_trunc) {
                        return;
                    }
                    sdf = sdf < sdf_trunc ? sdf : sdf_trunc;
                    sdf /= sdf_trunc;

                    // Associate voxel workload and update TSDF/Weights
                    voxel_t* voxel_ptr =
                            voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                    xv, yv, zv, block_idx);

                    if (integrate_color) {
                        float* color_ptr = color_indexer.GetDataPtr<float>(
                                static_cast<int64_t>(u),
                                static_cast<int64_t>(v));
                        voxel_ptr->Integrate(sdf, color_ptr[0], color_ptr[1],
                                             color_ptr[2]);
                    } else {
                        voxel_ptr->Integrate(sdf);
                    }
                });
            });
#if defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}

#if defined(__CUDACC__)
void ExtractSurfacePointsCUDA
#else
void ExtractSurfacePointsCPU
#endif
        (const core::Tensor& indices,
         const core::Tensor& nb_indices,
         const core::Tensor& nb_masks,
         const core::Tensor& block_keys,
         const core::Tensor& block_values,
         core::Tensor& points,
         utility::optional<std::reference_wrapper<core::Tensor>> normals,
         utility::optional<std::reference_wrapper<core::Tensor>> colors,
         int64_t resolution,
         float voxel_size,
         float weight_threshold,
         int& valid_size) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});

    // Real data indexers
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
    NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);

    // Plain arrays that do not require indexers
    const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();

    int64_t n_blocks = indices.GetLength();
    int64_t n = n_blocks * resolution3;

    // Output
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Dtype::Int32,
                       block_values.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

#if defined(__CUDACC__)
    core::kernel::CUDALauncher launcher;
#else
    core::kernel::CPULauncher launcher;
#endif

    if (valid_size < 0) {
        utility::LogWarning(
                "No estimated max point cloud size provided, using a 2-pass "
                "estimation. Surface extraction could be slow.");
        // This pass determines the valid number of points.
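        // Sketch (editorial note, not part of the original kernel): the
        // counting pass below flags a surface between a voxel and its
        // +x/+y/+z neighbor whenever both observations are reliable and the
        // TSDF changes sign. A minimal scalar form of the predicate, assuming
        // plain float inputs, would be:
        //
        //   bool crossing = weight_o > weight_threshold &&
        //                   weight_i > weight_threshold &&
        //                   tsdf_o * tsdf_i < 0;
        //
        // Each crossing bumps the shared counter once, so the final count is
        // the number of surface points the second pass will emit.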
        DISPATCH_BYTESIZE_TO_VOXEL(
                voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                    launcher.LaunchGeneralKernel(
                            n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
                                auto GetVoxelAt =
                                        [&] OPEN3D_DEVICE(
                                                int xo, int yo, int zo,
                                                int curr_block_idx)
                                        -> voxel_t* {
                                    return DeviceGetVoxelAt<voxel_t>(
                                            xo, yo, zo, curr_block_idx,
                                            static_cast<int>(resolution),
                                            nb_block_masks_indexer,
                                            nb_block_indices_indexer,
                                            voxel_block_buffer_indexer);
                                };

                                // Natural index (0, N) -> (block_idx,
                                // voxel_idx)
                                int64_t workload_block_idx =
                                        workload_idx / resolution3;
                                int64_t block_idx =
                                        indices_ptr[workload_block_idx];
                                int64_t voxel_idx = workload_idx % resolution3;

                                // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                                int64_t xv, yv, zv;
                                voxel_indexer.WorkloadToCoord(voxel_idx, &xv,
                                                              &yv, &zv);

                                voxel_t* voxel_ptr =
                                        voxel_block_buffer_indexer
                                                .GetDataPtr<voxel_t>(
                                                        xv, yv, zv, block_idx);
                                float tsdf_o = voxel_ptr->GetTSDF();
                                float weight_o = voxel_ptr->GetWeight();
                                if (weight_o <= weight_threshold) return;

                                // Enumerate x-y-z directions
                                for (int i = 0; i < 3; ++i) {
                                    voxel_t* ptr = GetVoxelAt(
                                            static_cast<int>(xv) + (i == 0),
                                            static_cast<int>(yv) + (i == 1),
                                            static_cast<int>(zv) + (i == 2),
                                            static_cast<int>(
                                                    workload_block_idx));
                                    if (ptr == nullptr) continue;

                                    float tsdf_i = ptr->GetTSDF();
                                    float weight_i = ptr->GetWeight();
                                    if (weight_i > weight_threshold &&
                                        tsdf_i * tsdf_o < 0) {
                                        OPEN3D_ATOMIC_ADD(count_ptr, 1);
                                    }
                                }
                            });
                });

#if defined(__CUDACC__)
        valid_size = count[0].Item<int>();
        count[0] = 0;
#else
        valid_size = (*count_ptr).load();
        (*count_ptr) = 0;
#endif
    }

    int max_count = valid_size;
    if (points.GetLength() == 0) {
        points = core::Tensor({max_count, 3}, core::Dtype::Float32,
                              block_values.GetDevice());
    }
    NDArrayIndexer point_indexer(points, 1);

    // Normals
    bool extract_normal = false;
    NDArrayIndexer normal_indexer;
    if (normals.has_value()) {
        extract_normal = true;
        if (normals.value().get().GetLength() == 0) {
            normals.value().get() =
                    core::Tensor({max_count, 3}, core::Dtype::Float32,
                                 block_values.GetDevice());
        }
        normal_indexer = NDArrayIndexer(normals.value().get(), 1);
    }

    // This pass extracts exact surface points.
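    // Sketch (editorial note, not in the original source): within a
    // sign-change interval the TSDF is treated as linear, so the zero
    // crossing sits at
    //
    //   ratio = (0 - tsdf_o) / (tsdf_i - tsdf_o),   ratio in (0, 1),
    //
    // and the surface point in metric units along the crossing axis i is
    //
    //   p[i] = voxel_size * (coord[i] + ratio),
    //
    // with the two remaining coordinates simply scaled by voxel_size.
    // Colors and normals are blended with the same (1 - ratio) / ratio
    // weights.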
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                // Colors
                bool extract_color = false;
                NDArrayIndexer color_indexer;
                if (voxel_t::HasColor() && colors.has_value()) {
                    extract_color = true;
                    if (colors.value().get().GetLength() == 0) {
                        colors.value().get() = core::Tensor(
                                {max_count, 3}, core::Dtype::Float32,
                                block_values.GetDevice());
                    }
                    color_indexer = NDArrayIndexer(colors.value().get(), 1);
                }

                launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(
                                                        int64_t workload_idx) {
                    auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                              int xo, int yo, int zo,
                                              int curr_block_idx) -> voxel_t* {
                        return DeviceGetVoxelAt<voxel_t>(
                                xo, yo, zo, curr_block_idx,
                                static_cast<int>(resolution),
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };

                    auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo,
                                                         int zo,
                                                         int curr_block_idx,
                                                         float* n) {
                        return DeviceGetNormalAt<voxel_t>(
                                xo, yo, zo, curr_block_idx, n,
                                static_cast<int>(resolution), voxel_size,
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };

                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t workload_block_idx = workload_idx / resolution3;
                    int64_t block_idx = indices_ptr[workload_block_idx];
                    int64_t voxel_idx = workload_idx % resolution3;

                    /// Coordinate transform
                    // block_idx -> (x_block, y_block, z_block)
                    int* block_key_ptr =
                            block_keys_indexer.GetDataPtr<int>(block_idx);
                    int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                    int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                    int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                    voxel_t* voxel_ptr =
                            voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                    xv, yv, zv, block_idx);
                    float tsdf_o = voxel_ptr->GetTSDF();
                    float weight_o = voxel_ptr->GetWeight();
                    if (weight_o <= weight_threshold) return;

                    int64_t x = xb * resolution + xv;
                    int64_t y = yb * resolution + yv;
                    int64_t z = zb * resolution + zv;

                    float no[3] = {0}, ni[3] = {0};
                    if (extract_normal) {
                        GetNormalAt(static_cast<int>(xv),
                                    static_cast<int>(yv),
                                    static_cast<int>(zv),
                                    static_cast<int>(workload_block_idx), no);
                    }

                    // Enumerate x-y-z axes
                    for (int i = 0; i < 3; ++i) {
                        voxel_t* ptr = GetVoxelAt(
                                static_cast<int>(xv) + (i == 0),
                                static_cast<int>(yv) + (i == 1),
                                static_cast<int>(zv) + (i == 2),
                                static_cast<int>(workload_block_idx));
                        if (ptr == nullptr) continue;

                        float tsdf_i = ptr->GetTSDF();
                        float weight_i = ptr->GetWeight();
                        if (weight_i > weight_threshold &&
                            tsdf_i * tsdf_o < 0) {
                            float ratio = (0 - tsdf_o) / (tsdf_i - tsdf_o);

                            int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
                            if (idx >= valid_size) {
                                printf("Point cloud size larger than "
                                       "estimated, please increase the "
                                       "estimation!\n");
                                return;
                            }

                            float* point_ptr =
                                    point_indexer.GetDataPtr<float>(idx);
                            point_ptr[0] =
                                    voxel_size * (x + ratio * int(i == 0));
                            point_ptr[1] =
                                    voxel_size * (y + ratio * int(i == 1));
                            point_ptr[2] =
                                    voxel_size * (z + ratio * int(i == 2));

                            if (extract_color) {
                                float* color_ptr =
                                        color_indexer.GetDataPtr<float>(idx);

                                float r_o = voxel_ptr->GetR();
                                float g_o = voxel_ptr->GetG();
                                float b_o = voxel_ptr->GetB();

                                float r_i = ptr->GetR();
                                float g_i = ptr->GetG();
                                float b_i = ptr->GetB();

                                color_ptr[0] =
                                        ((1 - ratio) * r_o + ratio * r_i) /
                                        255.0f;
                                color_ptr[1] =
                                        ((1 - ratio) * g_o + ratio * g_i) /
                                        255.0f;
                                color_ptr[2] =
                                        ((1 - ratio) * b_o + ratio * b_i) /
                                        255.0f;
                            }

                            if (extract_normal) {
                                GetNormalAt(
                                        static_cast<int>(xv) + (i == 0),
                                        static_cast<int>(yv) + (i == 1),
                                        static_cast<int>(zv) + (i == 2),
                                        static_cast<int>(workload_block_idx),
                                        ni);

                                float* normal_ptr =
                                        normal_indexer.GetDataPtr<float>(idx);
                                float nx = (1 - ratio) * no[0] + ratio * ni[0];
                                float ny = (1 - ratio) * no[1] + ratio * ni[1];
                                float nz = (1 - ratio) * no[2] + ratio * ni[2];
                                float norm = static_cast<float>(
                                        sqrt(nx * nx + ny * ny + nz * nz) +
                                        1e-5);
                                normal_ptr[0] = nx / norm;
                                normal_ptr[1] = ny / norm;
                                normal_ptr[2] = nz / norm;
                            }
                        }
                    }
                });
            });

#if defined(__CUDACC__)
    int total_count = count.Item<int>();
#else
    int total_count = (*count_ptr).load();
#endif

    utility::LogDebug("{} vertices extracted", total_count);
    valid_size = total_count;

#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}

#if defined(__CUDACC__)
void ExtractSurfaceMeshCUDA
#else
void ExtractSurfaceMeshCPU
#endif
        (const core::Tensor& indices,
         const core::Tensor& inv_indices,
         const core::Tensor& nb_indices,
         const core::Tensor& nb_masks,
         const core::Tensor& block_keys,
         const core::Tensor& block_values,
         core::Tensor& vertices,
         core::Tensor& triangles,
         utility::optional<std::reference_wrapper<core::Tensor>> normals,
         utility::optional<std::reference_wrapper<core::Tensor>> colors,
         int64_t resolution,
         float voxel_size,
         float weight_threshold,
         int& vertex_count) {
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});

    int n_blocks = static_cast<int>(indices.GetLength());

#if defined(__CUDACC__)
    core::CUDACachedMemoryManager::ReleaseCache();
#endif

    // TODO(wei): profile performance by replacing the table with a hashmap.
    // Voxel-wise mesh info. 4 channels correspond to:
    // 3 edges' corresponding vertex index + 1 table index.
    core::Tensor mesh_structure;
    try {
        mesh_structure = core::Tensor::Zeros(
                {n_blocks, resolution, resolution, resolution, 4},
                core::Dtype::Int32, block_keys.GetDevice());
    } catch (const std::runtime_error&) {
        utility::LogError(
                "[MeshExtractionKernel] Unable to allocate assistance mesh "
                "structure for Marching "
                "Cubes with {} active voxel blocks. Please consider using a "
                "larger voxel size (currently {}) for TSDF "
                "integration, or using tsdf_volume.cpu() to perform mesh "
                "extraction on CPU.",
                n_blocks, voxel_size);
    }

    // Real data indexers
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer mesh_structure_indexer(mesh_structure, 4);
    NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
    NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);

    // Plain arrays that do not require indexers
    const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();
    const int64_t* inv_indices_ptr = inv_indices.GetDataPtr<int64_t>();

    int64_t n = n_blocks * resolution3;

#if defined(__CUDACC__)
    core::kernel::CUDALauncher launcher;
#else
    core::kernel::CPULauncher launcher;
#endif

    int64_t voxel_bytesize = voxel_block_buffer_indexer.ElementByteSize();

    // Pass 0: analyze the mesh structure and set up one-on-one
    // correspondences from edges to vertices.
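    // Sketch (editorial note, not in the original source): pass 0 classifies
    // each cube by packing the signs of its 8 corner TSDF values into a byte:
    //
    //   int table_idx = 0;
    //   for (int i = 0; i < 8; ++i) {
    //       if (corner_tsdf[i] < 0) table_idx |= (1 << i);
    //   }
    //
    // table_idx == 0 or 255 means all corners share a sign, i.e. the cube is
    // entirely inside or outside the surface and produces no geometry;
    // otherwise edge_table[table_idx] flags the edges that carry a vertex.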
    DISPATCH_BYTESIZE_TO_VOXEL(voxel_bytesize, [&]() {
        launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(int64_t widx) {
            auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                      int xo, int yo, int zo,
                                      int curr_block_idx) -> voxel_t* {
                return DeviceGetVoxelAt<voxel_t>(
                        xo, yo, zo, curr_block_idx,
                        static_cast<int>(resolution), nb_block_masks_indexer,
                        nb_block_indices_indexer, voxel_block_buffer_indexer);
            };

            // Natural index (0, N) -> (block_idx, voxel_idx)
            int64_t workload_block_idx = widx / resolution3;
            int64_t voxel_idx = widx % resolution3;

            // voxel_idx -> (x_voxel, y_voxel, z_voxel)
            int64_t xv, yv, zv;
            voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

            // Check the per-vertex signs in the cube to determine the cube
            // type
            int table_idx = 0;
            for (int i = 0; i < 8; ++i) {
                voxel_t* voxel_ptr_i =
                        GetVoxelAt(static_cast<int>(xv) + vtx_shifts[i][0],
                                   static_cast<int>(yv) + vtx_shifts[i][1],
                                   static_cast<int>(zv) + vtx_shifts[i][2],
                                   static_cast<int>(workload_block_idx));
                if (voxel_ptr_i == nullptr) return;

                float tsdf_i = voxel_ptr_i->GetTSDF();
                float weight_i = voxel_ptr_i->GetWeight();
                if (weight_i <= weight_threshold) return;

                table_idx |= ((tsdf_i < 0) ? (1 << i) : 0);
            }

            int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
                    xv, yv, zv, workload_block_idx);
            mesh_struct_ptr[3] = table_idx;

            if (table_idx == 0 || table_idx == 255) return;

            // Check the per-edge flags and mark the edges that carry a
            // vertex in their owning voxel
            int edges_with_vertices = edge_table[table_idx];
            for (int i = 0; i < 12; ++i) {
                if (edges_with_vertices & (1 << i)) {
                    int64_t xv_i = xv + edge_shifts[i][0];
                    int64_t yv_i = yv + edge_shifts[i][1];
                    int64_t zv_i = zv + edge_shifts[i][2];
                    int edge_i = edge_shifts[i][3];

                    int dxb = static_cast<int>(xv_i / resolution);
                    int dyb = static_cast<int>(yv_i / resolution);
                    int dzb = static_cast<int>(zv_i / resolution);

                    int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;

                    int64_t block_idx_i =
                            *nb_block_indices_indexer.GetDataPtr<int64_t>(
                                    workload_block_idx, nb_idx);
                    int* mesh_ptr_i = mesh_structure_indexer.GetDataPtr<int>(
                            xv_i - dxb * resolution, yv_i - dyb * resolution,
                            zv_i - dzb * resolution,
                            inv_indices_ptr[block_idx_i]);

                    // Non-atomic write, but we are safe
                    mesh_ptr_i[edge_i] = -1;
                }
            }
        });
    });

    // Pass 1: determine the valid number of vertices (if not preset)
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {}, core::Dtype::Int32,
                       block_values.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

    if (vertex_count < 0) {
        launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(int64_t widx) {
            // Natural index (0, N) -> (block_idx, voxel_idx)
            int64_t workload_block_idx = widx / resolution3;
            int64_t voxel_idx = widx % resolution3;

            // voxel_idx -> (x_voxel, y_voxel, z_voxel)
            int64_t xv, yv, zv;
            voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

            // Obtain the voxel's mesh struct ptr
            int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
                    xv, yv, zv, workload_block_idx);

            // Early quit -- no allocated vertex to compute
            if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
                mesh_struct_ptr[2] != -1) {
                return;
            }

            // Enumerate the 3 edges in the voxel
            for (int e = 0; e < 3; ++e) {
                int vertex_idx = mesh_struct_ptr[e];
                if (vertex_idx != -1) continue;

                OPEN3D_ATOMIC_ADD(count_ptr, 1);
            }
        });
#if defined(__CUDACC__)
        vertex_count = count.Item<int>();
#else
        vertex_count = (*count_ptr).load();
#endif
    }

    utility::LogDebug("Total vertex count = {}", vertex_count);
    vertices = core::Tensor({vertex_count, 3}, core::Dtype::Float32,
                            block_values.GetDevice());
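    // Sketch (editorial note, not in the original source): each voxel owns
    // the 3 edges leaving its origin corner along +x/+y/+z, so every mesh
    // vertex has exactly one owner and is computed exactly once.
    // mesh_struct_ptr[e] == -1 means "vertex needed, not yet allocated";
    // pass 2 replaces the sentinel with a unique index reserved through an
    // atomic increment:
    //
    //   int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);  // one slot per vertex
    //   mesh_struct_ptr[e] = idx;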
bool extract_normal = false; NDArrayIndexer normal_indexer; if (normals.has_value()) { extract_normal = true; normals.value().get() = core::Tensor({vertex_count, 3}, core::Dtype::Float32, block_values.GetDevice()); normal_indexer = NDArrayIndexer(normals.value().get(), 1); } NDArrayIndexer block_keys_indexer(block_keys, 1); NDArrayIndexer vertex_indexer(vertices, 1); #if defined(__CUDACC__) count = core::Tensor(std::vector<int>{0}, {}, core::Dtype::Int32, block_values.GetDevice()); count_ptr = count.GetDataPtr<int>(); #else (*count_ptr) = 0; #endif // Pass 2: extract vertices. DISPATCH_BYTESIZE_TO_VOXEL(voxel_bytesize, [&]() { bool extract_color = false; NDArrayIndexer color_indexer; if (voxel_t::HasColor() && colors.has_value()) { extract_color = true; colors.value().get() = core::Tensor({vertex_count, 3}, core::Dtype::Float32, block_values.GetDevice()); color_indexer = NDArrayIndexer(colors.value().get(), 1); } launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(int64_t widx) { auto GetVoxelAt = [&] OPEN3D_DEVICE( int xo, int yo, int zo, int curr_block_idx) -> voxel_t* { return DeviceGetVoxelAt<voxel_t>( xo, yo, zo, curr_block_idx, static_cast<int>(resolution), nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo, int curr_block_idx, float* n) { return DeviceGetNormalAt<voxel_t>( xo, yo, zo, curr_block_idx, n, static_cast<int>(resolution), voxel_size, nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; // Natural index (0, N) -> (block_idx, voxel_idx) int64_t workload_block_idx = widx / resolution3; int64_t block_idx = indices_ptr[workload_block_idx]; int64_t voxel_idx = widx % resolution3; // block_idx -> (x_block, y_block, z_block) int* block_key_ptr = block_keys_indexer.GetDataPtr<int>(block_idx); int64_t xb = static_cast<int64_t>(block_key_ptr[0]); int64_t yb = static_cast<int64_t>(block_key_ptr[1]); int64_t zb = static_cast<int64_t>(block_key_ptr[2]); // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); // global coordinate (in voxels) int64_t x = xb * resolution + xv; int64_t y = yb * resolution + yv; int64_t z = zb * resolution + zv; // Obtain voxel's mesh struct ptr int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>( xv, yv, zv, workload_block_idx); // Early quit -- no allocated vertex to compute if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 && mesh_struct_ptr[2] != -1) { return; } // Obtain voxel ptr voxel_t* voxel_ptr = voxel_block_buffer_indexer.GetDataPtr<voxel_t>( xv, yv, zv, block_idx); float tsdf_o = voxel_ptr->GetTSDF(); float no[3] = {0}, ne[3] = {0}; if (extract_normal) { GetNormalAt(static_cast<int>(xv), static_cast<int>(yv), static_cast<int>(zv), static_cast<int>(workload_block_idx), no); } // Enumerate 3 edges in the voxel for (int e = 0; e < 3; ++e) { int vertex_idx = mesh_struct_ptr[e]; if (vertex_idx != -1) continue; voxel_t* voxel_ptr_e = GetVoxelAt(static_cast<int>(xv) + (e == 0), static_cast<int>(yv) + (e == 1), static_cast<int>(zv) + (e == 2), static_cast<int>(workload_block_idx)); float tsdf_e = voxel_ptr_e->GetTSDF(); float ratio = (0 - tsdf_o) / (tsdf_e - tsdf_o); int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1); mesh_struct_ptr[e] = idx; float ratio_x = ratio * int(e == 0); float ratio_y = ratio * int(e == 1); float ratio_z = ratio * int(e == 2); float* vertex_ptr = vertex_indexer.GetDataPtr<float>(idx); vertex_ptr[0] = voxel_size * (x + ratio_x); 
vertex_ptr[1] = voxel_size * (y + ratio_y); vertex_ptr[2] = voxel_size * (z + ratio_z); if (extract_normal) { float* normal_ptr = normal_indexer.GetDataPtr<float>(idx); GetNormalAt(static_cast<int>(xv) + (e == 0), static_cast<int>(yv) + (e == 1), static_cast<int>(zv) + (e == 2), static_cast<int>(workload_block_idx), ne); float nx = (1 - ratio) * no[0] + ratio * ne[0]; float ny = (1 - ratio) * no[1] + ratio * ne[1]; float nz = (1 - ratio) * no[2] + ratio * ne[2]; float norm = static_cast<float>( sqrt(nx * nx + ny * ny + nz * nz) + 1e-5); normal_ptr[0] = nx / norm; normal_ptr[1] = ny / norm; normal_ptr[2] = nz / norm; } if (extract_color) { float* color_ptr = color_indexer.GetDataPtr<float>(idx); float r_o = voxel_ptr->GetR(); float g_o = voxel_ptr->GetG(); float b_o = voxel_ptr->GetB(); float r_e = voxel_ptr_e->GetR(); float g_e = voxel_ptr_e->GetG(); float b_e = voxel_ptr_e->GetB(); color_ptr[0] = ((1 - ratio) * r_o + ratio * r_e) / 255.0f; color_ptr[1] = ((1 - ratio) * g_o + ratio * g_e) / 255.0f; color_ptr[2] = ((1 - ratio) * b_o + ratio * b_e) / 255.0f; } } }); }); // Pass 3: connect vertices and form triangles. int triangle_count = vertex_count * 3; triangles = core::Tensor({triangle_count, 3}, core::Dtype::Int64, block_values.GetDevice()); NDArrayIndexer triangle_indexer(triangles, 1); #if defined(__CUDACC__) count = core::Tensor(std::vector<int>{0}, {}, core::Dtype::Int32, block_values.GetDevice()); count_ptr = count.GetDataPtr<int>(); #else (*count_ptr) = 0; #endif launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(int64_t widx) { // Natural index (0, N) -> (block_idx, voxel_idx) int64_t workload_block_idx = widx / resolution3; int64_t voxel_idx = widx % resolution3; // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); // Obtain voxel's mesh struct ptr int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>( xv, yv, zv, workload_block_idx); int table_idx = mesh_struct_ptr[3]; if (tri_count[table_idx] == 0) return; for (size_t tri = 0; tri < 16; tri += 3) { if (tri_table[table_idx][tri] == -1) return; int tri_idx = OPEN3D_ATOMIC_ADD(count_ptr, 1); for (size_t vertex = 0; vertex < 3; ++vertex) { int edge = tri_table[table_idx][tri + vertex]; int64_t xv_i = xv + edge_shifts[edge][0]; int64_t yv_i = yv + edge_shifts[edge][1]; int64_t zv_i = zv + edge_shifts[edge][2]; int64_t edge_i = edge_shifts[edge][3]; int dxb = static_cast<int>(xv_i / resolution); int dyb = static_cast<int>(yv_i / resolution); int dzb = static_cast<int>(zv_i / resolution); int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9; int64_t block_idx_i = *nb_block_indices_indexer.GetDataPtr<int64_t>( workload_block_idx, nb_idx); int* mesh_struct_ptr_i = mesh_structure_indexer.GetDataPtr<int>( xv_i - dxb * resolution, yv_i - dyb * resolution, zv_i - dzb * resolution, inv_indices_ptr[block_idx_i]); int64_t* triangle_ptr = triangle_indexer.GetDataPtr<int64_t>(tri_idx); triangle_ptr[2 - vertex] = mesh_struct_ptr_i[edge_i]; } } }); #if defined(__CUDACC__) triangle_count = count.Item<int>(); #else triangle_count = (*count_ptr).load(); #endif utility::LogInfo("Total triangle count = {}", triangle_count); triangles = triangles.Slice(0, 0, triangle_count); } #if defined(__CUDACC__) void EstimateRangeCUDA #else void EstimateRangeCPU #endif (const core::Tensor& block_keys, core::Tensor& range_minmax_map, const core::Tensor& intrinsics, const core::Tensor& extrinsics, int h, int w, int down_factor, int64_t block_resolution, float voxel_size, float depth_min, 
         float depth_max) {
    // TODO(wei): reserve it in a reusable buffer
    // Every 2 channels: (min, max)
    int h_down = h / down_factor;
    int w_down = w / down_factor;
    range_minmax_map = core::Tensor({h_down, w_down, 2}, core::Dtype::Float32,
                                    block_keys.GetDevice());
    NDArrayIndexer range_map_indexer(range_minmax_map, 2);

    // Every 6 channels: (v_min, u_min, v_max, u_max, z_min, z_max)
    const int fragment_size = 16;
    const int frag_buffer_size = 65535;

    // TODO(wei): explicit buffer
    core::Tensor fragment_buffer =
            core::Tensor({frag_buffer_size, 6}, core::Dtype::Float32,
                         block_keys.GetDevice());
    NDArrayIndexer frag_buffer_indexer(fragment_buffer, 1);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);

#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Dtype::Int32,
                       block_keys.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

#if defined(__CUDACC__)
    core::kernel::CUDALauncher launcher;
#else
    core::kernel::CPULauncher launcher;
    using std::max;
    using std::min;
#endif

    // Pass 0: iterate over blocks, fill in a rendering fragment array
    launcher.LaunchGeneralKernel(
            block_keys.GetLength(), [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int* key = block_keys_indexer.GetDataPtr<int>(workload_idx);

                int u_min = w_down - 1, v_min = h_down - 1, u_max = 0,
                    v_max = 0;
                float z_min = depth_max, z_max = depth_min;

                float xc, yc, zc, u, v;

                // Project 8 corners to the low-res image and form a rectangle
                for (int i = 0; i < 8; ++i) {
                    float xw = (key[0] + ((i & 1) > 0)) * block_resolution *
                               voxel_size;
                    float yw = (key[1] + ((i & 2) > 0)) * block_resolution *
                               voxel_size;
                    float zw = (key[2] + ((i & 4) > 0)) * block_resolution *
                               voxel_size;

                    w2c_transform_indexer.RigidTransform(xw, yw, zw, &xc, &yc,
                                                         &zc);
                    if (zc <= 0) continue;

                    // Project to the downsampled image buffer
                    w2c_transform_indexer.Project(xc, yc, zc, &u, &v);
                    u /= down_factor;
                    v /= down_factor;

                    v_min = min(static_cast<int>(floorf(v)), v_min);
                    v_max = max(static_cast<int>(ceilf(v)), v_max);
                    u_min = min(static_cast<int>(floorf(u)), u_min);
                    u_max = max(static_cast<int>(ceilf(u)), u_max);
                    z_min = min(z_min, zc);
                    z_max = max(z_max, zc);
                }

                v_min = max(0, v_min);
                v_max = min(h_down - 1, v_max);
                u_min = max(0, u_min);
                u_max = min(w_down - 1, u_max);

                if (v_min >= v_max || u_min >= u_max || z_min >= z_max) return;

                // Divide the rectangle into small 16x16 fragments
                int frag_v_count = ceilf(float(v_max - v_min + 1) /
                                         float(fragment_size));
                int frag_u_count = ceilf(float(u_max - u_min + 1) /
                                         float(fragment_size));

                int frag_count = frag_v_count * frag_u_count;
                // Fix: reserve frag_count slots at once; adding only 1 per
                // block would let concurrent blocks overwrite each other's
                // fragments and undercount the total.
                int frag_count_start =
                        OPEN3D_ATOMIC_ADD(count_ptr, frag_count);
                int frag_count_end = frag_count_start + frag_count;
                if (frag_count_end >= frag_buffer_size) {
                    printf("Fragment count exceeding buffer size, abort!\n");
                    // Fix: bail out instead of writing past the buffer.
                    return;
                }

                int offset = 0;
                for (int frag_v = 0; frag_v < frag_v_count; ++frag_v) {
                    for (int frag_u = 0; frag_u < frag_u_count;
                         ++frag_u, ++offset) {
                        float* frag_ptr =
                                frag_buffer_indexer.GetDataPtr<float>(
                                        frag_count_start + offset);
                        // zmin, zmax
                        frag_ptr[0] = z_min;
                        frag_ptr[1] = z_max;
                        // vmin, umin
                        frag_ptr[2] = v_min + frag_v * fragment_size;
                        frag_ptr[3] = u_min + frag_u * fragment_size;
                        // vmax, umax
                        frag_ptr[4] = min(frag_ptr[2] + fragment_size - 1,
                                          static_cast<float>(v_max));
                        frag_ptr[5] = min(frag_ptr[3] + fragment_size - 1,
                                          static_cast<float>(u_max));
                    }
                }
            });
#if defined(__CUDACC__)
    int frag_count = count[0].Item<int>();
#else
    int frag_count =
(*count_ptr).load(); #endif // Pass 0.5: Fill in range map to prepare for atomic min/max launcher.LaunchGeneralKernel( h_down * w_down, [=] OPEN3D_DEVICE(int64_t workload_idx) { int v = workload_idx / w_down; int u = workload_idx % w_down; float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v); range_ptr[0] = depth_max; range_ptr[1] = depth_min; }); // Pass 1: iterate over rendering fragment array, fill-in range launcher.LaunchGeneralKernel( frag_count * fragment_size * fragment_size, [=] OPEN3D_DEVICE(int64_t workload_idx) { int frag_idx = workload_idx / (fragment_size * fragment_size); int local_idx = workload_idx % (fragment_size * fragment_size); int dv = local_idx / fragment_size; int du = local_idx % fragment_size; float* frag_ptr = frag_buffer_indexer.GetDataPtr<float>(frag_idx); int v_min = static_cast<int>(frag_ptr[2]); int u_min = static_cast<int>(frag_ptr[3]); int v_max = static_cast<int>(frag_ptr[4]); int u_max = static_cast<int>(frag_ptr[5]); int v = v_min + dv; int u = u_min + du; if (v > v_max || u > u_max) return; float z_min = frag_ptr[0]; float z_max = frag_ptr[1]; float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v); #ifdef __CUDACC__ atomicMinf(&(range_ptr[0]), z_min); atomicMaxf(&(range_ptr[1]), z_max); #else #pragma omp critical { range_ptr[0] = min(z_min, range_ptr[0]); range_ptr[1] = max(z_max, range_ptr[1]); } #endif }); #if defined(__CUDACC__) OPEN3D_CUDA_CHECK(cudaDeviceSynchronize()); #endif } struct BlockCache { int x; int y; int z; int block_idx; inline int OPEN3D_DEVICE Check(int xin, int yin, int zin) { return (xin == x && yin == y && zin == z) ? block_idx : -1; } inline void OPEN3D_DEVICE Update(int xin, int yin, int zin, int block_idx_in) { x = xin; y = yin; z = zin; block_idx = block_idx_in; } }; #if defined(__CUDACC__) void RayCastCUDA #else void RayCastCPU #endif (std::shared_ptr<core::DeviceHashmap>& hashmap, const core::Tensor& block_values, const core::Tensor& range_map, core::Tensor& vertex_map, core::Tensor& depth_map, core::Tensor& color_map, core::Tensor& normal_map, const core::Tensor& intrinsics, const core::Tensor& extrinsics, int h, int w, int64_t block_resolution, float voxel_size, float sdf_trunc, float depth_scale, float depth_min, float depth_max, float weight_threshold) { using Key = core::Block<int, 3>; using Hash = core::BlockHash<int, 3>; #if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__) auto cuda_hashmap = std::dynamic_pointer_cast<core::StdGPUHashmap<Key, Hash>>(hashmap); if (cuda_hashmap == nullptr) { utility::LogError( "Unsupported backend: CUDA raycasting only supports STDGPU."); } auto hashmap_impl = cuda_hashmap->GetImpl(); #else auto cpu_hashmap = std::dynamic_pointer_cast<core::TBBHashmap<Key, Hash>>(hashmap); auto hashmap_impl = *cpu_hashmap->GetImpl(); #endif NDArrayIndexer voxel_block_buffer_indexer(block_values, 4); NDArrayIndexer range_map_indexer(range_map, 2); NDArrayIndexer vertex_map_indexer; NDArrayIndexer depth_map_indexer; NDArrayIndexer color_map_indexer; NDArrayIndexer normal_map_indexer; bool enable_vertex = (vertex_map.GetLength() != 0); bool enable_depth = (depth_map.GetLength() != 0); bool enable_color = (color_map.GetLength() != 0); bool enable_normal = (normal_map.GetLength() != 0); if (!enable_vertex && !enable_depth && !enable_color && !enable_normal) { utility::LogWarning("No output specified for ray casting, exit."); return; } if (enable_vertex) { vertex_map_indexer = NDArrayIndexer(vertex_map, 2); } if (enable_depth) { depth_map_indexer = NDArrayIndexer(depth_map, 2); } if 
(enable_color) { color_map_indexer = NDArrayIndexer(color_map, 2); } if (enable_normal) { normal_map_indexer = NDArrayIndexer(normal_map, 2); } TransformIndexer c2w_transform_indexer( intrinsics, t::geometry::InverseTransformation(extrinsics)); TransformIndexer w2c_transform_indexer(intrinsics, extrinsics); int64_t rows = h; int64_t cols = w; float block_size = voxel_size * block_resolution; #if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__) core::kernel::CUDALauncher launcher; #else core::kernel::CPULauncher launcher; using std::max; #endif DISPATCH_BYTESIZE_TO_VOXEL(voxel_block_buffer_indexer.ElementByteSize(), [&]() { launcher.LaunchGeneralKernel( rows * cols, [=] OPEN3D_DEVICE(int64_t workload_idx) { auto GetVoxelAtP = [&] OPEN3D_DEVICE( int x_b, int y_b, int z_b, int x_v, int y_v, int z_v, core::addr_t block_addr, BlockCache& cache) -> voxel_t* { int x_vn = (x_v + block_resolution) % block_resolution; int y_vn = (y_v + block_resolution) % block_resolution; int z_vn = (z_v + block_resolution) % block_resolution; int dx_b = Sign(x_v - x_vn); int dy_b = Sign(y_v - y_vn); int dz_b = Sign(z_v - z_vn); if (dx_b == 0 && dy_b == 0 && dz_b == 0) { return voxel_block_buffer_indexer .GetDataPtr<voxel_t>(x_v, y_v, z_v, block_addr); } else { Key key; key.Set(0, x_b + dx_b); key.Set(1, y_b + dy_b); key.Set(2, z_b + dz_b); int block_addr = cache.Check(key.Get(0), key.Get(1), key.Get(2)); if (block_addr < 0) { auto iter = hashmap_impl.find(key); if (iter == hashmap_impl.end()) return nullptr; block_addr = iter->second; cache.Update(key.Get(0), key.Get(1), key.Get(2), block_addr); } return voxel_block_buffer_indexer .GetDataPtr<voxel_t>(x_vn, y_vn, z_vn, block_addr); } }; auto GetVoxelAtT = [&] OPEN3D_DEVICE( float x_o, float y_o, float z_o, float x_d, float y_d, float z_d, float t, BlockCache& cache) -> voxel_t* { float x_g = x_o + t * x_d; float y_g = y_o + t * y_d; float z_g = z_o + t * z_d; // Block coordinate and look up int x_b = static_cast<int>(floorf(x_g / block_size)); int y_b = static_cast<int>(floorf(y_g / block_size)); int z_b = static_cast<int>(floorf(z_g / block_size)); Key key; key.Set(0, x_b); key.Set(1, y_b); key.Set(2, z_b); int block_addr = cache.Check(x_b, y_b, z_b); if (block_addr < 0) { auto iter = hashmap_impl.find(key); if (iter == hashmap_impl.end()) return nullptr; block_addr = iter->second; cache.Update(x_b, y_b, z_b, block_addr); } // Voxel coordinate and look up int x_v = int((x_g - x_b * block_size) / voxel_size); int y_v = int((y_g - y_b * block_size) / voxel_size); int z_v = int((z_g - z_b * block_size) / voxel_size); return voxel_block_buffer_indexer.GetDataPtr<voxel_t>( x_v, y_v, z_v, block_addr); }; int64_t y = workload_idx / cols; int64_t x = workload_idx % cols; float *depth_ptr = nullptr, *vertex_ptr = nullptr, *normal_ptr = nullptr, *color_ptr = nullptr; if (enable_depth) { depth_ptr = depth_map_indexer.GetDataPtr<float>(x, y); *depth_ptr = 0; } if (enable_vertex) { vertex_ptr = vertex_map_indexer.GetDataPtr<float>(x, y); vertex_ptr[0] = 0; vertex_ptr[1] = 0; vertex_ptr[2] = 0; } if (enable_color) { color_ptr = color_map_indexer.GetDataPtr<float>(x, y); color_ptr[0] = 0; color_ptr[1] = 0; color_ptr[2] = 0; } if (enable_normal) { normal_ptr = normal_map_indexer.GetDataPtr<float>(x, y); normal_ptr[0] = 0; normal_ptr[1] = 0; normal_ptr[2] = 0; } const float* range = range_map_indexer.GetDataPtr<float>(x / 8, y / 8); float t = range[0]; const float t_max = range[1]; if (t >= t_max) return; // Coordinates in camera and global float x_c = 0, y_c = 0, z_c = 0; float 
x_g = 0, y_g = 0, z_g = 0; float x_o = 0, y_o = 0, z_o = 0; // Iterative ray intersection check float t_prev = t; float tsdf_prev = -1.0f; float tsdf = 1.0; float w = 0.0; // Camera origin c2w_transform_indexer.RigidTransform(0, 0, 0, &x_o, &y_o, &z_o); // Direction c2w_transform_indexer.Unproject(static_cast<float>(x), static_cast<float>(y), 1.0f, &x_c, &y_c, &z_c); c2w_transform_indexer.RigidTransform(x_c, y_c, z_c, &x_g, &y_g, &z_g); float x_d = (x_g - x_o); float y_d = (y_g - y_o); float z_d = (z_g - z_o); BlockCache cache{0, 0, 0, -1}; bool surface_found = false; while (t < t_max) { voxel_t* voxel_ptr = GetVoxelAtT(x_o, y_o, z_o, x_d, y_d, z_d, t, cache); if (!voxel_ptr) { t_prev = t; t += block_size; } else { tsdf_prev = tsdf; tsdf = voxel_ptr->GetTSDF(); w = voxel_ptr->GetWeight(); if (tsdf_prev > 0 && w >= weight_threshold && tsdf <= 0) { surface_found = true; break; } t_prev = t; float delta = tsdf * sdf_trunc; t += delta < voxel_size ? voxel_size : delta; } } if (surface_found) { float t_intersect = (t * tsdf_prev - t_prev * tsdf) / (tsdf_prev - tsdf); x_g = x_o + t_intersect * x_d; y_g = y_o + t_intersect * y_d; z_g = z_o + t_intersect * z_d; // Trivial vertex assignment if (enable_depth) { *depth_ptr = t_intersect * depth_scale; } if (enable_vertex) { w2c_transform_indexer.RigidTransform( x_g, y_g, z_g, vertex_ptr + 0, vertex_ptr + 1, vertex_ptr + 2); } // Trilinear interpolation // TODO(wei): simplify the flow by splitting the // functions given what is enabled if (enable_color || enable_normal) { int x_b = static_cast<int>(floorf(x_g / block_size)); int y_b = static_cast<int>(floorf(y_g / block_size)); int z_b = static_cast<int>(floorf(z_g / block_size)); float x_v = (x_g - float(x_b) * block_size) / voxel_size; float y_v = (y_g - float(y_b) * block_size) / voxel_size; float z_v = (z_g - float(z_b) * block_size) / voxel_size; Key key; key.Set(0, x_b); key.Set(1, y_b); key.Set(2, z_b); int block_addr = cache.Check(x_b, y_b, z_b); if (block_addr < 0) { auto iter = hashmap_impl.find(key); if (iter == hashmap_impl.end()) return; block_addr = iter->second; cache.Update(x_b, y_b, z_b, block_addr); } int x_v_floor = static_cast<int>(floorf(x_v)); int y_v_floor = static_cast<int>(floorf(y_v)); int z_v_floor = static_cast<int>(floorf(z_v)); float ratio_x = x_v - float(x_v_floor); float ratio_y = y_v - float(y_v_floor); float ratio_z = z_v - float(z_v_floor); float sum_weight_color = 0.0; float sum_weight_normal = 0.0; for (int k = 0; k < 8; ++k) { int dx_v = (k & 1) > 0 ? 1 : 0; int dy_v = (k & 2) > 0 ? 1 : 0; int dz_v = (k & 4) > 0 ? 
1 : 0; float ratio = (dx_v * (ratio_x) + (1 - dx_v) * (1 - ratio_x)) * (dy_v * (ratio_y) + (1 - dy_v) * (1 - ratio_y)) * (dz_v * (ratio_z) + (1 - dz_v) * (1 - ratio_z)); voxel_t* voxel_ptr_k = GetVoxelAtP( x_b, y_b, z_b, x_v_floor + dx_v, y_v_floor + dy_v, z_v_floor + dz_v, block_addr, cache); if (enable_color && voxel_ptr_k && voxel_ptr_k->GetWeight() > 0) { sum_weight_color += ratio; color_ptr[0] += ratio * voxel_ptr_k->GetR(); color_ptr[1] += ratio * voxel_ptr_k->GetG(); color_ptr[2] += ratio * voxel_ptr_k->GetB(); } if (enable_normal) { for (int dim = 0; dim < 3; ++dim) { voxel_t* voxel_ptr_k_plus = GetVoxelAtP( x_b, y_b, z_b, x_v_floor + dx_v + (dim == 0), y_v_floor + dy_v + (dim == 1), z_v_floor + dz_v + (dim == 2), block_addr, cache); voxel_t* voxel_ptr_k_minus = GetVoxelAtP(x_b, y_b, z_b, x_v_floor + dx_v - (dim == 0), y_v_floor + dy_v - (dim == 1), z_v_floor + dz_v - (dim == 2), block_addr, cache); bool valid = false; if (voxel_ptr_k_plus && voxel_ptr_k_plus->GetWeight() > 0) { normal_ptr[dim] += ratio * voxel_ptr_k_plus ->GetTSDF() / (2 * voxel_size); valid = true; } if (voxel_ptr_k_minus && voxel_ptr_k_minus->GetWeight() > 0) { normal_ptr[dim] -= ratio * voxel_ptr_k_minus ->GetTSDF() / (2 * voxel_size); valid = true; } sum_weight_normal += valid ? ratio : 0; } } // if (enable_normal) } // loop over 8 neighbors if (enable_color && sum_weight_color > 0) { sum_weight_color *= 255.0; color_ptr[0] /= sum_weight_color; color_ptr[1] /= sum_weight_color; color_ptr[2] /= sum_weight_color; } if (enable_normal && sum_weight_normal > 0) { normal_ptr[0] /= sum_weight_normal; normal_ptr[1] /= sum_weight_normal; normal_ptr[2] /= sum_weight_normal; float norm = sqrt(normal_ptr[0] * normal_ptr[0] + normal_ptr[1] * normal_ptr[1] + normal_ptr[2] * normal_ptr[2]); w2c_transform_indexer.Rotate( normal_ptr[0] / norm, normal_ptr[1] / norm, normal_ptr[2] / norm, normal_ptr + 0, normal_ptr + 1, normal_ptr + 2); } } // if (color or normal) } // if (tsdf < 0) }); }); #if defined(__CUDACC__) OPEN3D_CUDA_CHECK(cudaDeviceSynchronize()); #endif } } // namespace tsdf } // namespace kernel } // namespace geometry } // namespace t } // namespace open3d
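// ----------------------------------------------------------------------------
// Editorial sketch (not part of the Open3D sources above): the ray casting
// kernel refines the surface depth by linearly interpolating between the last
// sample in front of the surface (t_prev, tsdf_prev > 0) and the first sample
// behind it (t, tsdf <= 0):
//
//   t* = (t * tsdf_prev - t_prev * tsdf) / (tsdf_prev - tsdf)
//
// Below is a minimal, self-contained version of that marching loop, with a
// hypothetical SampleTSDF callback standing in for the voxel-block lookup and
// a fixed step instead of the kernel's adaptive step
// max(tsdf * sdf_trunc, voxel_size):

#include <functional>
#include <optional>

inline std::optional<float> MarchRay(
        const std::function<std::optional<float>(float)>& SampleTSDF,
        float t, float t_max, float step) {
    float t_prev = t;
    float tsdf_prev = 1.0f;  // start assumed in front of the surface
    while (t < t_max) {
        std::optional<float> tsdf = SampleTSDF(t);
        if (!tsdf.has_value()) {  // unallocated space: skip ahead
            t_prev = t;
            t += step;
            continue;
        }
        if (tsdf_prev > 0 && *tsdf <= 0) {  // sign change: interpolate
            return (t * tsdf_prev - t_prev * *tsdf) / (tsdf_prev - *tsdf);
        }
        t_prev = t;
        tsdf_prev = *tsdf;
        t += step;
    }
    return std::nullopt;  // no surface along this ray
}
// ----------------------------------------------------------------------------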
pr67502.c
/* PR c/67502 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */
/* { dg-additional-options "-std=c99" { target c } } */

void bar (int, int);

void
foo (void)
{
#pragma omp parallel
#pragma omp for simd collapse(2)
  for (int i = 0; i < 16; ++i)
    for (int j = 0; j < 16; ++j)
      bar (i, j);
}
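/* Editorial sketch (not part of the GCC testsuite file above): with
   collapse(2) the two perfectly nested loops form a single 256-iteration
   space that is then divided among threads and SIMD lanes. Conceptually the
   construct behaves like this hand-flattened loop, where foo_flattened is a
   hypothetical name for illustration: */

void bar (int, int);

void
foo_flattened (void)
{
#pragma omp parallel for simd
  for (int k = 0; k < 16 * 16; ++k)
    {
      int i = k / 16; /* recovered outer index */
      int j = k % 16; /* recovered inner index */
      bar (i, j);
    }
}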
convolution_3x3_pack4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

static void conv3x3s1_winograd64_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = w_tm / 8 * h_tm / 8;

        // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        bottom_blob_tm.create(tiles, 64, inch, 4u * elempack, elempack, opt.workspace_allocator);

        // const float itm[8][8] = {
        //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
        //
        //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
        //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
        //
        //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
        //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
        //
        //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
        //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
        //
        //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
        // };

        // 0 = r00 - r06 + (r04 - r02) * 5.25
        // 7 = r07 - r01 + (r03 - r05) * 5.25
        //
        // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
        // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
        //
        // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        //
        // reuse r04 * 1.25
        // reuse r03 * 2.5
        // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            float tmp[8][8][4];

            // tile
            for (int i = 0; i < h_tm / 8; i++)
            {
                for (int j = 0; j < w_tm / 8; j++)
                {
                    const unsigned short* r0 = img0.row<const unsigned short>(i * 6) + (j * 6) * 4;

                    for (int m = 0; m < 8; m++)
                    {
                        float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                        float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                        float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                        float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12));
                        float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16));
                        float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20));
                        float32x4_t _r06 = vcvt_f32_bf16(vld1_u16(r0 + 24));
                        float32x4_t _r07 = vcvt_f32_bf16(vld1_u16(r0 + 28));

                        float32x4_t
_tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f); float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(tmp[5][m], _tmp5m); vst1q_f32(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 8; float* r0_tm_3 = r0_tm_0 + tiles * 12; float* r0_tm_4 = r0_tm_0 + tiles * 16; float* r0_tm_5 = r0_tm_0 + tiles * 20; float* r0_tm_6 = r0_tm_0 + tiles * 24; float* r0_tm_7 = r0_tm_0 + tiles * 28; for (int m = 0; m < 8; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp06 = vld1q_f32(tmp[m][6]); float32x4_t _tmp07 = vld1q_f32(tmp[m][7]); float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f); float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b 
= (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1q_f32(r0_tm_0, _r0tm0); vst1q_f32(r0_tm_1, _r0tm1); vst1q_f32(r0_tm_2, _r0tm2); vst1q_f32(r0_tm_3, _r0tm3); vst1q_f32(r0_tm_4, _r0tm4); vst1q_f32(r0_tm_5, _r0tm5); vst1q_f32(r0_tm_6, _r0tm6); vst1q_f32(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 32; r0_tm_1 += tiles * 32; r0_tm_2 += tiles * 32; r0_tm_3 += tiles * 32; r0_tm_4 += tiles * 32; r0_tm_5 += tiles * 32; r0_tm_6 += tiles * 32; r0_tm_7 += tiles * 32; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator); #else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v8.4s}, [%1], #16 \n" "sub %0, %0, #128 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 
{v6.4s}, [%1], #16 \n" "st1 {v10.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v11.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8); #else float* tm2p = tm2.row(i / 8); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" "sub %0, %0, #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "pld [%0, #512] \n" "vldm %0, {d16-d23} \n" // transpose 8x4 "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vtrn.32 q8, q9 \n" "vtrn.32 q10, q11 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vswp q1, q8 \n" "vswp q3, q10 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "sub %0, %0, #64 \n" "vst1.f32 {d4-d7}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i + 1 < tiles; i += 2) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 4u * elempack, elempack, opt.workspace_allocator); int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ int nn_outch = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(pp); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v5.4s, v0.s[0] \n" "fmla v21.4s, v5.4s, v0.s[1] \n" "fmla v22.4s, v5.4s, v0.s[2] \n" "fmla v23.4s, v5.4s, v0.s[3] \n" "fmla v24.4s, v5.4s, v1.s[0] \n" "fmla v25.4s, v5.4s, v1.s[1] \n" "fmla v26.4s, v5.4s, v1.s[2] \n" "fmla v27.4s, v5.4s, v1.s[3] \n" "fmla v28.4s, v5.4s, v2.s[0] \n" "fmla v29.4s, v5.4s, v2.s[1] \n" "fmla v30.4s, v5.4s, v2.s[2] \n" "fmla v31.4s, v5.4s, v2.s[3] \n" "fmla v8.4s, v6.4s, v3.s[0] \n" "fmla v9.4s, v6.4s, v3.s[1] \n" "fmla v10.4s, v6.4s, v3.s[2] \n" "fmla v11.4s, 
v6.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v12.4s, v6.4s, v0.s[0] \n" "fmla v13.4s, v6.4s, v0.s[1] \n" "fmla v14.4s, v6.4s, v0.s[2] \n" "fmla v15.4s, v6.4s, v0.s[3] \n" "fmla v16.4s, v6.4s, v1.s[0] \n" "fmla v17.4s, v6.4s, v1.s[1] \n" "fmla v18.4s, v6.4s, v1.s[2] \n" "fmla v19.4s, v6.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, v7.4s, v1.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01 "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v5.4s, v2.s[0] \n" "fmla v21.4s, v5.4s, v2.s[1] \n" "fmla v22.4s, v5.4s, v2.s[2] \n" "fmla v23.4s, v5.4s, v2.s[3] \n" "fmla v24.4s, v5.4s, v3.s[0] \n" "fmla v25.4s, v5.4s, v3.s[1] \n" "fmla v26.4s, v5.4s, v3.s[2] \n" "fmla v27.4s, v5.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v5.4s, v0.s[0] \n" "fmla v29.4s, v5.4s, v0.s[1] \n" "fmla v30.4s, v5.4s, v0.s[2] \n" "fmla v31.4s, v5.4s, v0.s[3] \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v6.4s, v1.s[1] \n" "fmla v10.4s, v6.4s, v1.s[2] \n" "fmla v11.4s, v6.4s, v1.s[3] \n" "fmla v12.4s, v6.4s, v2.s[0] \n" "fmla v13.4s, v6.4s, v2.s[1] \n" "fmla v14.4s, v6.4s, v2.s[2] \n" "fmla v15.4s, v6.4s, v2.s[3] \n" "fmla v16.4s, v6.4s, v3.s[0] \n" "fmla v17.4s, v6.4s, v3.s[1] \n" "fmla v18.4s, v6.4s, v3.s[2] \n" "fmla v19.4s, v6.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] \n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, 
v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v24.4s, v9.4s, v0.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v9.4s, v2.s[0] \n" "fmla v27.4s, v9.4s, v3.s[0] \n" "fmla v28.4s, v9.4s, v4.s[0] \n" "fmla v29.4s, v9.4s, v5.s[0] \n" "fmla v30.4s, v9.4s, v6.s[0] \n" "fmla v31.4s, v9.4s, v7.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v10.4s, v4.s[1] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v6.s[1] \n" "fmla v23.4s, v10.4s, v7.s[1] \n" "fmla v24.4s, v11.4s, v0.s[1] \n" "fmla v25.4s, v11.4s, v1.s[1] \n" "fmla v26.4s, v11.4s, v2.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v11.4s, v4.s[1] \n" "fmla v29.4s, v11.4s, v5.s[1] \n" "fmla v30.4s, v11.4s, v6.s[1] \n" "fmla v31.4s, v11.4s, v7.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v12.4s, v5.s[2] \n" "fmla v22.4s, v12.4s, v6.s[2] \n" "fmla v23.4s, v12.4s, v7.s[2] \n" "fmla v24.4s, v13.4s, v0.s[2] \n" "fmla v25.4s, v13.4s, v1.s[2] \n" "fmla v26.4s, v13.4s, v2.s[2] \n" "fmla v27.4s, v13.4s, v3.s[2] \n" "fmla v28.4s, v13.4s, v4.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v13.4s, v6.s[2] \n" "fmla v31.4s, v13.4s, v7.s[2] \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v14.4s, v4.s[3] \n" "fmla v21.4s, v14.4s, v5.s[3] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v14.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4s, v15.4s, v0.s[3] \n" "fmla v25.4s, v15.4s, v1.s[3] \n" "fmla v26.4s, v15.4s, v2.s[3] \n" "fmla v27.4s, v15.4s, v3.s[3] \n" "fmla v28.4s, v15.4s, v4.s[3] \n" "fmla v29.4s, v15.4s, v5.s[3] \n" "fmla v30.4s, v15.4s, v6.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", 
"v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v9.4s, v0.s[0] \n" "fmla v21.4s, v9.4s, v1.s[0] \n" "fmla v22.4s, v9.4s, v2.s[0] \n" "fmla v23.4s, v9.4s, v3.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v11.4s, v0.s[1] \n" "fmla v21.4s, v11.4s, v1.s[1] \n" "fmla v22.4s, v11.4s, v2.s[1] \n" "fmla v23.4s, v11.4s, v3.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v13.4s, v0.s[2] \n" "fmla v21.4s, v13.4s, v1.s[2] \n" "fmla v22.4s, v13.4s, v2.s[2] \n" "fmla v23.4s, v13.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v0.s[3] \n" "fmla v21.4s, v15.4s, v1.s[3] \n" "fmla v22.4s, v15.4s, v2.s[3] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v11.4s, v0.s[1] \n" "fmla v19.4s, v11.4s, v1.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v13.4s, v0.s[2] \n" "fmla v19.4s, v13.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v0.s[3] \n" "fmla v19.4s, 
v15.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n" // r0 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v11.4s, v0.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v13.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); #if __aarch64__ const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); #else const Mat kernel0_tm = kernel_tm.channel(p); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, 
v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] \n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // 
%2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" "vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %1!, {d24-d31} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q10, q4, d4[0] \n" "vmla.f32 q11, q4, d6[0] \n" "vmla.f32 q8, q5, d0[1] \n" 
"vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d4[1] \n" "vmla.f32 q11, q5, d6[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d7[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "vmla.f32 q10, q7, d5[1] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif } for (; i + 1 < tiles; i += 2) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "0: \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9"); #endif } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16"); #else asm volatile( "veor q8, q8 \n" "0: \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"); #endif } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); float tmp[6][8][4]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 8; const float* output0_tm_3 = output0_tm_0 + tiles * 12; const float* output0_tm_4 = output0_tm_0 + tiles * 16; const float* output0_tm_5 = output0_tm_0 + tiles * 20; const float* output0_tm_6 = output0_tm_0 + tiles * 24; const float* output0_tm_7 = output0_tm_0 + tiles * 28; unsigned short* output0 = out0.row<unsigned short>(i * 6) + (j * 6) * 4; // TODO neon optimize for (int m = 0; m < 8; m++) { float32x4_t _out0tm0 = vld1q_f32(output0_tm_0); float32x4_t _out0tm1 = vld1q_f32(output0_tm_1); float32x4_t _out0tm2 = vld1q_f32(output0_tm_2); float32x4_t _out0tm3 = vld1q_f32(output0_tm_3); float32x4_t _out0tm4 = vld1q_f32(output0_tm_4); float32x4_t _out0tm5 = vld1q_f32(output0_tm_5); float32x4_t _out0tm6 = vld1q_f32(output0_tm_6); float32x4_t _out0tm7 = vld1q_f32(output0_tm_7); float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2); float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4); float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6); float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] - output0_tm[6]; float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)); float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); 
                        vst1q_f32(tmp[0][m], _tmp0m);
                        vst1q_f32(tmp[2][m], _tmp2m);
                        vst1q_f32(tmp[4][m], _tmp4m);

                        // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
                        // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
                        // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c * 2;

                        float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                        float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                        float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));

                        vst1q_f32(tmp[1][m], _tmp1m);
                        vst1q_f32(tmp[3][m], _tmp3m);
                        vst1q_f32(tmp[5][m], _tmp5m);

                        // tmp[1][m] = tmp135a + tmp135b * 2 + tmp135c * 16;
                        // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
                        // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;

                        output0_tm_0 += tiles * 32;
                        output0_tm_1 += tiles * 32;
                        output0_tm_2 += tiles * 32;
                        output0_tm_3 += tiles * 32;
                        output0_tm_4 += tiles * 32;
                        output0_tm_5 += tiles * 32;
                        output0_tm_6 += tiles * 32;
                        output0_tm_7 += tiles * 32;
                    }

                    for (int m = 0; m < 6; m++)
                    {
                        float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                        float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                        float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                        float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                        float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                        float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                        float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                        float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                        float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
                        float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);

                        // float tmp024a = tmp0[1] + tmp0[2];
                        // float tmp135a = tmp0[1] - tmp0[2];

                        float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
                        float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);

                        // float tmp024b = tmp0[3] + tmp0[4];
                        // float tmp135b = tmp0[3] - tmp0[4];

                        float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
                        float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);

                        // float tmp024c = tmp0[5] + tmp0[6];
                        // float tmp135c = tmp0[5] - tmp0[6];

                        float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
                        float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                        float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));

                        // narrow the fp32 results to bf16 on store
                        vst1_u16(output0, vcvt_bf16_f32(_out00));
                        vst1_u16(output0 + 8, vcvt_bf16_f32(_out02));
                        vst1_u16(output0 + 16, vcvt_bf16_f32(_out04));

                        // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
                        // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
                        // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c * 2;

                        float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                        float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                        float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));

                        vst1_u16(output0 + 4, vcvt_bf16_f32(_out01));
                        vst1_u16(output0 + 12, vcvt_bf16_f32(_out03));
                        vst1_u16(output0 + 20, vcvt_bf16_f32(_out05));

                        // output0[1] = bias0 + tmp135a + tmp135b * 2 + tmp135c * 16;
                        // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
                        // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;

                        output0 += outw * 4;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
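
// A minimal scalar sketch of the F(4,3) input transform that the pack4 NEON
// code below vectorises; winograd42_input_transform_ref is a hypothetical
// reference helper added for illustration only and is never called. It applies
// the same 1D transform (the itm rows documented in the function below) first
// along the rows of a 6x6 tile and then along its columns, i.e. BT * d * B.
static inline void winograd42_input_transform_ref(const float d[6][6], float out[6][6])
{
    float tmp[6][6]; // transposed intermediate, like the NEON code's tmp[6][6][4]

    for (int m = 0; m < 6; m++)
    {
        const float* r = d[m];
        tmp[0][m] = 4.f * r[0] - 5.f * r[2] + r[4];
        tmp[1][m] = (r[3] + r[4]) - 4.f * (r[1] + r[2]);
        tmp[2][m] = (r[4] - r[3]) + 4.f * (r[1] - r[2]);
        tmp[3][m] = (r[4] - r[2]) - 2.f * (r[1] - r[3]);
        tmp[4][m] = (r[4] - r[2]) + 2.f * (r[1] - r[3]);
        tmp[5][m] = 4.f * r[1] - 5.f * r[3] + r[5];
    }

    for (int m = 0; m < 6; m++)
    {
        const float* t = tmp[m];
        out[0][m] = 4.f * t[0] - 5.f * t[2] + t[4];
        out[1][m] = (t[3] + t[4]) - 4.f * (t[1] + t[2]);
        out[2][m] = (t[4] - t[3]) + 4.f * (t[1] - t[2]);
        out[3][m] = (t[4] - t[2]) - 2.f * (t[1] - t[3]);
        out[4][m] = (t[4] - t[2]) + 2.f * (t[1] - t[3]);
        out[5][m] = 4.f * t[1] - 5.f * t[3] + t[5];
    }
}
// Each vmlaq_n_f32 / vmlsq_n_f32 pair in the NEON version computes one of
// these scalar lines for four packed channels at once.
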
static void conv3x3s1_winograd42_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2: each 4x4 output tile reads a 6x6 input window with 2-pixel overlap
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;

        bottom_blob_tm.create(tiles, 36, inch, 4u * elempack, elempack, opt.workspace_allocator);

        // const float itm[6][6] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r04 + r03
        // 2 = 4 * (r01 - r02) + r04 - r03
        // 3 = -2 * (r01 - r03) + r04 - r02
        // 4 = 2 * (r01 - r03) + r04 - r02
        // 5 = 4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            float tmp[6][6][4];

            // tile
            for (int i = 0; i < h_tm / 6; i++)
            {
                for (int j = 0; j < w_tm / 6; j++)
                {
                    const unsigned short* r0 = img0.row<const unsigned short>(i * 4) + (j * 4) * 4;

                    for (int m = 0; m < 6; m++)
                    {
                        // widen the bf16 input back to fp32 before transforming
                        float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                        float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                        float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                        float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12));
                        float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16));
                        float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20));

                        float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f);
                        float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f);
                        float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f);
                        float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                        float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                        float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f);

                        vst1q_f32(tmp[0][m], _tmp0m);
                        vst1q_f32(tmp[1][m], _tmp1m);
                        vst1q_f32(tmp[2][m], _tmp2m);
                        vst1q_f32(tmp[3][m], _tmp3m);
                        vst1q_f32(tmp[4][m], _tmp4m);
                        vst1q_f32(tmp[5][m], _tmp5m);

                        r0 += w * 4;
                    }

                    float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 6 + j) * 4;
                    float* r0_tm_1 = r0_tm_0 + tiles * 4;
                    float* r0_tm_2 = r0_tm_0 + tiles * 8;
                    float* r0_tm_3 = r0_tm_0 + tiles * 12;
                    float* r0_tm_4 = r0_tm_0 + tiles * 16;
                    float* r0_tm_5 = r0_tm_0 + tiles * 20;

                    for (int m = 0; m < 6; m++)
                    {
                        float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                        float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                        float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                        float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                        float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                        float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                        float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                        float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f);
                        float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f);
                        float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02),
vsubq_f32(_tmp01, _tmp03), 2.f); float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f); float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f); vst1q_f32(r0_tm_0, _r0tm0); vst1q_f32(r0_tm_1, _r0tm1); vst1q_f32(r0_tm_2, _r0tm2); vst1q_f32(r0_tm_3, _r0tm3); vst1q_f32(r0_tm_4, _r0tm4); vst1q_f32(r0_tm_5, _r0tm5); r0_tm_0 += tiles * 24; r0_tm_1 += tiles * 24; r0_tm_2 += tiles * 24; r0_tm_3 += tiles * 24; r0_tm_4 += tiles * 24; r0_tm_5 += tiles * 24; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, 4u * elempack, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator); #else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v8.4s}, [%1], #16 \n" "sub %0, %0, #128 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v10.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v11.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8); #else float* tm2p = tm2.row(i / 8); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm 
volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" "sub %0, %0, #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "pld [%0, #512] \n" "vldm %0, {d16-d23} \n" // transpose 8x4 "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vtrn.32 q8, q9 \n" "vtrn.32 q10, q11 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vswp q1, q8 \n" "vswp q3, q10 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "sub %0, %0, #64 \n" "vst1.f32 {d4-d7}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i + 1 < tiles; i += 2) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator); int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ int nn_outch = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(pp); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v5.4s, v0.s[0] \n" "fmla v21.4s, v5.4s, v0.s[1] \n" "fmla v22.4s, v5.4s, v0.s[2] \n" "fmla v23.4s, v5.4s, v0.s[3] \n" "fmla v24.4s, v5.4s, v1.s[0] \n" "fmla v25.4s, v5.4s, v1.s[1] \n" "fmla v26.4s, v5.4s, v1.s[2] \n" "fmla v27.4s, v5.4s, v1.s[3] \n" "fmla v28.4s, v5.4s, v2.s[0] \n" "fmla v29.4s, v5.4s, v2.s[1] \n" "fmla v30.4s, v5.4s, v2.s[2] \n" "fmla v31.4s, v5.4s, v2.s[3] \n" "fmla v8.4s, v6.4s, v3.s[0] \n" "fmla v9.4s, v6.4s, v3.s[1] \n" "fmla v10.4s, v6.4s, v3.s[2] \n" "fmla v11.4s, v6.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v12.4s, v6.4s, v0.s[0] \n" "fmla v13.4s, v6.4s, v0.s[1] \n" "fmla v14.4s, v6.4s, v0.s[2] \n" "fmla v15.4s, v6.4s, v0.s[3] \n" "fmla v16.4s, v6.4s, v1.s[0] \n" "fmla v17.4s, v6.4s, v1.s[1] \n" "fmla v18.4s, v6.4s, v1.s[2] \n" "fmla v19.4s, v6.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, 
v7.4s, v1.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01 "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v5.4s, v2.s[0] \n" "fmla v21.4s, v5.4s, v2.s[1] \n" "fmla v22.4s, v5.4s, v2.s[2] \n" "fmla v23.4s, v5.4s, v2.s[3] \n" "fmla v24.4s, v5.4s, v3.s[0] \n" "fmla v25.4s, v5.4s, v3.s[1] \n" "fmla v26.4s, v5.4s, v3.s[2] \n" "fmla v27.4s, v5.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v5.4s, v0.s[0] \n" "fmla v29.4s, v5.4s, v0.s[1] \n" "fmla v30.4s, v5.4s, v0.s[2] \n" "fmla v31.4s, v5.4s, v0.s[3] \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v6.4s, v1.s[1] \n" "fmla v10.4s, v6.4s, v1.s[2] \n" "fmla v11.4s, v6.4s, v1.s[3] \n" "fmla v12.4s, v6.4s, v2.s[0] \n" "fmla v13.4s, v6.4s, v2.s[1] \n" "fmla v14.4s, v6.4s, v2.s[2] \n" "fmla v15.4s, v6.4s, v2.s[3] \n" "fmla v16.4s, v6.4s, v3.s[0] \n" "fmla v17.4s, v6.4s, v3.s[1] \n" "fmla v18.4s, v6.4s, v3.s[2] \n" "fmla v19.4s, v6.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] \n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7 "fmla 
v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v24.4s, v9.4s, v0.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v9.4s, v2.s[0] \n" "fmla v27.4s, v9.4s, v3.s[0] \n" "fmla v28.4s, v9.4s, v4.s[0] \n" "fmla v29.4s, v9.4s, v5.s[0] \n" "fmla v30.4s, v9.4s, v6.s[0] \n" "fmla v31.4s, v9.4s, v7.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v10.4s, v4.s[1] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v6.s[1] \n" "fmla v23.4s, v10.4s, v7.s[1] \n" "fmla v24.4s, v11.4s, v0.s[1] \n" "fmla v25.4s, v11.4s, v1.s[1] \n" "fmla v26.4s, v11.4s, v2.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v11.4s, v4.s[1] \n" "fmla v29.4s, v11.4s, v5.s[1] \n" "fmla v30.4s, v11.4s, v6.s[1] \n" "fmla v31.4s, v11.4s, v7.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v12.4s, v5.s[2] \n" "fmla v22.4s, v12.4s, v6.s[2] \n" "fmla v23.4s, v12.4s, v7.s[2] \n" "fmla v24.4s, v13.4s, v0.s[2] \n" "fmla v25.4s, v13.4s, v1.s[2] \n" "fmla v26.4s, v13.4s, v2.s[2] \n" "fmla v27.4s, v13.4s, v3.s[2] \n" "fmla v28.4s, v13.4s, v4.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v13.4s, v6.s[2] \n" "fmla v31.4s, v13.4s, v7.s[2] \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v14.4s, v4.s[3] \n" "fmla v21.4s, v14.4s, v5.s[3] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v14.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4s, v15.4s, v0.s[3] \n" "fmla v25.4s, v15.4s, v1.s[3] \n" "fmla v26.4s, v15.4s, v2.s[3] \n" "fmla v27.4s, v15.4s, v3.s[3] \n" "fmla v28.4s, v15.4s, v4.s[3] \n" "fmla v29.4s, v15.4s, v5.s[3] \n" "fmla v30.4s, v15.4s, v6.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // 
w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v9.4s, v0.s[0] \n" "fmla v21.4s, v9.4s, v1.s[0] \n" "fmla v22.4s, v9.4s, v2.s[0] \n" "fmla v23.4s, v9.4s, v3.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v11.4s, v0.s[1] \n" "fmla v21.4s, v11.4s, v1.s[1] \n" "fmla v22.4s, v11.4s, v2.s[1] \n" "fmla v23.4s, v11.4s, v3.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v13.4s, v0.s[2] \n" "fmla v21.4s, v13.4s, v1.s[2] \n" "fmla v22.4s, v13.4s, v2.s[2] \n" "fmla v23.4s, v13.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v0.s[3] \n" "fmla v21.4s, v15.4s, v1.s[3] \n" "fmla v22.4s, v15.4s, v2.s[3] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v11.4s, v0.s[1] \n" "fmla v19.4s, v11.4s, v1.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v13.4s, v0.s[2] \n" "fmla v19.4s, v13.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v0.s[3] \n" "fmla v19.4s, v15.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n" // r0 "prfm 
pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v11.4s, v0.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v13.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); #if __aarch64__ const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); #else const Mat kernel0_tm = kernel_tm.channel(p); #endif for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] \n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, 
v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 
q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" "vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %1!, {d24-d31} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q10, q4, d4[0] \n" "vmla.f32 q11, q4, d6[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d4[1] \n" "vmla.f32 q11, q5, d6[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d7[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "vmla.f32 q10, q7, d5[1] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif } for (; i + 1 < tiles; i += 2) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else const 
float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "0: \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9"); #endif } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16"); #else asm volatile( "veor q8, q8 \n" "0: \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"); #endif } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); float tmp[4][6][4]; // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 6 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 8; const float* output0_tm_3 = output0_tm_0 + tiles * 12; const float* output0_tm_4 = output0_tm_0 + tiles * 16; const float* output0_tm_5 = output0_tm_0 + tiles * 20; unsigned short* output0 = out0.row<unsigned short>(i * 4) + (j * 4) * 4; // TODO neon optimize for (int m = 0; m < 6; m++) { float32x4_t _out0tm0 = vld1q_f32(output0_tm_0); float32x4_t _out0tm1 = vld1q_f32(output0_tm_1); float32x4_t _out0tm2 = vld1q_f32(output0_tm_2); float32x4_t _out0tm3 = vld1q_f32(output0_tm_3); float32x4_t _out0tm4 = vld1q_f32(output0_tm_4); float32x4_t _out0tm5 = vld1q_f32(output0_tm_5); float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2); float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2); float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4); float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4); float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b); float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f); float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f); float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); vst1q_f32(tmp[3][m], _tmp3m); output0_tm_0 += tiles * 24; output0_tm_1 += tiles * 24; output0_tm_2 += tiles * 24; output0_tm_3 += tiles * 24; output0_tm_4 += tiles * 24; output0_tm_5 += tiles * 24; } for (int m = 0; m < 4; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02); float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02); float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04); float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04); float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b)); float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f)); 
float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f)); float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f)); vst1_u16(output0, vcvt_bf16_f32(_out00)); vst1_u16(output0 + 4, vcvt_bf16_f32(_out01)); vst1_u16(output0 + 8, vcvt_bf16_f32(_out02)); vst1_u16(output0 + 12, vcvt_bf16_f32(_out03)); output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator); const int tailstep = (w - 2 * outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob_fp32.channel(get_omp_thread_num()); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); int q = 0; for (; q < inch - 1; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); const unsigned short* kptr = (const unsigned short*)kernel.channel(p).row<const unsigned short>(q); #if __aarch64__ // 16 * 9 uint16x8_t _k00_01 = vld1q_u16(kptr); uint16x8_t _k00_23 = vld1q_u16(kptr + 8); uint16x8_t _k01_01 = vld1q_u16(kptr + 16); uint16x8_t _k01_23 = vld1q_u16(kptr + 24); uint16x8_t _k02_01 = vld1q_u16(kptr + 32); uint16x8_t _k02_23 = vld1q_u16(kptr + 40); uint16x8_t _k10_01 = vld1q_u16(kptr + 48); uint16x8_t _k10_23 = vld1q_u16(kptr + 56); uint16x8_t _k11_01 = vld1q_u16(kptr + 64); uint16x8_t _k11_23 = vld1q_u16(kptr + 72); uint16x8_t _k12_01 = vld1q_u16(kptr + 80); uint16x8_t _k12_23 = vld1q_u16(kptr + 88); uint16x8_t _k20_01 = vld1q_u16(kptr + 96); uint16x8_t _k20_23 = vld1q_u16(kptr + 104); uint16x8_t _k21_01 = vld1q_u16(kptr + 112); uint16x8_t _k21_23 = vld1q_u16(kptr + 120); uint16x8_t _k22_01 = vld1q_u16(kptr + 128); uint16x8_t _k22_23 = vld1q_u16(kptr + 136); #endif // __aarch64__ int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0] \n" // sum0 sum1 sum2 sum3 "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%1], #64 \n" // r00 r01 r02 r03 "shll v0.4s, v4.4h, #16 \n" "shll2 v1.4s, v4.8h, #16 \n" "shll v2.4s, v5.4h, #16 \n" "shll2 v3.4s, v5.8h, #16 \n" "shll v4.4s, v6.4h, #16 \n" "shll2 v5.4s, v6.8h, #16 \n" "shll v6.4s, v7.4h, #16 \n" "shll2 v7.4s, v7.8h, #16 \n" "shll v8.4s, %8.4h, #16 \n" "shll2 v9.4s, %8.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[0] \n" "fmla v11.4s, v8.4s, v2.s[0] \n" "fmla v12.4s, v8.4s, v4.s[0] \n" "fmla v13.4s, v8.4s, v6.s[0] \n" "fmla v10.4s, v9.4s, v0.s[1] \n" "fmla v11.4s, v9.4s, v2.s[1] \n" "fmla v12.4s, v9.4s, v4.s[1] \n" "fmla v13.4s, v9.4s, v6.s[1] \n" "shll v8.4s, %9.4h, #16 \n" "shll2 v9.4s, %9.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[2] \n" "fmla v11.4s, v8.4s, v2.s[2] \n" "fmla v12.4s, v8.4s, v4.s[2] \n" "fmla v13.4s, v8.4s, v6.s[2] \n" "fmla v10.4s, v9.4s, v0.s[3] \n" "fmla v11.4s, v9.4s, v2.s[3] \n" 
"fmla v12.4s, v9.4s, v4.s[3] \n" "fmla v13.4s, v9.4s, v6.s[3] \n" "shll v8.4s, %10.4h, #16 \n" "shll2 v9.4s, %10.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[0] \n" "fmla v11.4s, v8.4s, v3.s[0] \n" "fmla v12.4s, v8.4s, v5.s[0] \n" "fmla v13.4s, v8.4s, v7.s[0] \n" "fmla v10.4s, v9.4s, v1.s[1] \n" "fmla v11.4s, v9.4s, v3.s[1] \n" "fmla v12.4s, v9.4s, v5.s[1] \n" "fmla v13.4s, v9.4s, v7.s[1] \n" "shll v8.4s, %11.4h, #16 \n" "shll2 v9.4s, %11.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[2] \n" "fmla v11.4s, v8.4s, v3.s[2] \n" "fmla v12.4s, v8.4s, v5.s[2] \n" "fmla v13.4s, v8.4s, v7.s[2] \n" "fmla v10.4s, v9.4s, v1.s[3] \n" "fmla v11.4s, v9.4s, v3.s[3] \n" "fmla v12.4s, v9.4s, v5.s[3] \n" "fmla v13.4s, v9.4s, v7.s[3] \n" "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1] \n" // r08 "shll v0.4s, v0.4h, #16 \n" "shll v8.4s, %12.4h, #16 \n" "shll2 v9.4s, %12.8h, #16 \n" "fmla v10.4s, v8.4s, v2.s[0] \n" "fmla v11.4s, v8.4s, v4.s[0] \n" "fmla v12.4s, v8.4s, v6.s[0] \n" "fmla v13.4s, v8.4s, v0.s[0] \n" "fmla v10.4s, v9.4s, v2.s[1] \n" "fmla v11.4s, v9.4s, v4.s[1] \n" "fmla v12.4s, v9.4s, v6.s[1] \n" "fmla v13.4s, v9.4s, v0.s[1] \n" "shll v8.4s, %13.4h, #16 \n" "shll2 v9.4s, %13.8h, #16 \n" "fmla v10.4s, v8.4s, v2.s[2] \n" "fmla v11.4s, v8.4s, v4.s[2] \n" "fmla v12.4s, v8.4s, v6.s[2] \n" "fmla v13.4s, v8.4s, v0.s[2] \n" "fmla v10.4s, v9.4s, v2.s[3] \n" "fmla v11.4s, v9.4s, v4.s[3] \n" "fmla v12.4s, v9.4s, v6.s[3] \n" "fmla v13.4s, v9.4s, v0.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r10 r11 r12 r13 "shll v0.4s, v4.4h, #16 \n" "shll2 v1.4s, v4.8h, #16 \n" "shll v2.4s, v5.4h, #16 \n" "shll2 v3.4s, v5.8h, #16 \n" "shll v4.4s, v6.4h, #16 \n" "shll2 v5.4s, v6.8h, #16 \n" "shll v6.4s, v7.4h, #16 \n" "shll2 v7.4s, v7.8h, #16 \n" "shll v8.4s, %14.4h, #16 \n" "shll2 v9.4s, %14.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[0] \n" "fmla v11.4s, v8.4s, v2.s[0] \n" "fmla v12.4s, v8.4s, v4.s[0] \n" "fmla v13.4s, v8.4s, v6.s[0] \n" "fmla v10.4s, v9.4s, v0.s[1] \n" "fmla v11.4s, v9.4s, v2.s[1] \n" "fmla v12.4s, v9.4s, v4.s[1] \n" "fmla v13.4s, v9.4s, v6.s[1] \n" "shll v8.4s, %15.4h, #16 \n" "shll2 v9.4s, %15.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[2] \n" "fmla v11.4s, v8.4s, v2.s[2] \n" "fmla v12.4s, v8.4s, v4.s[2] \n" "fmla v13.4s, v8.4s, v6.s[2] \n" "fmla v10.4s, v9.4s, v0.s[3] \n" "fmla v11.4s, v9.4s, v2.s[3] \n" "fmla v12.4s, v9.4s, v4.s[3] \n" "fmla v13.4s, v9.4s, v6.s[3] \n" "shll v8.4s, %16.4h, #16 \n" "shll2 v9.4s, %16.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[0] \n" "fmla v11.4s, v8.4s, v3.s[0] \n" "fmla v12.4s, v8.4s, v5.s[0] \n" "fmla v13.4s, v8.4s, v7.s[0] \n" "fmla v10.4s, v9.4s, v1.s[1] \n" "fmla v11.4s, v9.4s, v3.s[1] \n" "fmla v12.4s, v9.4s, v5.s[1] \n" "fmla v13.4s, v9.4s, v7.s[1] \n" "shll v8.4s, %17.4h, #16 \n" "shll2 v9.4s, %17.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[2] \n" "fmla v11.4s, v8.4s, v3.s[2] \n" "fmla v12.4s, v8.4s, v5.s[2] \n" "fmla v13.4s, v8.4s, v7.s[2] \n" "fmla v10.4s, v9.4s, v1.s[3] \n" "fmla v11.4s, v9.4s, v3.s[3] \n" "fmla v12.4s, v9.4s, v5.s[3] \n" "fmla v13.4s, v9.4s, v7.s[3] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2] \n" // r18 "shll v0.4s, v0.4h, #16 \n" "shll v8.4s, %18.4h, #16 \n" "shll2 v9.4s, %18.8h, #16 \n" "fmla v10.4s, v8.4s, v2.s[0] \n" "fmla v11.4s, v8.4s, v4.s[0] \n" "fmla v12.4s, v8.4s, v6.s[0] \n" "fmla v13.4s, v8.4s, v0.s[0] \n" "fmla v10.4s, v9.4s, v2.s[1] \n" "fmla v11.4s, v9.4s, v4.s[1] \n" "fmla v12.4s, v9.4s, v6.s[1] \n" "fmla v13.4s, v9.4s, v0.s[1] \n" "shll v8.4s, %19.4h, #16 \n" "shll2 v9.4s, %19.8h, #16 \n" "fmla v10.4s, 
v8.4s, v2.s[2] \n" "fmla v11.4s, v8.4s, v4.s[2] \n" "fmla v12.4s, v8.4s, v6.s[2] \n" "fmla v13.4s, v8.4s, v0.s[2] \n" "fmla v10.4s, v9.4s, v2.s[3] \n" "fmla v11.4s, v9.4s, v4.s[3] \n" "fmla v12.4s, v9.4s, v6.s[3] \n" "fmla v13.4s, v9.4s, v0.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" // r20 r21 r22 r23 "shll v0.4s, v4.4h, #16 \n" "shll2 v1.4s, v4.8h, #16 \n" "shll v2.4s, v5.4h, #16 \n" "shll2 v3.4s, v5.8h, #16 \n" "shll v4.4s, v6.4h, #16 \n" "shll2 v5.4s, v6.8h, #16 \n" "shll v6.4s, v7.4h, #16 \n" "shll2 v7.4s, v7.8h, #16 \n" "shll v8.4s, %20.4h, #16 \n" "shll2 v9.4s, %20.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[0] \n" "fmla v11.4s, v8.4s, v2.s[0] \n" "fmla v12.4s, v8.4s, v4.s[0] \n" "fmla v13.4s, v8.4s, v6.s[0] \n" "fmla v10.4s, v9.4s, v0.s[1] \n" "fmla v11.4s, v9.4s, v2.s[1] \n" "fmla v12.4s, v9.4s, v4.s[1] \n" "fmla v13.4s, v9.4s, v6.s[1] \n" "shll v8.4s, %21.4h, #16 \n" "shll2 v9.4s, %21.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[2] \n" "fmla v11.4s, v8.4s, v2.s[2] \n" "fmla v12.4s, v8.4s, v4.s[2] \n" "fmla v13.4s, v8.4s, v6.s[2] \n" "fmla v10.4s, v9.4s, v0.s[3] \n" "fmla v11.4s, v9.4s, v2.s[3] \n" "fmla v12.4s, v9.4s, v4.s[3] \n" "fmla v13.4s, v9.4s, v6.s[3] \n" "shll v8.4s, %22.4h, #16 \n" "shll2 v9.4s, %22.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[0] \n" "fmla v11.4s, v8.4s, v3.s[0] \n" "fmla v12.4s, v8.4s, v5.s[0] \n" "fmla v13.4s, v8.4s, v7.s[0] \n" "fmla v10.4s, v9.4s, v1.s[1] \n" "fmla v11.4s, v9.4s, v3.s[1] \n" "fmla v12.4s, v9.4s, v5.s[1] \n" "fmla v13.4s, v9.4s, v7.s[1] \n" "shll v8.4s, %23.4h, #16 \n" "shll2 v9.4s, %23.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[2] \n" "fmla v11.4s, v8.4s, v3.s[2] \n" "fmla v12.4s, v8.4s, v5.s[2] \n" "fmla v13.4s, v8.4s, v7.s[2] \n" "fmla v10.4s, v9.4s, v1.s[3] \n" "fmla v11.4s, v9.4s, v3.s[3] \n" "fmla v12.4s, v9.4s, v5.s[3] \n" "fmla v13.4s, v9.4s, v7.s[3] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3] \n" // r28 "shll v0.4s, v0.4h, #16 \n" "shll v8.4s, %24.4h, #16 \n" "shll2 v9.4s, %24.8h, #16 \n" "fmla v10.4s, v8.4s, v2.s[0] \n" "fmla v11.4s, v8.4s, v4.s[0] \n" "fmla v12.4s, v8.4s, v6.s[0] \n" "fmla v13.4s, v8.4s, v0.s[0] \n" "fmla v10.4s, v9.4s, v2.s[1] \n" "fmla v11.4s, v9.4s, v4.s[1] \n" "fmla v12.4s, v9.4s, v6.s[1] \n" "fmla v13.4s, v9.4s, v0.s[1] \n" "shll v8.4s, %25.4h, #16 \n" "shll2 v9.4s, %25.8h, #16 \n" "fmla v10.4s, v8.4s, v2.s[2] \n" "fmla v11.4s, v8.4s, v4.s[2] \n" "fmla v12.4s, v8.4s, v6.s[2] \n" "fmla v13.4s, v8.4s, v0.s[2] \n" "fmla v10.4s, v9.4s, v2.s[3] \n" "fmla v11.4s, v9.4s, v4.s[3] \n" "fmla v12.4s, v9.4s, v6.s[3] \n" "fmla v13.4s, v9.4s, v0.s[3] \n" "st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_01), // %8 "w"(_k00_23), // %9 "w"(_k01_01), // %10 "w"(_k01_23), // %11 "w"(_k02_01), // %12 "w"(_k02_23), // %13 "w"(_k10_01), // %14 "w"(_k10_23), // %15 "w"(_k11_01), // %16 "w"(_k11_23), // %17 "w"(_k12_01), // %18 "w"(_k12_23), // %19 "w"(_k20_01), // %20 "w"(_k20_23), // %21 "w"(_k21_01), // %22 "w"(_k21_23), // %23 "w"(_k22_01), // %24 "w"(_k22_23) // %25 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); #else // __aarch64__ asm volatile( "pld [%0, #512] \n" "vldm %0, {d24-d31} \n" // sum0 sum1 sum2 sum3 "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" // r00 r01 r02 r03 r04 r05 r06 r07 "vshll.u16 q0, d8, #16 \n" "vshll.u16 q1, d9, #16 \n" "vshll.u16 q2, d10, #16 \n" "vshll.u16 q3, d11, #16 \n" "vshll.u16 q4, 
d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%1, #64] \n" "vld1.f32 {d1}, [%1 :64] \n" // r08 "vshll.u16 q0, d1, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%2, #512] \n" "vldm %2!, {d8-d15} \n" // r10 r11 r12 r13 r14 r15 r16 r17 "vshll.u16 q0, d8, #16 \n" "vshll.u16 q1, d9, #16 \n" "vshll.u16 q2, d10, #16 \n" "vshll.u16 q3, d11, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%2, #64] \n" "vld1.f32 {d1}, [%2 :64] \n" // r18 "vshll.u16 q0, d1, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%3, #256] \n" "vldm %3!, {d8-d15} \n" // r20 r21 r22 r23 r24 r25 r26 r27 "vshll.u16 q0, d8, #16 \n" "vshll.u16 q1, d9, #16 \n" "vshll.u16 q2, d10, #16 \n" "vshll.u16 q3, d11, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%3, #64] \n" "vld1.f32 {d1}, [%3 :64] \n" // r28 "vshll.u16 q0, d1, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" // "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128] \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "sub %4, %4, #256 \n" // kptr -= 8 * 16; "vstm %0!, {d24-d31} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v12.4s, v13.4s}, [%0] \n" // sum0 sum1 "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" // r00 r01 r02 r03 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v6.4s, %8.4h, #16 \n" "shll2 v7.4s, %8.8h, #16 \n" "shll v8.4s, %9.4h, #16 \n" "shll2 v9.4s, %9.8h, #16 \n" "fmul v10.4s, v6.4s, v0.s[0] \n" "fmul v11.4s, v6.4s, v2.s[0] \n" "fmla v12.4s, v7.4s, v0.s[1] \n" "fmla v13.4s, v7.4s, v2.s[1] \n" "fmla v10.4s, v8.4s, v0.s[2] \n" "fmla v11.4s, v8.4s, v2.s[2] \n" "fmla v12.4s, v9.4s, v0.s[3] \n" "fmla v13.4s, v9.4s, v2.s[3] \n" "prfm pldl1keep, [%1, #64] \n" "ld1 {v4.4h}, [%1] \n" // r04 "shll v4.4s, v4.4h, #16 \n" "shll v6.4s, %10.4h, #16 \n" "shll2 v7.4s, %10.8h, #16 \n" "shll v8.4s, %11.4h, #16 \n" "shll2 v9.4s, %11.8h, #16 \n" "fmla v10.4s, v6.4s, v1.s[0] \n" "fmla v11.4s, v6.4s, v3.s[0] \n" "fmla v12.4s, v7.4s, v1.s[1] \n" "fmla v13.4s, v7.4s, v3.s[1] \n" "fmla v10.4s, v8.4s, v1.s[2] \n" "fmla v11.4s, v8.4s, v3.s[2] \n" "fmla v12.4s, v9.4s, v1.s[3] \n" "fmla v13.4s, v9.4s, v3.s[3] \n" "shll v6.4s, %12.4h, #16 \n" "shll2 v7.4s, %12.8h, #16 \n" "shll v8.4s, %13.4h, #16 \n" "shll2 v9.4s, %13.8h, #16 \n" "fmla v10.4s, v6.4s, v2.s[0] \n" "fmla v11.4s, v6.4s, v4.s[0] \n" "fmla v12.4s, v7.4s, v2.s[1] \n" "fmla v13.4s, v7.4s, v4.s[1] \n" "fmla v10.4s, v8.4s, v2.s[2] \n" "fmla v11.4s, v8.4s, v4.s[2] \n" "fmla v12.4s, v9.4s, v2.s[3] \n" "fmla v13.4s, v9.4s, v4.s[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r10 r11 r12 r13 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v6.4s, %14.4h, #16 \n" "shll2 v7.4s, %14.8h, #16 \n" "shll v8.4s, %15.4h, #16 \n" 
"shll2 v9.4s, %15.8h, #16 \n" "fmla v10.4s, v6.4s, v0.s[0] \n" "fmla v11.4s, v6.4s, v2.s[0] \n" "fmla v12.4s, v7.4s, v0.s[1] \n" "fmla v13.4s, v7.4s, v2.s[1] \n" "fmla v10.4s, v8.4s, v0.s[2] \n" "fmla v11.4s, v8.4s, v2.s[2] \n" "fmla v12.4s, v9.4s, v0.s[3] \n" "fmla v13.4s, v9.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v4.4h}, [%2] \n" // r14 "shll v4.4s, v4.4h, #16 \n" "shll v6.4s, %16.4h, #16 \n" "shll2 v7.4s, %16.8h, #16 \n" "shll v8.4s, %17.4h, #16 \n" "shll2 v9.4s, %17.8h, #16 \n" "fmla v10.4s, v6.4s, v1.s[0] \n" "fmla v11.4s, v6.4s, v3.s[0] \n" "fmla v12.4s, v7.4s, v1.s[1] \n" "fmla v13.4s, v7.4s, v3.s[1] \n" "fmla v10.4s, v8.4s, v1.s[2] \n" "fmla v11.4s, v8.4s, v3.s[2] \n" "fmla v12.4s, v9.4s, v1.s[3] \n" "fmla v13.4s, v9.4s, v3.s[3] \n" "shll v6.4s, %18.4h, #16 \n" "shll2 v7.4s, %18.8h, #16 \n" "shll v8.4s, %19.4h, #16 \n" "shll2 v9.4s, %19.8h, #16 \n" "fmla v10.4s, v6.4s, v2.s[0] \n" "fmla v11.4s, v6.4s, v4.s[0] \n" "fmla v12.4s, v7.4s, v2.s[1] \n" "fmla v13.4s, v7.4s, v4.s[1] \n" "fmla v10.4s, v8.4s, v2.s[2] \n" "fmla v11.4s, v8.4s, v4.s[2] \n" "fmla v12.4s, v9.4s, v2.s[3] \n" "fmla v13.4s, v9.4s, v4.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r20 r21 r22 r23 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v6.4s, %20.4h, #16 \n" "shll2 v7.4s, %20.8h, #16 \n" "shll v8.4s, %21.4h, #16 \n" "shll2 v9.4s, %21.8h, #16 \n" "fmla v10.4s, v6.4s, v0.s[0] \n" "fmla v11.4s, v6.4s, v2.s[0] \n" "fmla v12.4s, v7.4s, v0.s[1] \n" "fmla v13.4s, v7.4s, v2.s[1] \n" "fmla v10.4s, v8.4s, v0.s[2] \n" "fmla v11.4s, v8.4s, v2.s[2] \n" "fmla v12.4s, v9.4s, v0.s[3] \n" "fmla v13.4s, v9.4s, v2.s[3] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v4.4h}, [%3] \n" // r24 "shll v4.4s, v4.4h, #16 \n" "shll v6.4s, %22.4h, #16 \n" "shll2 v7.4s, %22.8h, #16 \n" "shll v8.4s, %23.4h, #16 \n" "shll2 v9.4s, %23.8h, #16 \n" "fmla v10.4s, v6.4s, v1.s[0] \n" "fmla v11.4s, v6.4s, v3.s[0] \n" "fmla v12.4s, v7.4s, v1.s[1] \n" "fmla v13.4s, v7.4s, v3.s[1] \n" "fmla v10.4s, v8.4s, v1.s[2] \n" "fmla v11.4s, v8.4s, v3.s[2] \n" "fmla v12.4s, v9.4s, v1.s[3] \n" "fmla v13.4s, v9.4s, v3.s[3] \n" "shll v6.4s, %24.4h, #16 \n" "shll2 v7.4s, %24.8h, #16 \n" "shll v8.4s, %25.4h, #16 \n" "shll2 v9.4s, %25.8h, #16 \n" "fmla v10.4s, v6.4s, v2.s[0] \n" "fmla v11.4s, v6.4s, v4.s[0] \n" "fmla v12.4s, v7.4s, v2.s[1] \n" "fmla v13.4s, v7.4s, v4.s[1] \n" "fmla v10.4s, v8.4s, v2.s[2] \n" "fmla v11.4s, v8.4s, v4.s[2] \n" "fmla v12.4s, v9.4s, v2.s[3] \n" "fmla v13.4s, v9.4s, v4.s[3] \n" "fadd v12.4s, v10.4s, v12.4s \n" "fadd v13.4s, v11.4s, v13.4s \n" "st1 {v12.4s, v13.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_01), // %8 "w"(_k00_23), // %9 "w"(_k01_01), // %10 "w"(_k01_23), // %11 "w"(_k02_01), // %12 "w"(_k02_23), // %13 "w"(_k10_01), // %14 "w"(_k10_23), // %15 "w"(_k11_01), // %16 "w"(_k11_23), // %17 "w"(_k12_01), // %18 "w"(_k12_23), // %19 "w"(_k20_01), // %20 "w"(_k20_23), // %21 "w"(_k21_01), // %22 "w"(_k21_23), // %23 "w"(_k22_01), // %24 "w"(_k22_23) // %25 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); #else // __aarch64__ asm volatile( "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128] \n" // sum0 sum1 "pld [%1, #256] \n" "vld1.u16 {d4-d7}, [%1 :64]! 
\n" // r00 r01 r02 r03 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmul.f32 q12, q8, d0[0] \n" "vmul.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%1, #64] \n" "vld1.f32 {d9}, [%1 :64] \n" // r04 "vshll.u16 q4, d9, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2 :64]! \n" // r10 r11 r12 r13 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%2, #64] \n" "vld1.f32 {d9}, [%2 :64] \n" // r14 "vshll.u16 q4, d9, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%3, #256] \n" "vld1.u16 {d4-d7}, [%3 :64]! \n" // r20 r21 r22 r23 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%3, #64] \n" "vld1.f32 {d9}, [%3 :64] \n" // r24 "vshll.u16 q4, d9, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" // "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128] \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d9[1] \n" "vadd.f32 q14, q12, q14 \n" "vadd.f32 q15, q13, q15 \n" "sub %4, %4, #256 \n" // kptr -= 8 * 16; "vst1.f32 {d28-d31}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j < outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v13.4s}, [%0] \n" // sum0 "prfm pldl1keep, [%1, #192] \n" "ld1 {v0.4h, v1.4h, v2.4h}, [%1] \n" // r00 r01 r02 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v6.4s, %8.4h, #16 \n" "shll2 v7.4s, %8.8h, #16 \n" "fmul v10.4s, v6.4s, v0.s[0] \n" "fmul v11.4s, v7.4s, v0.s[1] \n" "shll v8.4s, %9.4h, #16 \n" "shll2 v9.4s, %9.8h, #16 \n" "fmul v12.4s, v8.4s, v0.s[2] \n" "fmla v13.4s, v9.4s, v0.s[3] \n" "shll v6.4s, %10.4h, #16 \n" "shll2 v7.4s, %10.8h, #16 \n" "fmla v10.4s, v6.4s, v1.s[0] \n" "fmla v11.4s, v7.4s, v1.s[1] \n" "shll v8.4s, %11.4h, #16 \n" "shll2 v9.4s, %11.8h, #16 \n" "fmla v12.4s, v8.4s, v1.s[2] \n" "fmla v13.4s, v9.4s, v1.s[3] \n" "shll v6.4s, %12.4h, #16 \n" "shll2 v7.4s, %12.8h, #16 \n" "fmla v10.4s, v6.4s, v2.s[0] \n" "fmla v11.4s, v7.4s, v2.s[1] \n" "shll v8.4s, %13.4h, #16 \n" "shll2 v9.4s, %13.8h, #16 \n" "fmla v12.4s, v8.4s, v2.s[2] \n" "fmla v13.4s, v9.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v3.4h, v4.4h, v5.4h}, [%2] \n" // r10 r11 r12 "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, %14.4h, #16 \n" "shll2 v7.4s, %14.8h, #16 \n" "fmla v10.4s, v6.4s, v3.s[0] \n" "fmla v11.4s, v7.4s, v3.s[1] \n" "shll v8.4s, %15.4h, #16 \n" "shll2 v9.4s, %15.8h, #16 \n" "fmla v12.4s, v8.4s, v3.s[2] \n" "fmla v13.4s, v9.4s, v3.s[3] \n" "shll v6.4s, %16.4h, #16 \n" "shll2 v7.4s, %16.8h, #16 \n" "fmla v10.4s, v6.4s, v4.s[0] \n" "fmla v11.4s, v7.4s, v4.s[1] \n" "shll v8.4s, %17.4h, #16 \n" "shll2 v9.4s, %17.8h, #16 \n" "fmla v12.4s, v8.4s, v4.s[2] \n" "fmla v13.4s, v9.4s, v4.s[3] \n" "shll v6.4s, %18.4h, #16 \n" "shll2 v7.4s, %18.8h, #16 \n" "fmla v10.4s, v6.4s, v5.s[0] \n" "fmla v11.4s, v7.4s, v5.s[1] \n" "shll v8.4s, %19.4h, #16 \n" "shll2 v9.4s, %19.8h, #16 \n" "fmla v12.4s, v8.4s, v5.s[2] \n" "fmla v13.4s, v9.4s, v5.s[3] \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v0.4h, v1.4h, v2.4h}, [%3] \n" // r20 r21 r22 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v6.4s, %20.4h, #16 \n" "shll2 v7.4s, %20.8h, #16 \n" "fmla v10.4s, v6.4s, v0.s[0] \n" "fmla v11.4s, v7.4s, v0.s[1] \n" "shll v8.4s, %21.4h, #16 \n" "shll2 v9.4s, %21.8h, #16 
\n" "fmla v12.4s, v8.4s, v0.s[2] \n" "fmla v13.4s, v9.4s, v0.s[3] \n" "shll v6.4s, %22.4h, #16 \n" "shll2 v7.4s, %22.8h, #16 \n" "fmla v10.4s, v6.4s, v1.s[0] \n" "fmla v11.4s, v7.4s, v1.s[1] \n" "shll v8.4s, %23.4h, #16 \n" "shll2 v9.4s, %23.8h, #16 \n" "fmla v12.4s, v8.4s, v1.s[2] \n" "fmla v13.4s, v9.4s, v1.s[3] \n" "shll v6.4s, %24.4h, #16 \n" "shll2 v7.4s, %24.8h, #16 \n" "fmla v10.4s, v6.4s, v2.s[0] \n" "fmla v11.4s, v7.4s, v2.s[1] \n" "shll v8.4s, %25.4h, #16 \n" "shll2 v9.4s, %25.8h, #16 \n" "fmla v12.4s, v8.4s, v2.s[2] \n" "fmla v13.4s, v9.4s, v2.s[3] \n" "fadd v11.4s, v10.4s, v11.4s \n" "add %1, %1, #16 \n" "fadd v13.4s, v12.4s, v13.4s \n" "add %2, %2, #16 \n" "fadd v13.4s, v11.4s, v13.4s \n" "add %3, %3, #16 \n" "st1 {v13.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_01), // %8 "w"(_k00_23), // %9 "w"(_k01_01), // %10 "w"(_k01_23), // %11 "w"(_k02_01), // %12 "w"(_k02_23), // %13 "w"(_k10_01), // %14 "w"(_k10_23), // %15 "w"(_k11_01), // %16 "w"(_k11_23), // %17 "w"(_k12_01), // %18 "w"(_k12_23), // %19 "w"(_k20_01), // %20 "w"(_k20_23), // %21 "w"(_k21_01), // %22 "w"(_k21_23), // %23 "w"(_k22_01), // %24 "w"(_k22_23) // %25 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); #else // __aarch64__ asm volatile( "pld [%0, #128] \n" "vld1.f32 {d30-d31}, [%0 :128] \n" // sum0 "pld [%1, #192] \n" "vld1.u16 {d2-d4}, [%1 :64] \n" // r00 r01 r02 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vshll.u16 q2, d4, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmul.f32 q12, q8, d0[0] \n" "vmul.f32 q13, q9, d0[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmul.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q9, d2[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q11, d3[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q9, d4[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%2, #192] \n" "vld1.u16 {d2-d4}, [%2 :64] \n" // r10 r11 r12 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vshll.u16 q2, d4, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q9, d0[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q9, d2[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q11, d3[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q9, d4[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%3, #192] \n" "vld1.u16 {d2-d4}, [%3 :64] \n" // r20 r21 r22 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vshll.u16 q2, d4, #16 \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q9, d0[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q9, d2[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q11, d3[1] \n" // "pld [%4, #256] \n" "vld1.u16 {d20-d23}, [%4 :128] \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q9, d4[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q11, d5[1] \n" "add %1, %1, #16 \n" "vadd.f32 q13, q12, q13 \n" "add %2, %2, #16 \n" "vadd.f32 q15, q14, q15 \n" "add %3, %3, #16 \n" "vadd.f32 q15, q13, q15 \n" "sub %4, %4, #256 \n" // kptr -= 8 * 16 * 2; "vst1.f32 {d30-d31}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } for (; q < inch; q++) { unsigned short* outptr0_bf16 = top_blob.channel(p); const float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); const unsigned short* kptr = (const unsigned short*)kernel.channel(p).row<const unsigned short>(q); #if __aarch64__ // 16 * 9 uint16x8_t _k00_01 = vld1q_u16(kptr); uint16x8_t _k00_23 = vld1q_u16(kptr + 8); uint16x8_t _k01_01 = vld1q_u16(kptr + 16); uint16x8_t _k01_23 = vld1q_u16(kptr + 24); uint16x8_t _k02_01 = vld1q_u16(kptr + 32); uint16x8_t _k02_23 = vld1q_u16(kptr + 40); uint16x8_t _k10_01 = vld1q_u16(kptr + 48); uint16x8_t _k10_23 = vld1q_u16(kptr + 56); uint16x8_t _k11_01 = vld1q_u16(kptr + 64); uint16x8_t _k11_23 = vld1q_u16(kptr + 72); uint16x8_t _k12_01 = vld1q_u16(kptr + 80); uint16x8_t _k12_23 = vld1q_u16(kptr + 88); uint16x8_t _k20_01 = vld1q_u16(kptr + 96); uint16x8_t _k20_23 = vld1q_u16(kptr + 104); uint16x8_t _k21_01 = vld1q_u16(kptr + 112); uint16x8_t _k21_23 = vld1q_u16(kptr + 120); uint16x8_t _k22_01 = vld1q_u16(kptr + 128); uint16x8_t _k22_23 = vld1q_u16(kptr + 136); #endif // __aarch64__ int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%1], #64 \n" // sum0 sum1 sum2 sum3 "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r00 r01 r02 r03 "shll v0.4s, v4.4h, #16 \n" "shll2 v1.4s, v4.8h, #16 \n" "shll v2.4s, v5.4h, #16 \n" "shll2 v3.4s, v5.8h, #16 \n" "shll v4.4s, v6.4h, #16 \n" "shll2 v5.4s, v6.8h, #16 
\n" "shll v6.4s, v7.4h, #16 \n" "shll2 v7.4s, v7.8h, #16 \n" "shll v8.4s, %10.4h, #16 \n" "shll2 v9.4s, %10.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[0] \n" "fmla v11.4s, v8.4s, v2.s[0] \n" "fmla v12.4s, v8.4s, v4.s[0] \n" "fmla v13.4s, v8.4s, v6.s[0] \n" "fmla v10.4s, v9.4s, v0.s[1] \n" "fmla v11.4s, v9.4s, v2.s[1] \n" "fmla v12.4s, v9.4s, v4.s[1] \n" "fmla v13.4s, v9.4s, v6.s[1] \n" "shll v8.4s, %11.4h, #16 \n" "shll2 v9.4s, %11.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[2] \n" "fmla v11.4s, v8.4s, v2.s[2] \n" "fmla v12.4s, v8.4s, v4.s[2] \n" "fmla v13.4s, v8.4s, v6.s[2] \n" "fmla v10.4s, v9.4s, v0.s[3] \n" "fmla v11.4s, v9.4s, v2.s[3] \n" "fmla v12.4s, v9.4s, v4.s[3] \n" "fmla v13.4s, v9.4s, v6.s[3] \n" "shll v8.4s, %12.4h, #16 \n" "shll2 v9.4s, %12.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[0] \n" "fmla v11.4s, v8.4s, v3.s[0] \n" "fmla v12.4s, v8.4s, v5.s[0] \n" "fmla v13.4s, v8.4s, v7.s[0] \n" "fmla v10.4s, v9.4s, v1.s[1] \n" "fmla v11.4s, v9.4s, v3.s[1] \n" "fmla v12.4s, v9.4s, v5.s[1] \n" "fmla v13.4s, v9.4s, v7.s[1] \n" "shll v8.4s, %13.4h, #16 \n" "shll2 v9.4s, %13.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[2] \n" "fmla v11.4s, v8.4s, v3.s[2] \n" "fmla v12.4s, v8.4s, v5.s[2] \n" "fmla v13.4s, v8.4s, v7.s[2] \n" "fmla v10.4s, v9.4s, v1.s[3] \n" "fmla v11.4s, v9.4s, v3.s[3] \n" "fmla v12.4s, v9.4s, v5.s[3] \n" "fmla v13.4s, v9.4s, v7.s[3] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2] \n" // r08 "shll v0.4s, v0.4h, #16 \n" "shll v8.4s, %14.4h, #16 \n" "shll2 v9.4s, %14.8h, #16 \n" "fmla v10.4s, v8.4s, v2.s[0] \n" "fmla v11.4s, v8.4s, v4.s[0] \n" "fmla v12.4s, v8.4s, v6.s[0] \n" "fmla v13.4s, v8.4s, v0.s[0] \n" "fmla v10.4s, v9.4s, v2.s[1] \n" "fmla v11.4s, v9.4s, v4.s[1] \n" "fmla v12.4s, v9.4s, v6.s[1] \n" "fmla v13.4s, v9.4s, v0.s[1] \n" "shll v8.4s, %15.4h, #16 \n" "shll2 v9.4s, %15.8h, #16 \n" "fmla v10.4s, v8.4s, v2.s[2] \n" "fmla v11.4s, v8.4s, v4.s[2] \n" "fmla v12.4s, v8.4s, v6.s[2] \n" "fmla v13.4s, v8.4s, v0.s[2] \n" "fmla v10.4s, v9.4s, v2.s[3] \n" "fmla v11.4s, v9.4s, v4.s[3] \n" "fmla v12.4s, v9.4s, v6.s[3] \n" "fmla v13.4s, v9.4s, v0.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" // r10 r11 r12 r13 "shll v0.4s, v4.4h, #16 \n" "shll2 v1.4s, v4.8h, #16 \n" "shll v2.4s, v5.4h, #16 \n" "shll2 v3.4s, v5.8h, #16 \n" "shll v4.4s, v6.4h, #16 \n" "shll2 v5.4s, v6.8h, #16 \n" "shll v6.4s, v7.4h, #16 \n" "shll2 v7.4s, v7.8h, #16 \n" "shll v8.4s, %16.4h, #16 \n" "shll2 v9.4s, %16.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[0] \n" "fmla v11.4s, v8.4s, v2.s[0] \n" "fmla v12.4s, v8.4s, v4.s[0] \n" "fmla v13.4s, v8.4s, v6.s[0] \n" "fmla v10.4s, v9.4s, v0.s[1] \n" "fmla v11.4s, v9.4s, v2.s[1] \n" "fmla v12.4s, v9.4s, v4.s[1] \n" "fmla v13.4s, v9.4s, v6.s[1] \n" "shll v8.4s, %17.4h, #16 \n" "shll2 v9.4s, %17.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[2] \n" "fmla v11.4s, v8.4s, v2.s[2] \n" "fmla v12.4s, v8.4s, v4.s[2] \n" "fmla v13.4s, v8.4s, v6.s[2] \n" "fmla v10.4s, v9.4s, v0.s[3] \n" "fmla v11.4s, v9.4s, v2.s[3] \n" "fmla v12.4s, v9.4s, v4.s[3] \n" "fmla v13.4s, v9.4s, v6.s[3] \n" "shll v8.4s, %18.4h, #16 \n" "shll2 v9.4s, %18.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[0] \n" "fmla v11.4s, v8.4s, v3.s[0] \n" "fmla v12.4s, v8.4s, v5.s[0] \n" "fmla v13.4s, v8.4s, v7.s[0] \n" "fmla v10.4s, v9.4s, v1.s[1] \n" "fmla v11.4s, v9.4s, v3.s[1] \n" "fmla v12.4s, v9.4s, v5.s[1] \n" "fmla v13.4s, v9.4s, v7.s[1] \n" "shll v8.4s, %19.4h, #16 \n" "shll2 v9.4s, %19.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[2] \n" "fmla v11.4s, v8.4s, v3.s[2] \n" "fmla v12.4s, v8.4s, v5.s[2] \n" "fmla 
v13.4s, v8.4s, v7.s[2] \n" "fmla v10.4s, v9.4s, v1.s[3] \n" "fmla v11.4s, v9.4s, v3.s[3] \n" "fmla v12.4s, v9.4s, v5.s[3] \n" "fmla v13.4s, v9.4s, v7.s[3] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3] \n" // r18 "shll v0.4s, v0.4h, #16 \n" "shll v8.4s, %20.4h, #16 \n" "shll2 v9.4s, %20.8h, #16 \n" "fmla v10.4s, v8.4s, v2.s[0] \n" "fmla v11.4s, v8.4s, v4.s[0] \n" "fmla v12.4s, v8.4s, v6.s[0] \n" "fmla v13.4s, v8.4s, v0.s[0] \n" "fmla v10.4s, v9.4s, v2.s[1] \n" "fmla v11.4s, v9.4s, v4.s[1] \n" "fmla v12.4s, v9.4s, v6.s[1] \n" "fmla v13.4s, v9.4s, v0.s[1] \n" "shll v8.4s, %21.4h, #16 \n" "shll2 v9.4s, %21.8h, #16 \n" "fmla v10.4s, v8.4s, v2.s[2] \n" "fmla v11.4s, v8.4s, v4.s[2] \n" "fmla v12.4s, v8.4s, v6.s[2] \n" "fmla v13.4s, v8.4s, v0.s[2] \n" "fmla v10.4s, v9.4s, v2.s[3] \n" "fmla v11.4s, v9.4s, v4.s[3] \n" "fmla v12.4s, v9.4s, v6.s[3] \n" "fmla v13.4s, v9.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // r20 r21 r22 r23 "shll v0.4s, v4.4h, #16 \n" "shll2 v1.4s, v4.8h, #16 \n" "shll v2.4s, v5.4h, #16 \n" "shll2 v3.4s, v5.8h, #16 \n" "shll v4.4s, v6.4h, #16 \n" "shll2 v5.4s, v6.8h, #16 \n" "shll v6.4s, v7.4h, #16 \n" "shll2 v7.4s, v7.8h, #16 \n" "shll v8.4s, %22.4h, #16 \n" "shll2 v9.4s, %22.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[0] \n" "fmla v11.4s, v8.4s, v2.s[0] \n" "fmla v12.4s, v8.4s, v4.s[0] \n" "fmla v13.4s, v8.4s, v6.s[0] \n" "fmla v10.4s, v9.4s, v0.s[1] \n" "fmla v11.4s, v9.4s, v2.s[1] \n" "fmla v12.4s, v9.4s, v4.s[1] \n" "fmla v13.4s, v9.4s, v6.s[1] \n" "shll v8.4s, %23.4h, #16 \n" "shll2 v9.4s, %23.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[2] \n" "fmla v11.4s, v8.4s, v2.s[2] \n" "fmla v12.4s, v8.4s, v4.s[2] \n" "fmla v13.4s, v8.4s, v6.s[2] \n" "fmla v10.4s, v9.4s, v0.s[3] \n" "fmla v11.4s, v9.4s, v2.s[3] \n" "fmla v12.4s, v9.4s, v4.s[3] \n" "fmla v13.4s, v9.4s, v6.s[3] \n" "shll v8.4s, %24.4h, #16 \n" "shll2 v9.4s, %24.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[0] \n" "fmla v11.4s, v8.4s, v3.s[0] \n" "fmla v12.4s, v8.4s, v5.s[0] \n" "fmla v13.4s, v8.4s, v7.s[0] \n" "fmla v10.4s, v9.4s, v1.s[1] \n" "fmla v11.4s, v9.4s, v3.s[1] \n" "fmla v12.4s, v9.4s, v5.s[1] \n" "fmla v13.4s, v9.4s, v7.s[1] \n" "shll v8.4s, %25.4h, #16 \n" "shll2 v9.4s, %25.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[2] \n" "fmla v11.4s, v8.4s, v3.s[2] \n" "fmla v12.4s, v8.4s, v5.s[2] \n" "fmla v13.4s, v8.4s, v7.s[2] \n" "fmla v10.4s, v9.4s, v1.s[3] \n" "fmla v11.4s, v9.4s, v3.s[3] \n" "fmla v12.4s, v9.4s, v5.s[3] \n" "fmla v13.4s, v9.4s, v7.s[3] \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v0.4h}, [%4] \n" // r28 "shll v0.4s, v0.4h, #16 \n" "shll v8.4s, %26.4h, #16 \n" "shll2 v9.4s, %26.8h, #16 \n" "fmla v10.4s, v8.4s, v2.s[0] \n" "fmla v11.4s, v8.4s, v4.s[0] \n" "fmla v12.4s, v8.4s, v6.s[0] \n" "fmla v13.4s, v8.4s, v0.s[0] \n" "fmla v10.4s, v9.4s, v2.s[1] \n" "fmla v11.4s, v9.4s, v4.s[1] \n" "fmla v12.4s, v9.4s, v6.s[1] \n" "fmla v13.4s, v9.4s, v0.s[1] \n" "shll v8.4s, %27.4h, #16 \n" "shll2 v9.4s, %27.8h, #16 \n" "fmla v10.4s, v8.4s, v2.s[2] \n" "fmla v11.4s, v8.4s, v4.s[2] \n" "fmla v12.4s, v8.4s, v6.s[2] \n" "fmla v13.4s, v8.4s, v0.s[2] \n" "fmla v10.4s, v9.4s, v2.s[3] \n" "fmla v11.4s, v9.4s, v4.s[3] \n" "fmla v12.4s, v9.4s, v6.s[3] \n" "fmla v13.4s, v9.4s, v0.s[3] \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "st1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%0], #32 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), 
"2"(r0), "3"(r1), "4"(r2), "w"(_k00_01), // %10 "w"(_k00_23), // %11 "w"(_k01_01), // %12 "w"(_k01_23), // %13 "w"(_k02_01), // %14 "w"(_k02_23), // %15 "w"(_k10_01), // %16 "w"(_k10_23), // %17 "w"(_k11_01), // %18 "w"(_k11_23), // %19 "w"(_k12_01), // %20 "w"(_k12_23), // %21 "w"(_k20_01), // %22 "w"(_k20_23), // %23 "w"(_k21_01), // %24 "w"(_k21_23), // %25 "w"(_k22_01), // %26 "w"(_k22_23) // %27 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); #else // __aarch64__ asm volatile( "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n" // sum0 sum1 sum2 sum3 "pld [%2, #512] \n" "vldm %2!, {d8-d15} \n" // r00 r01 r02 r03 r04 r05 r06 r07 "vshll.u16 q0, d8, #16 \n" "vshll.u16 q1, d9, #16 \n" "vshll.u16 q2, d10, #16 \n" "vshll.u16 q3, d11, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%2, #64] \n" "vld1.f32 {d1}, [%2 :64] \n" // r08 "vshll.u16 q0, d1, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" // r10 r11 r12 r13 r14 r15 r16 r17 "vshll.u16 q0, d8, #16 \n" "vshll.u16 q1, d9, #16 \n" "vshll.u16 q2, d10, #16 \n" "vshll.u16 q3, d11, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%3, #64] \n" "vld1.f32 {d1}, [%3 :64] \n" // r18 "vshll.u16 q0, d1, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%4, #256] \n" "vldm %4!, {d8-d15} \n" // r20 r21 r22 r23 r24 r25 r26 r27 "vshll.u16 q0, d8, #16 \n" "vshll.u16 q1, d9, #16 \n" "vshll.u16 q2, d10, #16 \n" "vshll.u16 q3, d11, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%4, #64] \n" "vld1.f32 {d1}, [%4 :64] \n" // r28 "vshll.u16 q0, d1, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" // "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128] \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "sub %5, %5, #256 \n" // kptr -= 8 * 16; "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vshrn.u32 d26, q14, #16 \n" "vshrn.u32 d27, q15, #16 \n" "vst1.f32 {d24-d27}, [%0 :64]! \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(kptr) // %5 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v12.4s, v13.4s}, [%1], #32 \n" // sum0 sum1 "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r00 r01 r02 r03 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v6.4s, %10.4h, #16 \n" "shll2 v7.4s, %10.8h, #16 \n" "fmul v10.4s, v6.4s, v0.s[0] \n" "fmul v11.4s, v6.4s, v2.s[0] \n" "fmla v12.4s, v7.4s, v0.s[1] \n" "fmla v13.4s, v7.4s, v2.s[1] \n" "shll v8.4s, %11.4h, #16 \n" "shll2 v9.4s, %11.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[2] \n" "fmla v11.4s, v8.4s, v2.s[2] \n" "fmla v12.4s, v9.4s, v0.s[3] \n" "fmla v13.4s, v9.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v4.4h}, [%2] \n" // r04 "shll v4.4s, v4.4h, #16 \n" "shll v6.4s, %12.4h, #16 \n" "shll2 v7.4s, %12.8h, #16 \n" "fmla v10.4s, v6.4s, v1.s[0] \n" "fmla v11.4s, v6.4s, v3.s[0] \n" "fmla v12.4s, v7.4s, v1.s[1] \n" "fmla v13.4s, v7.4s, v3.s[1] \n" "shll v8.4s, %13.4h, #16 \n" "shll2 v9.4s, %13.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[2] \n" "fmla v11.4s, v8.4s, v3.s[2] \n" "fmla v12.4s, v9.4s, v1.s[3] \n" "fmla v13.4s, v9.4s, v3.s[3] \n" "shll v6.4s, %14.4h, #16 \n" "shll2 v7.4s, %14.8h, #16 \n" "fmla v10.4s, v6.4s, v2.s[0] \n" "fmla v11.4s, v6.4s, v4.s[0] \n" "fmla v12.4s, v7.4s, v2.s[1] \n" "fmla v13.4s, v7.4s, v4.s[1] \n" "shll v8.4s, %15.4h, #16 \n" "shll2 v9.4s, %15.8h, #16 \n" "fmla v10.4s, v8.4s, v2.s[2] \n" "fmla v11.4s, v8.4s, v4.s[2] \n" "fmla v12.4s, v9.4s, v2.s[3] \n" "fmla v13.4s, v9.4s, v4.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r10 r11 r12 r13 "shll v0.4s, v0.4h, 
#16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v6.4s, %16.4h, #16 \n" "shll2 v7.4s, %16.8h, #16 \n" "fmla v10.4s, v6.4s, v0.s[0] \n" "fmla v11.4s, v6.4s, v2.s[0] \n" "fmla v12.4s, v7.4s, v0.s[1] \n" "fmla v13.4s, v7.4s, v2.s[1] \n" "shll v8.4s, %17.4h, #16 \n" "shll2 v9.4s, %17.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[2] \n" "fmla v11.4s, v8.4s, v2.s[2] \n" "fmla v12.4s, v9.4s, v0.s[3] \n" "fmla v13.4s, v9.4s, v2.s[3] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v4.4h}, [%3] \n" // r14 "shll v4.4s, v4.4h, #16 \n" "shll v6.4s, %18.4h, #16 \n" "shll2 v7.4s, %18.8h, #16 \n" "fmla v10.4s, v6.4s, v1.s[0] \n" "fmla v11.4s, v6.4s, v3.s[0] \n" "fmla v12.4s, v7.4s, v1.s[1] \n" "fmla v13.4s, v7.4s, v3.s[1] \n" "shll v8.4s, %19.4h, #16 \n" "shll2 v9.4s, %19.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[2] \n" "fmla v11.4s, v8.4s, v3.s[2] \n" "fmla v12.4s, v9.4s, v1.s[3] \n" "fmla v13.4s, v9.4s, v3.s[3] \n" "shll v6.4s, %20.4h, #16 \n" "shll2 v7.4s, %20.8h, #16 \n" "fmla v10.4s, v6.4s, v2.s[0] \n" "fmla v11.4s, v6.4s, v4.s[0] \n" "fmla v12.4s, v7.4s, v2.s[1] \n" "fmla v13.4s, v7.4s, v4.s[1] \n" "shll v8.4s, %21.4h, #16 \n" "shll2 v9.4s, %21.8h, #16 \n" "fmla v10.4s, v8.4s, v2.s[2] \n" "fmla v11.4s, v8.4s, v4.s[2] \n" "fmla v12.4s, v9.4s, v2.s[3] \n" "fmla v13.4s, v9.4s, v4.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n" // r20 r21 r22 r23 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v6.4s, %22.4h, #16 \n" "shll2 v7.4s, %22.8h, #16 \n" "fmla v10.4s, v6.4s, v0.s[0] \n" "fmla v11.4s, v6.4s, v2.s[0] \n" "fmla v12.4s, v7.4s, v0.s[1] \n" "fmla v13.4s, v7.4s, v2.s[1] \n" "shll v8.4s, %23.4h, #16 \n" "shll2 v9.4s, %23.8h, #16 \n" "fmla v10.4s, v8.4s, v0.s[2] \n" "fmla v11.4s, v8.4s, v2.s[2] \n" "fmla v12.4s, v9.4s, v0.s[3] \n" "fmla v13.4s, v9.4s, v2.s[3] \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v4.4h}, [%4] \n" // r24 "shll v4.4s, v4.4h, #16 \n" "shll v6.4s, %24.4h, #16 \n" "shll2 v7.4s, %24.8h, #16 \n" "fmla v10.4s, v6.4s, v1.s[0] \n" "fmla v11.4s, v6.4s, v3.s[0] \n" "fmla v12.4s, v7.4s, v1.s[1] \n" "fmla v13.4s, v7.4s, v3.s[1] \n" "shll v8.4s, %25.4h, #16 \n" "shll2 v9.4s, %25.8h, #16 \n" "fmla v10.4s, v8.4s, v1.s[2] \n" "fmla v11.4s, v8.4s, v3.s[2] \n" "fmla v12.4s, v9.4s, v1.s[3] \n" "fmla v13.4s, v9.4s, v3.s[3] \n" "shll v6.4s, %26.4h, #16 \n" "shll2 v7.4s, %26.8h, #16 \n" "fmla v10.4s, v6.4s, v2.s[0] \n" "fmla v11.4s, v6.4s, v4.s[0] \n" "fmla v12.4s, v7.4s, v2.s[1] \n" "fmla v13.4s, v7.4s, v4.s[1] \n" "shll v8.4s, %27.4h, #16 \n" "shll2 v9.4s, %27.8h, #16 \n" "fmla v10.4s, v8.4s, v2.s[2] \n" "fmla v11.4s, v8.4s, v4.s[2] \n" "fmla v12.4s, v9.4s, v2.s[3] \n" "fmla v13.4s, v9.4s, v4.s[3] \n" "fadd v12.4s, v10.4s, v12.4s \n" "fadd v13.4s, v11.4s, v13.4s \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "st1 {v12.4h, v13.4h}, [%0], #16 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00_01), // %10 "w"(_k00_23), // %11 "w"(_k01_01), // %12 "w"(_k01_23), // %13 "w"(_k02_01), // %14 "w"(_k02_23), // %15 "w"(_k10_01), // %16 "w"(_k10_23), // %17 "w"(_k11_01), // %18 "w"(_k11_23), // %19 "w"(_k12_01), // %20 "w"(_k12_23), // %21 "w"(_k20_01), // %22 "w"(_k20_23), // %23 "w"(_k21_01), // %24 "w"(_k21_23), // %25 "w"(_k22_01), // %26 "w"(_k22_23) // %27 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", 
"v11", "v12", "v13"); #else // __aarch64__ asm volatile( "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! \n" // sum0 sum1 "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmul.f32 q12, q8, d0[0] \n" "vmul.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%2, #64] \n" "vld1.f32 {d9}, [%2 :64] \n" // r04 "vshll.u16 q4, d9, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%3, #256] \n" "vld1.u16 {d4-d7}, [%3 :64]! \n" // r10 r11 r12 r13 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%3, #64] \n" "vld1.f32 {d9}, [%3 :64] \n" // r14 "vshll.u16 q4, d9, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%4, #256] \n" "vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "pld [%4, #64] \n" "vld1.f32 {d9}, [%4 :64] \n" // r24 "vshll.u16 q4, d9, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" // "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128] \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d9[1] \n" "vadd.f32 q14, q12, q14 \n" "vadd.f32 q15, q13, q15 \n" "sub %5, %5, #256 \n" // kptr -= 8 * 16; "vshrn.u32 d28, q14, #16 \n" "vshrn.u32 d29, q15, #16 \n" "vst1.f32 {d28-d29}, [%0 :64]! \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(kptr) // %5 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j < outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #128] \n" "ld1 {v13.4s}, [%1], #16 \n" // sum0 "prfm pldl1keep, [%2, #192] \n" "ld1 {v0.4h, v1.4h, v2.4h}, [%2] \n" // r00 r01 r02 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v6.4s, %10.4h, #16 \n" "shll2 v7.4s, %10.8h, #16 \n" "fmul v10.4s, v6.4s, v0.s[0] \n" "fmul v11.4s, v7.4s, v0.s[1] \n" "shll v8.4s, %11.4h, #16 \n" "shll2 v9.4s, %11.8h, #16 \n" "fmul v12.4s, v8.4s, v0.s[2] \n" "fmla v13.4s, v9.4s, v0.s[3] \n" "shll v6.4s, %12.4h, #16 \n" "shll2 v7.4s, %12.8h, #16 \n" "fmla v10.4s, v6.4s, v1.s[0] \n" "fmla v11.4s, v7.4s, v1.s[1] \n" "shll v8.4s, %13.4h, #16 \n" "shll2 v9.4s, %13.8h, #16 \n" "fmla v12.4s, v8.4s, v1.s[2] \n" "fmla v13.4s, v9.4s, v1.s[3] \n" "shll v6.4s, %14.4h, #16 \n" "shll2 v7.4s, %14.8h, #16 \n" "fmla v10.4s, v6.4s, v2.s[0] \n" "fmla v11.4s, v7.4s, v2.s[1] \n" "shll v8.4s, %15.4h, #16 \n" "shll2 v9.4s, %15.8h, #16 \n" "fmla v12.4s, v8.4s, v2.s[2] \n" "fmla v13.4s, v9.4s, v2.s[3] \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v3.4h, v4.4h, v5.4h}, [%3] \n" // r10 r11 r12 "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, %16.4h, #16 \n" "shll2 v7.4s, %16.8h, #16 \n" "fmla v10.4s, v6.4s, v3.s[0] \n" "fmla v11.4s, v7.4s, v3.s[1] \n" "shll v8.4s, %17.4h, #16 \n" "shll2 v9.4s, %17.8h, #16 \n" "fmla v12.4s, v8.4s, v3.s[2] \n" "fmla v13.4s, v9.4s, v3.s[3] \n" "shll v6.4s, %18.4h, #16 \n" "shll2 v7.4s, %18.8h, #16 \n" "fmla v10.4s, v6.4s, v4.s[0] \n" "fmla v11.4s, v7.4s, v4.s[1] \n" "shll v8.4s, %19.4h, #16 \n" "shll2 v9.4s, %19.8h, #16 \n" "fmla v12.4s, v8.4s, v4.s[2] \n" "fmla v13.4s, v9.4s, v4.s[3] \n" "shll v6.4s, %20.4h, #16 \n" "shll2 v7.4s, %20.8h, #16 \n" "fmla v10.4s, v6.4s, 
v5.s[0] \n" "fmla v11.4s, v7.4s, v5.s[1] \n" "shll v8.4s, %21.4h, #16 \n" "shll2 v9.4s, %21.8h, #16 \n" "fmla v12.4s, v8.4s, v5.s[2] \n" "fmla v13.4s, v9.4s, v5.s[3] \n" "prfm pldl1keep, [%4, #192] \n" "ld1 {v0.4h, v1.4h, v2.4h}, [%4] \n" // r20 r21 r22 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v6.4s, %22.4h, #16 \n" "shll2 v7.4s, %22.8h, #16 \n" "fmla v10.4s, v6.4s, v0.s[0] \n" "fmla v11.4s, v7.4s, v0.s[1] \n" "shll v8.4s, %23.4h, #16 \n" "shll2 v9.4s, %23.8h, #16 \n" "fmla v12.4s, v8.4s, v0.s[2] \n" "fmla v13.4s, v9.4s, v0.s[3] \n" "shll v6.4s, %24.4h, #16 \n" "shll2 v7.4s, %24.8h, #16 \n" "fmla v10.4s, v6.4s, v1.s[0] \n" "fmla v11.4s, v7.4s, v1.s[1] \n" "shll v8.4s, %25.4h, #16 \n" "shll2 v9.4s, %25.8h, #16 \n" "fmla v12.4s, v8.4s, v1.s[2] \n" "fmla v13.4s, v9.4s, v1.s[3] \n" "shll v6.4s, %26.4h, #16 \n" "shll2 v7.4s, %26.8h, #16 \n" "fmla v10.4s, v6.4s, v2.s[0] \n" "fmla v11.4s, v7.4s, v2.s[1] \n" "shll v8.4s, %27.4h, #16 \n" "shll2 v9.4s, %27.8h, #16 \n" "fmla v12.4s, v8.4s, v2.s[2] \n" "fmla v13.4s, v9.4s, v2.s[3] \n" "fadd v11.4s, v10.4s, v11.4s \n" "add %2, %2, #16 \n" "fadd v13.4s, v12.4s, v13.4s \n" "add %3, %3, #16 \n" "fadd v13.4s, v11.4s, v13.4s \n" "add %4, %4, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "st1 {v13.4h}, [%0], #8 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00_01), // %10 "w"(_k00_23), // %11 "w"(_k01_01), // %12 "w"(_k01_23), // %13 "w"(_k02_01), // %14 "w"(_k02_23), // %15 "w"(_k10_01), // %16 "w"(_k10_23), // %17 "w"(_k11_01), // %18 "w"(_k11_23), // %19 "w"(_k12_01), // %20 "w"(_k12_23), // %21 "w"(_k20_01), // %22 "w"(_k20_23), // %23 "w"(_k21_01), // %24 "w"(_k21_23), // %25 "w"(_k22_01), // %26 "w"(_k22_23) // %27 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); #else // __aarch64__ asm volatile( "pld [%1, #128] \n" "vld1.f32 {d30-d31}, [%1 :128]! \n" // sum0 "pld [%2, #192] \n" "vld1.u16 {d2-d4}, [%2 :64] \n" // r00 r01 r02 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vshll.u16 q2, d4, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmul.f32 q12, q8, d0[0] \n" "vmul.f32 q13, q9, d0[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmul.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q9, d2[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q11, d3[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q9, d4[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%3, #192] \n" "vld1.u16 {d2-d4}, [%3 :64] \n" // r10 r11 r12 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vshll.u16 q2, d4, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q9, d0[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! 
\n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q9, d2[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q11, d3[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q9, d4[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%4, #192] \n" "vld1.u16 {d2-d4}, [%4 :64] \n" // r20 r21 r22 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vshll.u16 q2, d4, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q9, d0[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q9, d2[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q11, d3[1] \n" // "pld [%5, #256] \n" "vld1.u16 {d20-d23}, [%5 :128] \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q9, d4[1] \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q11, d5[1] \n" "add %2, %2, #16 \n" "vadd.f32 q13, q12, q13 \n" "add %3, %3, #16 \n" "vadd.f32 q15, q14, q15 \n" "add %4, %4, #16 \n" "vadd.f32 q15, q13, q15 \n" "sub %5, %5, #256 \n" // kptr -= 8 * 16 * 2; "vshrn.u32 d31, q15, #16 \n" "vst1.u16 {d31}, [%0 :64]! \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(kptr) // %5 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "5"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } } }
GB_binop__second_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__second_fc64) // A.*B function (eWiseMult): GB (_AemultB_08__second_fc64) // A.*B function (eWiseMult): GB (_AemultB_02__second_fc64) // A.*B function (eWiseMult): GB (_AemultB_04__second_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__second_fc64) // A*D function (colscale): GB (_AxD__second_fc64) // D*A function (rowscale): GB (_DxB__second_fc64) // C+=B function (dense accum): GB (_Cdense_accumB__second_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__second_fc64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_fc64) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: GxB_FC64_t // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = bij #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = y ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 1 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SECOND || GxB_NO_FC64 || GxB_NO_SECOND_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
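// SECOND is not in that list, so no specialized C+=A+B kernel is generated for
// this operator (hence the GB ((none)) entry in the table above); the stub
// below stays compiled out and the generic path handles that case instead.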
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__second_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__second_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__second_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__second_fc64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__second_fc64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__second_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" 
GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__second_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__second_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
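        // For SECOND, a flipped variant would be redundant anyway:
        // z = SECOND(y,x) = x is exactly FIRST(x,y), so the flip is resolved
        // by the caller before this kernel is selected and GB_BINOP_FLIP is 0.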
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__second_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__second_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = GBX (Bx, p, false) ; Cx [p] = bij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = y ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = y ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
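// ---------------------------------------------------------------------------
// A minimal standalone sketch (not part of the generated library) of what the
// macros above expand to inside the eWiseMult templates for this operator:
// GB_GETA is a no-op, GB_GETB loads bij, and GB_BINOP(z,x,y,i,j) is "z = y",
// so in the dense case z = SECOND(x,y) degenerates to a copy of B.
// GxB_FC64_t is the C99 "double complex" type.
#include <complex.h>
#include <stdint.h>

static void emult_second_fc64_dense_sketch(double complex *Cx,
                                           const double complex *Ax,
                                           const double complex *Bx,
                                           int64_t n)
{
    (void)Ax;                        // GB_GETA(aij,Ax,pA,A_iso) expands to ";"
    for (int64_t p = 0 ; p < n ; p++)
    {
        double complex bij = Bx[p] ; // GB_GETB(bij,Bx,pB,B_iso)
        Cx[p] = bij ;                // GB_BINOP(z,x,y,i,j): z = y
    }
}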
residualbased_predictorcorrector_velocity_bossak_scheme_turbulent.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//    _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Jordi Cotela
//

#if !defined(KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_TURBULENT_SCHEME )
#define KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_TURBULENT_SCHEME

/* System includes */

/* External includes */
#include "boost/smart_ptr.hpp"

/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "solving_strategies/schemes/scheme.h"
#include "includes/variables.h"
#include "includes/cfd_variables.h"
#include "containers/array_1d.h"
#include "utilities/openmp_utils.h"
#include "utilities/dof_updater.h"
#include "utilities/coordinate_transformation_utilities.h"
#include "processes/process.h"

namespace Kratos {

/**@name Kratos Globals */
/*@{ */

/*@} */
/**@name Type Definitions */
/*@{ */

/*@} */
/**@name Enum's */
/*@{ */

/*@} */
/**@name Functions */
/*@{ */

/*@} */
/**@name Kratos Classes */
/*@{ */

/// Bossak time scheme for the incompressible flow problem.
/** This class provides a second order time scheme of the generalized-alpha
    Newmark family of methods. It also includes code required to implement slip
    conditions on the incompressible flow problem and provides the possibility
    of using a RANS model by passing a turbulence model as an argument to the
    constructor. This time scheme is intended to be used in combination with
    elements of type ASGS2D, ASGS3D, VMS or derived classes.
    To use the slip condition, set the SLIP flag on slip wall nodes. To use a
    wall law in combination with the slip condition, use MonolithicWallCondition
    to mesh the boundary
    @see ASGS2D, ASGS3D, VMS, MonolithicWallCondition
*/
template<class TSparseSpace,
         class TDenseSpace //= DenseSpace<double>
         >
class ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent : public Scheme<TSparseSpace, TDenseSpace>
{
public:
    /**@name Type Definitions */
    /*@{ */

    KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent);

    typedef Scheme<TSparseSpace, TDenseSpace> BaseType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename Element::DofsVectorType DofsVectorType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef Element::GeometryType GeometryType;

    /*@} */
    /**@name Life Cycle */
    /*@{ */

    /** Constructor without a turbulence model
     */
    ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
        double NewAlphaBossak,
        double MoveMeshStrategy,
        unsigned int DomainSize)
        : Scheme<TSparseSpace, TDenseSpace>(),
          mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs.
mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; //Allocate auxiliary memory int NumThreads = OpenMPUtils::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor without a turbulence model with periodic conditions */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, unsigned int DomainSize, const Variable<int>& rPeriodicIdVar) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs. mrPeriodicIdVar(rPeriodicIdVar) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = 0.0; //Allocate auxiliary memory int NumThreads = OpenMPUtils::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor without a turbulence model */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, double MoveMeshStrategy, unsigned int DomainSize, Kratos::Flags& rSlipFlag) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,rSlipFlag), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs. mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; //Allocate auxiliary memory int NumThreads = OpenMPUtils::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Constructor with a turbulence model */ ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent( double NewAlphaBossak, double MoveMeshStrategy, unsigned int DomainSize, Process::Pointer pTurbulenceModel) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()), mpTurbulenceModel(pTurbulenceModel) { //default values for the Newmark Scheme mAlphaBossak = NewAlphaBossak; mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2); mGammaNewmark = 0.5 - mAlphaBossak; mMeshVelocity = MoveMeshStrategy; //Allocate auxiliary memory int NumThreads = OpenMPUtils::GetNumThreads(); mMass.resize(NumThreads); mDamp.resize(NumThreads); mvel.resize(NumThreads); macc.resize(NumThreads); maccold.resize(NumThreads); } /** Destructor. */ ~ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent() override { } /*@} */ /**@name Operators */ /*@{ */ /** Performing the update of the solution. 
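        The velocity values coming from the linear solve are applied first
        (rotating them where slip conditions are used) and the remaining time
        derivatives are then recovered nodally. A sketch of the Newmark/Bossak
        relations this relies on, using the coefficients computed in
        InitializeSolutionStep (the helpers UpdateAcceleration and
        UpdateDisplacement are assumed to implement them):

            a_(n+1) = ma0 * (v_(n+1) - v_n) + ma2 * a_n
                    = (v_(n+1) - v_n) / (Gamma * dt) + (Gamma - 1) / Gamma * a_n

            d_(n+1) = d_n + ma3 * v_n + ma4 * a_n + ma5 * a_(n+1)
                    = d_n + dt * v_n + dt^2 * ((1 - 2*Beta) / 2 * a_n + Beta * a_(n+1))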
*/ //*************************************************************************** void Update(ModelPart& r_model_part, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) override { KRATOS_TRY; mRotationTool.RotateVelocities(r_model_part); mpDofUpdater->UpdateDofs(rDofSet,Dv); mRotationTool.RecoverVelocities(r_model_part); AdditionalUpdateOperations(r_model_part, rDofSet, A, Dv, b); KRATOS_CATCH("") } //*************************************************************************** void AdditionalUpdateOperations(ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) { KRATOS_TRY int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; OpenMPUtils::DivideInPartitions(rModelPart.Nodes().size(), NumThreads, NodePartition); //updating time derivatives (nodally for efficiency) #pragma omp parallel { array_1d<double, 3 > DeltaVel; int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k + 1]; for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; itNode++) { noalias(DeltaVel) = (itNode)->FastGetSolutionStepValue(VELOCITY) - (itNode)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 0); array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1); UpdateAcceleration(CurrentAcceleration, DeltaVel, OldAcceleration); if (mMeshVelocity == 2)//Lagrangian { if((itNode)->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) < 1e-15) { array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0); array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1); array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1); noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = itNode->FastGetSolutionStepValue(VELOCITY); UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration); } else { noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = ZeroVector(3); noalias(itNode->FastGetSolutionStepValue(DISPLACEMENT)) = ZeroVector(3); } } } } KRATOS_CATCH("") } //*************************************************************************** //predicts the solution at the current step as // v = vold void Predict(ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) override { // if (rModelPart.GetCommunicator().MyPID() == 0) // std::cout << "prediction" << std::endl; int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; OpenMPUtils::DivideInPartitions(rModelPart.Nodes().size(), NumThreads, NodePartition); #pragma omp parallel { //array_1d<double, 3 > DeltaDisp; int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k + 1]; for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; itNode++) { array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1); double& OldPressure = (itNode)->FastGetSolutionStepValue(PRESSURE, 1); //predicting velocity //ATTENTION::: the prediction is performed only on free nodes array_1d<double, 3 > & 
CurrentVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY);
                double& CurrentPressure = (itNode)->FastGetSolutionStepValue(PRESSURE);

                if ((itNode->pGetDof(VELOCITY_X))->IsFree())
                    (CurrentVelocity[0]) = OldVelocity[0];
                if (itNode->pGetDof(VELOCITY_Y)->IsFree())
                    (CurrentVelocity[1]) = OldVelocity[1];
                if (itNode->HasDofFor(VELOCITY_Z))
                    if (itNode->pGetDof(VELOCITY_Z)->IsFree())
                        (CurrentVelocity[2]) = OldVelocity[2];

                if (itNode->pGetDof(PRESSURE)->IsFree())
                    CurrentPressure = OldPressure;

                // updating time derivatives ::: please note that displacements and
                // their time derivatives can not be consistently fixed separately
                array_1d<double, 3 > DeltaVel;
                noalias(DeltaVel) = CurrentVelocity - OldVelocity;
                array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1);
                array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION);

                UpdateAcceleration(CurrentAcceleration, DeltaVel, OldAcceleration);

                if (mMeshVelocity == 2) //Lagrangian
                {
                    array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1);
                    array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0);

                    if((itNode)->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) < 1e-15)
                    {
                        noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = itNode->FastGetSolutionStepValue(VELOCITY);
                        UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration);
                    }
                    else
                    {
                        itNode->FastGetSolutionStepValue(MESH_VELOCITY_X) = 0.0;
                        itNode->FastGetSolutionStepValue(MESH_VELOCITY_Y) = 0.0;
                        itNode->FastGetSolutionStepValue(DISPLACEMENT_X) = 0.0;
                        itNode->FastGetSolutionStepValue(DISPLACEMENT_Y) = 0.0;
                    }
                }
            }
        }

        // if (rModelPart.GetCommunicator().MyPID() == 0)
        //     std::cout << "end of prediction" << std::endl;
    }

    //***************************************************************************
    /** this function is designed to be called in the builder and solver to
    introduce the selected time integration scheme. It "asks" the matrix needed
    to the element and performs the operations needed to introduce the selected
    time integration scheme.
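    Schematically, writing M and D for the element mass and damping matrices
    and mam = (1 - AlphaBossak) / (Gamma * dt), the dynamic terms added here
    are assumed to amount to

        LHS = K_static + D + mam * M
        RHS = RHS_static - D * v - M * ((1 - AlphaBossak) * a_(n+1) + AlphaBossak * a_n)

    with the D * v part contributed through CalculateLocalVelocityContribution.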
this function calculates at the same time the contribution to the LHS and to the RHS of the system */ void CalculateSystemContributions(Element::Pointer rCurrentElement, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, ProcessInfo& CurrentProcessInfo) override { KRATOS_TRY int k = OpenMPUtils::ThisThread(); //Initializing the non linear iteration for the current element (rCurrentElement) -> InitializeNonLinearIteration(CurrentProcessInfo); //KRATOS_WATCH(LHS_Contribution); //basic operations for the element considered (rCurrentElement)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); //std::cout << rCurrentElement->Id() << " RHS = " << RHS_Contribution << std::endl; (rCurrentElement)->CalculateMassMatrix(mMass[k], CurrentProcessInfo); (rCurrentElement)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo); (rCurrentElement)->EquationIdVector(EquationId, CurrentProcessInfo); //adding the dynamic contributions (statics is already included) AddDynamicsToLHS(LHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // If there is a slip condition, apply it on a rotated system of coordinates mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement->GetGeometry()); mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement->GetGeometry()); KRATOS_CATCH("") } void Calculate_RHS_Contribution(Element::Pointer rCurrentElement, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, ProcessInfo& CurrentProcessInfo) override { int k = OpenMPUtils::ThisThread(); //Initializing the non linear iteration for the current element (rCurrentElement) -> InitializeNonLinearIteration(CurrentProcessInfo); //basic operations for the element considered (rCurrentElement)->CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo); (rCurrentElement)->CalculateMassMatrix(mMass[k], CurrentProcessInfo); (rCurrentElement)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo); (rCurrentElement)->EquationIdVector(EquationId, CurrentProcessInfo); //adding the dynamic contributions (static is already included) AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // If there is a slip condition, apply it on a rotated system of coordinates mRotationTool.Rotate(RHS_Contribution,rCurrentElement->GetGeometry()); mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentElement->GetGeometry()); } /** functions totally analogous to the precedent but applied to the "condition" objects */ void Condition_CalculateSystemContributions(Condition::Pointer rCurrentCondition, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, ProcessInfo& CurrentProcessInfo) override { KRATOS_TRY int k = OpenMPUtils::ThisThread(); //KRATOS_WATCH("CONDITION LOCALVELOCITYCONTRIBUTION IS NOT DEFINED"); (rCurrentCondition) -> InitializeNonLinearIteration(CurrentProcessInfo); (rCurrentCondition)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); (rCurrentCondition)->CalculateMassMatrix(mMass[k], CurrentProcessInfo); //(rCurrentCondition)->CalculateDampingMatrix(VelocityBossakAuxiliaries::mDamp,CurrentProcessInfo); (rCurrentCondition)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, 
CurrentProcessInfo); (rCurrentCondition)->EquationIdVector(EquationId, CurrentProcessInfo); AddDynamicsToLHS(LHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // Rotate contributions (to match coordinates for slip conditions) mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry()); mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry()); KRATOS_CATCH("") } void Condition_Calculate_RHS_Contribution(Condition::Pointer rCurrentCondition, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; int k = OpenMPUtils::ThisThread(); //KRATOS_WATCH("CONDITION LOCALVELOCITYCONTRIBUTION IS NOT DEFINED"); //Initializing the non linear iteration for the current condition (rCurrentCondition) -> InitializeNonLinearIteration(rCurrentProcessInfo); //basic operations for the element considered (rCurrentCondition)->CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo); (rCurrentCondition)->CalculateMassMatrix(mMass[k],rCurrentProcessInfo); //(rCurrentCondition)->CalculateDampingMatrix(VelocityBossakAuxiliaries::mDamp,CurrentProcessInfo); (rCurrentCondition)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution,rCurrentProcessInfo); (rCurrentCondition)->EquationIdVector(EquationId,rCurrentProcessInfo); //adding the dynamic contributions (static is already included) AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k],rCurrentProcessInfo); // Rotate contributions (to match coordinates for slip conditions) mRotationTool.Rotate(RHS_Contribution,rCurrentCondition->GetGeometry()); mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentCondition->GetGeometry()); KRATOS_CATCH(""); } //************************************************************************************* //************************************************************************************* void InitializeSolutionStep(ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); Scheme<TSparseSpace, TDenseSpace>::InitializeSolutionStep(r_model_part, A, Dx, b); double DeltaTime = CurrentProcessInfo[DELTA_TIME]; if (DeltaTime == 0) KRATOS_THROW_ERROR(std::logic_error, "detected delta_time = 0 in the Bossak Scheme ... 
check if the time step is created correctly for the current model part", ""); //initializing constants ma0 = 1.0 / (mGammaNewmark * DeltaTime); ma1 = DeltaTime * mBetaNewmark / mGammaNewmark; ma2 = (-1 + mGammaNewmark) / mGammaNewmark; ma3 = DeltaTime; ma4 = pow(DeltaTime, 2)*(-2.0 * mBetaNewmark + 1.0) / 2.0; ma5 = pow(DeltaTime, 2) * mBetaNewmark; mam = (1.0 - mAlphaBossak) / (mGammaNewmark * DeltaTime); } //************************************************************************************* //************************************************************************************* void InitializeNonLinIteration(ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { KRATOS_TRY if (mpTurbulenceModel != 0) // If not null mpTurbulenceModel->Execute(); KRATOS_CATCH("") } void FinalizeNonLinIteration(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); //if orthogonal subscales are computed if (CurrentProcessInfo[OSS_SWITCH] == 1.0) { KRATOS_INFO("Bossak Scheme") << "Computing OSS projections" << std::endl; const int nnodes = static_cast<int>(rModelPart.Nodes().size()); auto nbegin = rModelPart.NodesBegin(); #pragma omp parallel for firstprivate(nbegin,nnodes) for(int i=0; i<nnodes; ++i) { auto ind = nbegin + i; noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3); ind->FastGetSolutionStepValue(DIVPROJ) = 0.0; ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0; }//end of loop over nodes //loop on nodes to compute ADVPROJ CONVPROJ NODALAREA array_1d<double, 3 > output = ZeroVector(3); const int nel = static_cast<int>(rModelPart.Elements().size()); auto elbegin = rModelPart.ElementsBegin(); #pragma omp parallel for firstprivate(elbegin,nel,output) for(int i=0; i<nel; ++i) { auto elem = elbegin + i; elem->Calculate(ADVPROJ, output, CurrentProcessInfo); } rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA); rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ); rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ); // Correction for periodic conditions this->PeriodicConditionProjectionCorrection(rModelPart); #pragma omp parallel for firstprivate(nbegin,nnodes) for(int i=0; i<nnodes; ++i) { auto ind = nbegin + i; if (ind->FastGetSolutionStepValue(NODAL_AREA) == 0.0) { ind->FastGetSolutionStepValue(NODAL_AREA) = 1.0; //KRATOS_WATCH("*********ATTENTION: NODAL AREA IS ZERRROOOO************"); } const double Area = ind->FastGetSolutionStepValue(NODAL_AREA); ind->FastGetSolutionStepValue(ADVPROJ) /= Area; ind->FastGetSolutionStepValue(DIVPROJ) /= Area; } } } void FinalizeSolutionStep(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { Element::EquationIdVectorType EquationId; LocalSystemVectorType RHS_Contribution; LocalSystemMatrixType LHS_Contribution; ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); //for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); ++itNode) #pragma omp parallel for for(int k = 0; k<static_cast<int>(rModelPart.Nodes().size()); k++) { auto itNode = rModelPart.NodesBegin() + k; (itNode->FastGetSolutionStepValue(REACTION)).clear(); // calculating relaxed acceleration const array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 0); const array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1); const array_1d<double, 3> 
relaxed_acceleration = (1 - mAlphaBossak) * CurrentAcceleration + mAlphaBossak * OldAcceleration; (itNode)->SetValue(RELAXED_ACCELERATION, relaxed_acceleration); } //for (ModelPart::ElementsContainerType::ptr_iterator itElem = rModelPart.Elements().ptr_begin(); itElem != rModelPart.Elements().ptr_end(); ++itElem) #pragma omp parallel for firstprivate(EquationId,RHS_Contribution,LHS_Contribution) for(int k = 0; k<static_cast<int>(rModelPart.Elements().size()); k++) { auto itElem = rModelPart.Elements().ptr_begin()+k; int thread_id = OpenMPUtils::ThisThread(); (*itElem)->InitializeNonLinearIteration(CurrentProcessInfo); //KRATOS_WATCH(LHS_Contribution); //basic operations for the element considered (*itElem)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); //std::cout << rCurrentElement->Id() << " RHS = " << RHS_Contribution << std::endl; (*itElem)->CalculateMassMatrix(mMass[thread_id], CurrentProcessInfo); (*itElem)->CalculateLocalVelocityContribution(mDamp[thread_id], RHS_Contribution, CurrentProcessInfo); (*itElem)->EquationIdVector(EquationId, CurrentProcessInfo); //adding the dynamic contributions (statics is already included) AddDynamicsToLHS(LHS_Contribution, mDamp[thread_id], mMass[thread_id], CurrentProcessInfo); AddDynamicsToRHS((*itElem), RHS_Contribution, mDamp[thread_id], mMass[thread_id], CurrentProcessInfo); GeometryType& rGeom = (*itElem)->GetGeometry(); unsigned int NumNodes = rGeom.PointsNumber(); unsigned int Dimension = rGeom.WorkingSpaceDimension(); unsigned int index = 0; for (unsigned int i = 0; i < NumNodes; i++) { auto& reaction = rGeom[i].FastGetSolutionStepValue(REACTION); double& target_value0 = reaction[0]; const double& origin_value0 = RHS_Contribution[index++]; #pragma omp atomic target_value0 -= origin_value0; double& target_value1 = reaction[1]; const double& origin_value1 = RHS_Contribution[index++]; #pragma omp atomic target_value1 -= origin_value1; if (Dimension == 3) { double& target_value2 = reaction[2]; const double& origin_value2 = RHS_Contribution[index++]; #pragma omp atomic target_value2 -= origin_value2; } // rGeom[i].FastGetSolutionStepValue(REACTION_X,0) -= RHS_Contribution[index++]; // rGeom[i].FastGetSolutionStepValue(REACTION_Y,0) -= RHS_Contribution[index++]; // if (Dimension == 3) rGeom[i].FastGetSolutionStepValue(REACTION_Z,0) -= RHS_Contribution[index++]; index++; // skip pressure dof } } rModelPart.GetCommunicator().AssembleCurrentData(REACTION); // Base scheme calls FinalizeSolutionStep method of elements and conditions Scheme<TSparseSpace, TDenseSpace>::FinalizeSolutionStep(rModelPart, A, Dx, b); } //************************************************************************************************ //************************************************************************************************ /// Free memory allocated by this object. 
void Clear() override { this->mpDofUpdater->Clear(); } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ double mAlphaBossak; double mBetaNewmark; double mGammaNewmark; double mMeshVelocity; double ma0; double ma1; double ma2; double ma3; double ma4; double ma5; double mam; std::vector< Matrix > mMass; std::vector< Matrix > mDamp; std::vector< Vector > mvel; std::vector< Vector > macc; std::vector< Vector > maccold; /*@} */ /**@name Protected Operators*/ /*@{ */ /** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on * both sides of the boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n * 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n * 2- The non-historical containers are added across processes, communicating the right value from the condition owner to all partitions.\n * 3- The value on all periodic nodes is replaced by the one received in step 2. */ void PeriodicConditionProjectionCorrection(ModelPart& rModelPart) { const int num_nodes = rModelPart.NumberOfNodes(); const int num_conditions = rModelPart.NumberOfConditions(); #pragma omp parallel for for (int i = 0; i < num_nodes; i++) { auto it_node = rModelPart.NodesBegin() + i; it_node->SetValue(NODAL_AREA,0.0); it_node->SetValue(ADVPROJ,ZeroVector(3)); it_node->SetValue(DIVPROJ,0.0); } #pragma omp parallel for for (int i = 0; i < num_conditions; i++) { auto it_cond = rModelPart.ConditionsBegin() + i; if(it_cond->Is(PERIODIC)) { this->AssemblePeriodicContributionToProjections(it_cond->GetGeometry()); } } rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA); rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ); rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ); #pragma omp parallel for for (int i = 0; i < num_nodes; i++) { auto it_node = rModelPart.NodesBegin() + i; this->CorrectContributionsOnPeriodicNode(*it_node); } } void AssemblePeriodicContributionToProjections(Geometry< Node<3> >& rGeometry) { unsigned int nodes_in_cond = rGeometry.PointsNumber(); double nodal_area = 0.0; array_1d<double,3> momentum_projection = ZeroVector(3); double mass_projection = 0.0; for ( unsigned int i = 0; i < nodes_in_cond; i++ ) { auto& r_node = rGeometry[i]; nodal_area += r_node.FastGetSolutionStepValue(NODAL_AREA); noalias(momentum_projection) += r_node.FastGetSolutionStepValue(ADVPROJ); mass_projection += r_node.FastGetSolutionStepValue(DIVPROJ); } for ( unsigned int i = 0; i < nodes_in_cond; i++ ) { auto& r_node = rGeometry[i]; /* Note that this loop is expected to be threadsafe in normal conditions, * since each node should belong to a single periodic link. However, I am * setting the locks for openmp in case that we try more complicated things * in the future (like having different periodic conditions for different * coordinate directions). */ r_node.SetLock(); r_node.GetValue(NODAL_AREA) = nodal_area; noalias(r_node.GetValue(ADVPROJ)) = momentum_projection; r_node.GetValue(DIVPROJ) = mass_projection; r_node.UnSetLock(); } } void CorrectContributionsOnPeriodicNode(Node<3>& rNode) { if (rNode.GetValue(NODAL_AREA) != 0.0) // Only periodic nodes will have a non-historical NODAL_AREA set. 
{
    rNode.FastGetSolutionStepValue(NODAL_AREA) = rNode.GetValue(NODAL_AREA);
    noalias(rNode.FastGetSolutionStepValue(ADVPROJ)) = rNode.GetValue(ADVPROJ);
    rNode.FastGetSolutionStepValue(DIVPROJ) = rNode.GetValue(DIVPROJ);
}
}

//*********************************************************************************
//Updating the displacement
//*********************************************************************************

void UpdateDisplacement(array_1d<double, 3 > & CurrentDisplacement,
                        const array_1d<double, 3 > & OldDisplacement,
                        const array_1d<double, 3 > & OldVelocity,
                        const array_1d<double, 3 > & OldAcceleration,
                        const array_1d<double, 3 > & CurrentAcceleration)
{
    noalias(CurrentDisplacement) = OldDisplacement + ma3 * OldVelocity + ma4 * OldAcceleration + ma5*CurrentAcceleration;
}

//**************************************************************************

void UpdateAcceleration(array_1d<double, 3 > & CurrentAcceleration,
                        const array_1d<double, 3 > & DeltaVel,
                        const array_1d<double, 3 > & OldAcceleration)
{
    noalias(CurrentAcceleration) = ma0 * DeltaVel + ma2 * OldAcceleration;
}

//****************************************************************************

/** Kdyn = am*M + D + a1*K */
void AddDynamicsToLHS(LocalSystemMatrixType& LHS_Contribution,
                      LocalSystemMatrixType& D,
                      LocalSystemMatrixType& M,
                      ProcessInfo& CurrentProcessInfo)
{
    //multiplying by the time scheme factor
    LHS_Contribution *= ma1;

    //adding mass contribution to the dynamic stiffness
    if (M.size1() != 0) // if M matrix declared
    {
        noalias(LHS_Contribution) += mam*M;
    }

    //adding damping contribution
    if (D.size1() != 0) // if D matrix declared
    {
        noalias(LHS_Contribution) += D;
    }
}

//****************************************************************************

/// Add Bossak contributions from the inertial term to the RHS vector.
/** This essentially performs bdyn = b - M*acc for the current element.
 *  Note that viscous/pressure contributions to the RHS are expected to be added by the element itself.
 *  @param[in] rCurrentElement The fluid element we are assembling.
 *  @param[in,out] rRHS_Contribution The right hand side term where the contribution will be added.
 *  @param[in] rD The elemental velocity/pressure LHS matrix.
 *  @param[in] rM The elemental acceleration LHS matrix.
 *  @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart.
 */
void AddDynamicsToRHS(Element::Pointer rCurrentElement,
                      LocalSystemVectorType& rRHS_Contribution,
                      LocalSystemMatrixType& rD,
                      LocalSystemMatrixType& rM,
                      ProcessInfo& rCurrentProcessInfo)
{
    //adding inertia contribution
    if (rM.size1() != 0)
    {
        int k = OpenMPUtils::ThisThread();
        rCurrentElement->GetSecondDerivativesVector(macc[k], 0);
        (macc[k]) *= (1.00 - mAlphaBossak);
        rCurrentElement->GetSecondDerivativesVector(maccold[k], 1);
        noalias(macc[k]) += mAlphaBossak * maccold[k];
        noalias(rRHS_Contribution) -= prod(rM, macc[k]);
    }
}

/// Add Bossak contributions from the inertial term to the RHS vector.
/** This essentially performs bdyn = b - M*acc for the current condition.
 *  Note that viscous/pressure contributions to the RHS are expected to be added by the condition itself.
 *  @param[in] rCurrentCondition The fluid condition we are assembling.
 *  @param[in,out] rRHS_Contribution The right hand side term where the contribution will be added.
 *  @param[in] rD The elemental velocity/pressure LHS matrix.
 *  @param[in] rM The elemental acceleration LHS matrix.
 *  @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart.
 */
void AddDynamicsToRHS(Condition::Pointer rCurrentCondition,
                      LocalSystemVectorType& rRHS_Contribution,
                      LocalSystemMatrixType& rD,
                      LocalSystemMatrixType& rM,
                      ProcessInfo& rCurrentProcessInfo)
{
    //adding inertia contribution
    if (rM.size1() != 0)
    {
        int k = OpenMPUtils::ThisThread();
        rCurrentCondition->GetSecondDerivativesVector(macc[k], 0);
        (macc[k]) *= (1.00 - mAlphaBossak);
        rCurrentCondition->GetSecondDerivativesVector(maccold[k], 1);
        noalias(macc[k]) += mAlphaBossak * maccold[k];
        noalias(rRHS_Contribution) -= prod(rM, macc[k]);
    }
}

/*@} */
/**@name Protected Operations*/
/*@{ */

/*@} */
/**@name Protected Access */
/*@{ */

/*@} */
/**@name Protected Inquiry */
/*@{ */

/*@} */
/**@name Protected LifeCycle */
/*@{ */

/*@} */

private:

/**@name Static Member Variables */
/*@{ */

/*@} */
/**@name Member Variables */
/*@{ */

CoordinateTransformationUtils<LocalSystemMatrixType,LocalSystemVectorType,double> mRotationTool;

const Variable<int>& mrPeriodicIdVar;

Process::Pointer mpTurbulenceModel;

typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();

/*@} */
/**@name Private Operators*/
/*@{ */

/*@} */
/**@name Private Operations*/
/*@{ */

/*@} */
/**@name Private Access */
/*@{ */

/*@} */
/**@name Private Inquiry */
/*@{ */

/*@} */
/**@name Un accessible methods */
/*@{ */

/*@} */

}; /* Class Scheme */

/*@} */

/**@name Type Definitions */
/*@{ */

/*@} */

} /* namespace Kratos.*/

#endif /* KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_BOSSAK_SCHEME defined */
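// Worked example (illustrative only, not taken from this file): the coefficients set in
// InitializeSolutionStep, evaluated for DeltaTime = 0.1 with the assumed sample values
// mAlphaBossak = -0.3, mGammaNewmark = 0.8, mBetaNewmark = 0.4225 (the actual members are
// set in the constructor, which lies outside this excerpt):
//
//   ma0 = 1/(0.8*0.1)             = 12.5
//   ma1 = 0.1*0.4225/0.8          = 0.0528125
//   ma2 = (-1 + 0.8)/0.8          = -0.25
//   ma3 = 0.1
//   ma4 = 0.1^2*(-2*0.4225+1)/2   = 0.000775
//   ma5 = 0.1^2*0.4225            = 0.004225
//   mam = (1 - (-0.3))/(0.8*0.1)  = 16.25
//
// With these, AddDynamicsToLHS assembles ma1*K + D + mam*M, matching the Kdyn comment above.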
rotlet_direct_rsrc.c
#include <math.h>
#include "rotlet_direct.h"
#ifdef _OPENMP
#include <omp.h>
#endif

void rotlet_direct_rsrc(double* restrict u,
                        const double* restrict xt, const int Nt,
                        const double* restrict x, const double* restrict f, const int N,
                        const ewald_opts opt)
{
    double r[3];
    double xm[3];
    int i1, i2, i3, m, n;
    const int nbox = 1;
    double rc2 = opt.rc * opt.rc;
    double xi = opt.xi;
#ifdef _OPENMP
#pragma omp parallel for \
    private(r,xm,i1,i2,i3,m,n) \
    default(shared)
#endif
    for(m=0; m<Nt; m++) // for all evaluation points
    {
        double um[3] = {0.0, 0.0, 0.0};
        xm[0] = xt[m     ];
        xm[1] = xt[m+Nt  ];
        xm[2] = xt[m+2*Nt];
        for(n=0; n<N; n++) // for all particles
        {
            double xmn[3] = {xm[0]-x[n ], xm[1]-x[n+ N], xm[2]-x[n+2*N]};
            double f0 = f[n];
            double f1 = f[n+N];
            double f2 = f[n+2*N];
            for(i1 = -nbox; i1<=nbox; i1++) // image boxes
                for(i2 = -nbox; i2<=nbox; i2++)
                    for(i3 = -nbox; i3<=nbox; i3++)
                    {
                        r[0] = xmn[0]+opt.box[0]*i1;
                        r[1] = xmn[1]+opt.box[1]*i2;
                        r[2] = xmn[2]+opt.box[2]*i3;
                        double r2 = r[0]*r[0] + r[1]*r[1] + r[2]*r[2];
                        if(r2 > rc2 || r2 == 0) continue; // skip the self term and points outside rc
                        double rnorm = sqrt(r2);
                        double rxi = rnorm*xi;
                        double A = (erfc(rxi)/rnorm + 2*xi*exp(-rxi*rxi)/sqrt(PI) ) / r2;
                        um[0] += A*(f1*r[2] - f2*r[1]);
                        um[1] += A*(f2*r[0] - f0*r[2]);
                        um[2] += A*(f0*r[1] - f1*r[0]);
                    }
        }
        u[m     ] = um[0];
        u[m+Nt  ] = um[1];
        u[m+2*Nt] = um[2];
    }
}
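/* Minimal usage sketch (an addition for illustration, not part of the original file).
 * It shows the coordinate-major layout the kernel expects: component d of point m is
 * stored at xt[m + d*Nt] (and likewise x[n + d*N], f[n + d*N]).  The ewald_opts
 * initialization below assumes box is an inline array and that zero-initializing any
 * other members is acceptable -- the real struct is defined in rotlet_direct.h.
 * Guarded so it is not compiled by default. */
#ifdef ROTLET_DIRECT_USAGE_SKETCH
int rotlet_direct_usage_example(void)
{
    enum { N = 2, Nt = 1 };
    double x[3*N] = {0.10, 0.20,   /* x components of the two particles */
                     0.30, 0.40,   /* y components */
                     0.50, 0.60};  /* z components */
    double f[3*N] = {1.0, 0.0,     /* rotlet strengths, same layout */
                     0.0, 1.0,
                     0.0, 0.0};
    double xt[3*Nt] = {0.0, 0.0, 0.0};  /* one evaluation point at the origin */
    double u[3*Nt];
    ewald_opts opt = {0};
    opt.box[0] = opt.box[1] = opt.box[2] = 1.0;  /* unit periodic box */
    opt.rc = 0.45;                               /* real-space cutoff */
    opt.xi = 2.0;                                /* Ewald split parameter */
    rotlet_direct_rsrc(u, xt, Nt, x, f, N, opt);
    return 0;
}
#endif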
GB_binop__eq_int8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB (_AaddB__eq_int8)
// A.*B function (eWiseMult):      GB (_AemultB_01__eq_int8)
// A.*B function (eWiseMult):      GB (_AemultB_02__eq_int8)
// A.*B function (eWiseMult):      GB (_AemultB_03__eq_int8)
// A.*B function (eWiseMult):      GB (_AemultB_bitmap__eq_int8)
// A*D function (colscale):        GB (_AxD__eq_int8)
// D*A function (rowscale):        GB (_DxB__eq_int8)
// C+=B function (dense accum):    GB (_Cdense_accumB__eq_int8)
// C+=b function (dense accum):    GB (_Cdense_accumb__eq_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__eq_int8)
// C=scalar+B                      GB (_bind1st__eq_int8)
// C=scalar+B'                     GB (_bind1st_tran__eq_int8)
// C=A+scalar                      GB (_bind2nd__eq_int8)
// C=A'+scalar                     GB (_bind2nd_tran__eq_int8)

// C type:   bool
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij == bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_INT8 || GxB_NO_EQ_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__eq_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__eq_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__eq_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool 
*Cx = (bool *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
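// Illustrative sketch (added for exposition; the generated file itself must not be
// edited).  Stripped of the GB_* machinery, every kernel above applies the same scalar
// operator, cij = (aij == bij), with a bool result.  Kept under "#if 0" so the
// translation unit is unchanged:
#if 0
static inline void eq_int8_dense_example (bool *Cx, const int8_t *Ax,
                                          const int8_t *Bx, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++)
    {
        Cx [p] = (Ax [p] == Bx [p]) ;   // GB_BINOP: z = (x == y)
    }
}
#endif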
pcpdlpverifydsaca.c
/******************************************************************************* * Copyright 2005-2018 Intel Corporation * All Rights Reserved. * * If this software was obtained under the Intel Simplified Software License, * the following terms apply: * * The source code, information and material ("Material") contained herein is * owned by Intel Corporation or its suppliers or licensors, and title to such * Material remains with Intel Corporation or its suppliers or licensors. The * Material contains proprietary information of Intel or its suppliers and * licensors. The Material is protected by worldwide copyright laws and treaty * provisions. No part of the Material may be used, copied, reproduced, * modified, published, uploaded, posted, transmitted, distributed or disclosed * in any way without Intel's prior express written permission. No license under * any patent, copyright or other intellectual property rights in the Material * is granted to or conferred upon you, either expressly, by implication, * inducement, estoppel or otherwise. Any license under such intellectual * property rights must be express and approved by Intel in writing. * * Unless otherwise agreed by Intel in writing, you may not remove or alter this * notice or any other notice embedded in Materials by Intel or Intel's * suppliers or licensors in any way. * * * If this software was obtained under the Apache License, Version 2.0 (the * "License"), the following terms apply: * * You may not use this file except in compliance with the License. You may * obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ /* // // Purpose: // Cryptography Primitive. 
// DL over Prime Finite Field (Verify, DSA version) // // Contents: // ippsDLPVerifyDSA() // // */ #include "owndefs.h" #include "owncp.h" #include "pcpdlp.h" /*F* // Name: ippsDLPVerifyDSA // // Purpose: Verify Signature (DSA version) // // Returns: Reason: // ippStsNullPtrErr NULL == pDL // NULL == pMsgDigest // NULL == pSignR // NULL == pSignS // NULL == pResult // // ippStsContextMatchErr illegal pDL->idCtx // illegal pMsgDigest->idCtx // illegal pSignR->idCtx // illegal pSignS->idCtx // // ippStsIncompleteContextErr // incomplete context // // ippStsMessageErr MsgDigest >= R // MsgDigest < 0 // // ippStsNoErr no errors // // Parameters: // pMsgDigest pointer to the message representative to be signed // pSignR,pSignS pointer to the signature // pResult pointer to the result: IppSignIsValid/IppSignIsInvalid // pDSA pointer to the DL context // // Primitive sequence call: // 1) set up domain parameters // 2) set up (signatory's) public key *F*/ #if !defined(_OPENMP) IPPFUN(IppStatus, ippsDLPVerifyDSA,(const IppsBigNumState* pMsgDigest, const IppsBigNumState* pSignR, const IppsBigNumState* pSignS, IppDLResult* pResult, IppsDLPState* pDL)) { /* test context*/ IPP_BAD_PTR2_RET(pDL,pResult); pDL = (IppsDLPState*)( IPP_ALIGNED_PTR(pDL, DLP_ALIGNMENT) ); IPP_BADARG_RET(!DLP_VALID_ID(pDL), ippStsContextMatchErr); /* test operation flag */ IPP_BADARG_RET(!DLP_COMPLETE(pDL), ippStsIncompleteContextErr); /* test message representative */ IPP_BAD_PTR1_RET(pMsgDigest); pMsgDigest = (IppsBigNumState*)( IPP_ALIGNED_PTR(pMsgDigest, BN_ALIGNMENT) ); IPP_BADARG_RET(!BN_VALID_ID(pMsgDigest), ippStsContextMatchErr); IPP_BADARG_RET(BN_NEGATIVE(pMsgDigest), ippStsMessageErr); /* make sure msg <order */ IPP_BADARG_RET(0<=cpCmp_BNU(BN_NUMBER(pMsgDigest), BN_SIZE(pMsgDigest), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL))), ippStsMessageErr); /* test signature */ IPP_BAD_PTR2_RET(pSignR,pSignS); pSignR = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignR, BN_ALIGNMENT) ); pSignS = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignS, BN_ALIGNMENT) ); IPP_BADARG_RET(!BN_VALID_ID(pSignR), ippStsContextMatchErr); IPP_BADARG_RET(!BN_VALID_ID(pSignS), ippStsContextMatchErr); /* test signature range */ if(0<cpBN_cmp(cpBN_OneRef(), pSignR)|| 0<=cpCmp_BNU(BN_NUMBER(pSignR),BN_SIZE(pSignR), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) { *pResult = ippDLInvalidSignature; return ippStsNoErr; } if(0<cpBN_cmp(cpBN_OneRef(), pSignS)|| 0<=cpCmp_BNU(BN_NUMBER(pSignS),BN_SIZE(pSignS), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) { *pResult = ippDLInvalidSignature; return ippStsNoErr; } { /* allocate BN resources */ BigNumNode* pList = DLP_BNCTX(pDL); IppsBigNumState* pW = cpBigNumListGet(&pList); IppsBigNumState* pU1 = cpBigNumListGet(&pList); IppsBigNumState* pU2 = cpBigNumListGet(&pList); IppsBigNumState* pOrder = cpBigNumListGet(&pList); ippsSet_BN(ippBigNumPOS, BITS2WORD32_SIZE(DLP_BITSIZER(pDL)), (Ipp32u*)DLP_R(pDL), pOrder); /* W = 1/SignS (mod R) */ ippsModInv_BN((IppsBigNumState*)pSignS, pOrder, pW); cpMontEnc_BN(pW, pW, DLP_MONTR(pDL)); /* reduct pMsgDigest if necessary */ if(0 < cpBN_cmp(pMsgDigest, pOrder)) ippsMod_BN((IppsBigNumState*)pMsgDigest, pOrder, pU1); else cpBN_copy(pU1, pMsgDigest); /* U1 = (MsgDigest*W) (mod R) */ cpMontMul_BN(pU1, pW, pU1, DLP_MONTR(pDL)); /* U2 = (SignR*W) (mod R) */ cpMontMul_BN(pU2, pSignR, pW, DLP_MONTR(pDL)); /* // V = ((G^U1)*(Y^U2) (mod P)) (mod R) */ /* precompute multi-exp table {1, G, Y, G*Y} */ { cpSize pSize = BITS_BNU_CHUNK( DLP_BITSIZEP(pDL) ); BNU_CHUNK_T* pX1 = 
BN_NUMBER(DLP_GENC(pDL)); BNU_CHUNK_T* pX2 = BN_NUMBER(DLP_YENC(pDL)); const BNU_CHUNK_T* ppX[2]; ppX[0] = pX1; ppX[1] = pX2; ZEXPAND_BNU(pX1, BN_SIZE(DLP_GENC(pDL)), pSize); ZEXPAND_BNU(pX2, BN_SIZE(DLP_YENC(pDL)), pSize); cpMontMultiExpInitArray(DLP_METBL(pDL), ppX, pSize*BITSIZE(BNU_CHUNK_T), 2, DLP_MONTP0(pDL)); } /* W = ((G^U1)*(Y^U2) (mod P) */ { cpSize sizeE1 = BN_SIZE(pU1); cpSize sizeE2 = BN_SIZE(pU2); cpSize sizeE = IPP_MAX(sizeE1, sizeE2); BNU_CHUNK_T* pE1 = BN_NUMBER(pU1); BNU_CHUNK_T* pE2 = BN_NUMBER(pU2); const Ipp8u* ppE[2]; ppE[0] = (Ipp8u*)pE1; ppE[1] = (Ipp8u*)pE2; ZEXPAND_BNU(pE1, sizeE1, sizeE); ZEXPAND_BNU(pE2, sizeE2, sizeE); cpFastMontMultiExp(BN_NUMBER(pW), DLP_METBL(pDL), ppE, sizeE*BITSIZE(BNU_CHUNK_T), 2, DLP_MONTP0(pDL)); BN_SIZE(pW) = BITS_BNU_CHUNK( DLP_BITSIZEP(pDL) ); BN_SIGN(pW) = ippBigNumPOS; } cpMontDec_BN(pW, pW, DLP_MONTP0(pDL)); BN_SIZE(pW) = cpMod_BNU(BN_NUMBER(pW), BN_SIZE(pW), BN_NUMBER(pOrder), BN_SIZE(pOrder)); /* result = W~R */ *pResult = 0==cpBN_cmp(pW, pSignR)? ippDLValid : ippDLInvalidSignature; return ippStsNoErr; } } //#endif #else IPPFUN(IppStatus, ippsDLPVerifyDSA,(const IppsBigNumState* pMsgDigest, const IppsBigNumState* pSignR, const IppsBigNumState* pSignS, IppDLResult* pResult, IppsDLPState* pDL)) { /* test context*/ IPP_BAD_PTR2_RET(pDL,pResult); pDL = (IppsDLPState*)( IPP_ALIGNED_PTR(pDL, DLP_ALIGNMENT) ); IPP_BADARG_RET(!DLP_VALID_ID(pDL), ippStsContextMatchErr); /* test operation flag */ IPP_BADARG_RET(!DLP_COMPLETE(pDL), ippStsIncompleteContextErr); /* test message representative */ IPP_BAD_PTR1_RET(pMsgDigest); pMsgDigest = (IppsBigNumState*)( IPP_ALIGNED_PTR(pMsgDigest, BN_ALIGNMENT) ); IPP_BADARG_RET(!BN_VALID_ID(pMsgDigest), ippStsContextMatchErr); IPP_BADARG_RET((0>cpBN_tst(pMsgDigest)), ippStsMessageErr); /* make sure msg <order */ IPP_BADARG_RET(0<=cpCmp_BNU(BN_NUMBER(pMsgDigest), BN_SIZE(pMsgDigest), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL))), ippStsMessageErr); /* test signature */ IPP_BAD_PTR2_RET(pSignR,pSignS); pSignR = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignR, BN_ALIGNMENT) ); pSignS = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignS, BN_ALIGNMENT) ); IPP_BADARG_RET(!BN_VALID_ID(pSignR), ippStsContextMatchErr); IPP_BADARG_RET(!BN_VALID_ID(pSignS), ippStsContextMatchErr); /* test signature range */ if(0<cpBN_cmp(cpBN_OneRef(), pSignR)|| 0<=cpCmp_BNU(BN_NUMBER(pSignR),BN_SIZE(pSignR), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) { *pResult = ippDLInvalidSignature; return ippStsNoErr; } if(0<cpBN_cmp(cpBN_OneRef(), pSignS)|| 0<=cpCmp_BNU(BN_NUMBER(pSignS),BN_SIZE(pSignS), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) { *pResult = ippDLInvalidSignature; return ippStsNoErr; } { /* allocate BN resources */ BigNumNode* pList = DLP_BNCTX(pDL); IppsBigNumState* pV = cpBigNumListGet(&pList); IppsBigNumState* pW = cpBigNumListGet(&pList); IppsBigNumState* pU1 = cpBigNumListGet(&pList); IppsBigNumState* pU2 = cpBigNumListGet(&pList); IppsBigNumState* pOrder = cpBigNumListGet(&pList); ippsSet_BN(ippBigNumPOS, BITS2WORD32_SIZE(DLP_BITSIZER(pDL)), (Ipp32u*)DLP_R(pDL), pOrder); //int maxNumThreads = IPP_MIN(IPPCP_GET_NUM_THREADS(), 2); /* W = 1/SignS (mod R) */ ippsModInv_BN((IppsBigNumState*)pSignS, pOrder, pW); cpMontEnc_BN(pW, pW, DLP_MONTR(pDL)); /* reduct pMsgDigest if necessary */ if(0 < cpBN_cmp(pMsgDigest, pOrder)) ippsMod_BN((IppsBigNumState*)pMsgDigest, pOrder, pU1); else cpBN_copy(pU1, pMsgDigest); /* U1 = (MsgDigest*W) (mod R) */ cpMontMul_BN(pU1, pW, pU1, DLP_MONTR(pDL)); /* U2 = (SignR*W) (mod R) */ 
cpMontMul_BN(pU2, pSignR, pW, DLP_MONTR(pDL)); /* V = ((G^U1)*(Y^U2) (mod P)) (mod R) */ #pragma omp parallel sections IPPCP_OMP_LIMIT_MAX_NUM_THREADS(2) { /* W = (G^U1) (mod P) */ #pragma omp section { #if !defined(_USE_WINDOW_EXP_) //cpSafeMontExp_Binary(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL)); cpMontExpBin_BN(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL) ); #else if((DLP_EXPMETHOD(pDL)==BINARY) || (1==cpMontExp_WinSize(BITSIZE_BNU(BN_NUMBER(pU1), BN_SIZE(pU1))))) //cpSafeMontExp_Binary(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL)); cpMontExpBin_BN(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL) ); else //cpSafeMontExp_Window(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL), DLP_BNUCTX0(pDL)); cpMontExpWin_BN(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL), DLP_BNUCTX0(pDL)); #endif } /* V = (Y^U2) (mod P) */ #pragma omp section { #if !defined(_USE_WINDOW_EXP_) //cpSafeMontExp_Binary(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL)); cpMontExpBin_BN(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL) ); #else if((DLP_EXPMETHOD(pDL)==BINARY) || (1==cpMontExp_WinSize(BITSIZE_BNU(BN_NUMBER(pU2), BN_SIZE(pU2))))) //cpSafeMontExp_Binary(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL)); cpMontExpBin_BN(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL) ); else //cpSafeMontExp_Window(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL), DLP_BNUCTX1(pDL)); cpMontExpWin_BN(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL), DLP_BNUCTX1(pDL)); #endif } } cpMontMul_BN(pV, pW, pV, DLP_MONTP0(pDL)); cpMontDec_BN(pV, pV, DLP_MONTP0(pDL)); BN_SIZE(pV) = cpMod_BNU(BN_NUMBER(pV), BN_SIZE(pV), BN_NUMBER(pOrder), BN_SIZE(pOrder)); /* result = V~R */ *pResult = 0==cpBN_cmp(pV, pSignR)? ippDLValid : ippDLInvalidSignature; return ippStsNoErr; } } #endif /* _OPENMP */
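/* Recap of the verification math implemented by both branches above (R = group order,
 * P = field prime, G = generator, Y = public key):
 *     W  = SignS^(-1) (mod R)
 *     U1 = MsgDigest*W (mod R)
 *     U2 = SignR*W (mod R)
 *     V  = ((G^U1)*(Y^U2) (mod P)) (mod R),  and the signature is valid iff V == SignR.
 * Toy-sized worked example (illustrative only, far too small for real use):
 * P = 23, R = 11, G = 4, private key 7, Y = 4^7 mod 23 = 8.  Signing MsgDigest = 3 with
 * nonce k = 5 gives SignR = (4^5 mod 23) mod 11 = 1 and SignS = 5^(-1)*(3 + 7*1) mod 11 = 2.
 * Verification: W = 2^(-1) mod 11 = 6, U1 = 3*6 mod 11 = 7, U2 = 1*6 mod 11 = 6,
 * V = (4^7 * 8^6 mod 23) mod 11 = (8*13 mod 23) mod 11 = 12 mod 11 = 1 == SignR. */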
kernel.h
// Copyright (c) 2021 Jisang Yoon // All rights reserved. // // This source code is licensed under the Apache 2.0 license found in the // LICENSE file in the root directory of this source tree. // #define EPS 1e-6f #pragma omp declare target inline float ReduceSum(const float* vec, const int length) { float s = 0.f; // #pragma omp parallel for reduction (+:s) for (int i = 0; i < length; i++) s += vec[i]; return s; } // reference: http://web.science.mq.edu.au/~mjohnson/code/digamma.c inline float Digamma(float x) { float result = 0.0f, xx, xx2, xx4; for ( ; x < 7.0f; ++x) result -= 1.0f / x; x -= 0.5f; xx = 1.0f / x; xx2 = xx * xx; xx4 = xx2 * xx2; result += logf(x) + 1.0f / 24.0f * xx2 - 7.0f / 960.0f * xx4 + 31.0f / 8064.0f * xx4 * xx2 - 127.0f / 30720.0f * xx4 * xx4; return result; } #pragma omp end declare target template<int numTeams, int numThreads, int smemSize> void EstepKernel( const int*__restrict cols, const int*__restrict indptr, const bool*__restrict vali, const float*__restrict counts, const bool init_gamma, const int num_cols, const int num_indptr, const int num_topics, const int num_iters, const float*__restrict alpha, const float*__restrict beta, float*__restrict gamma, float*__restrict grad_alpha, float*__restrict new_beta, float*__restrict train_losses, float*__restrict vali_losses, int*__restrict locks) { // storage for block #pragma omp target teams num_teams(numTeams) thread_limit(numThreads) { float shared_memory[smemSize]; #pragma omp parallel { const int blockIdx_x = omp_get_team_num(); const int threadIdx_x = omp_get_thread_num(); const int blockDim_x = numThreads; const int gridDim_x = numTeams; float*__restrict _new_gamma = &shared_memory[0]; float*__restrict _phi = &shared_memory[num_topics]; float*__restrict _loss_vec = &shared_memory[num_topics * 2]; float*__restrict _vali_phi_sum = &shared_memory[num_topics * 3]; float* _grad_alpha = grad_alpha + num_topics * blockIdx_x; for (int i = blockIdx_x; i < num_indptr; i += gridDim_x) { int beg = indptr[i], end = indptr[i + 1]; float* _gamma = gamma + num_topics * i; if (init_gamma) { for (int j = threadIdx_x; j < num_topics; j += blockDim_x) { _gamma[j] = alpha[j] + (end - beg) / num_topics; } } #pragma omp barrier // initiate phi sum for validation data for computing vali loss for (int j = threadIdx_x; j < num_topics; j += blockDim_x) _vali_phi_sum[j] = 0.0f; // iterate E step for (int j = 0; j < num_iters; ++j) { // initialize new gamma for (int k = threadIdx_x; k < num_topics; k += blockDim_x) _new_gamma[k] = 0.0f; #pragma omp barrier // compute phi from gamma for (int k = beg; k < end; ++k) { const int w = cols[k]; // word const bool _vali = vali[k]; const float c = counts[k]; // compute phi if (not _vali or j + 1 == num_iters) { for (int l = threadIdx_x; l < num_topics; l += blockDim_x) _phi[l] = beta[w * num_topics + l] * expf(Digamma(_gamma[l])); #pragma omp barrier // normalize phi and add it to new gamma and new beta float phi_sum = ReduceSum(_phi, num_topics); for (int l = threadIdx_x; l < num_topics; l += blockDim_x) { _phi[l] /= phi_sum; // update gamma for train data and phi_sum for computing loss if (_vali) _vali_phi_sum[l] += _phi[l] * c; else _new_gamma[l] += _phi[l] * c; } #pragma omp barrier } if (j + 1 == num_iters) { // update beta for train data if (not _vali) { // write access of w th vector of new_beta if (threadIdx_x == 0) { // while (atomicCAS(&locks[w], 0, 1)) {} int v; do { #pragma omp atomic capture { v = locks[w]; locks[w] = v == 0 ? 
1 : v; }
            } while (v);
          }
          #pragma omp barrier
          for (int l = threadIdx_x; l < num_topics; l += blockDim_x)
            new_beta[w * num_topics + l] += _phi[l] * c;
          #pragma omp barrier

          // release lock
          if (threadIdx_x == 0) locks[w] = 0;
          #pragma omp barrier
        }

        // compute the loss and reset shared memory
        // see Eq (15) in https://www.jmlr.org/papers/volume3/blei03a/blei03a.pdf
        for (int l = threadIdx_x; l < num_topics; l += blockDim_x) {
          _loss_vec[l] = logf(fmaxf(beta[w * num_topics + l], EPS));
          _loss_vec[l] -= logf(fmaxf(_phi[l], EPS));
          _loss_vec[l] *= _phi[l];
        }
        #pragma omp barrier
        float _loss = ReduceSum(_loss_vec, num_topics) * c;
        if (threadIdx_x == 0) {
          if (_vali) vali_losses[blockIdx_x] += _loss;
          else train_losses[blockIdx_x] += _loss;
        }
        #pragma omp barrier
      }
      #pragma omp barrier
    }

    // update gamma
    for (int k = threadIdx_x; k < num_topics; k += blockDim_x)
      _gamma[k] = _new_gamma[k] + alpha[k];
    #pragma omp barrier
  }

  // update gradient of alpha and loss from E[log(theta)]
  float gamma_sum = ReduceSum(_gamma, num_topics);
  for (int j = threadIdx_x; j < num_topics; j += blockDim_x) {
    float Elogthetad = Digamma(_gamma[j]) - Digamma(gamma_sum);
    _grad_alpha[j] += Elogthetad;
    _new_gamma[j] *= Elogthetad;
    _vali_phi_sum[j] *= Elogthetad;
  }

  // see Eq (15) in https://www.jmlr.org/papers/volume3/blei03a/blei03a.pdf
  float train_loss = ReduceSum(_new_gamma, num_topics);
  float vali_loss = ReduceSum(_vali_phi_sum, num_topics);
  if (threadIdx_x == 0) {
    train_losses[blockIdx_x] += train_loss;
    vali_losses[blockIdx_x] += vali_loss;
  }
  #pragma omp barrier
      }  // loop over documents (i)
    }    // omp parallel
  }      // omp target teams
}
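// Stand-alone sketch (added for illustration, not part of the original header) of the
// locking idiom used above: "#pragma omp atomic capture" emulates CUDA's
// atomicCAS(&lock, 0, 1) so that one thread per team serializes updates to a row of
// new_beta.  Writing 1 unconditionally is equivalent to the conditional store above,
// because the lock only ever holds 0 or 1.
static inline void example_lock_acquire(int *lock) {
  int observed;
  do {
    #pragma omp atomic capture
    { observed = *lock; *lock = 1; }  // read the old value and set the lock atomically
  } while (observed != 0);            // spin until the previous value was 0 (free)
}

static inline void example_lock_release(int *lock) {
  #pragma omp atomic write
  *lock = 0;                          // atomic store makes the release visible to spinners
}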
aomp_mappings.c
#include <stdio.h> #include <omp.h> #include <string.h> //Shared Variables int THREAD_LIMIT = 4; int MAX_TEAMS = 128; int GENERIC = 0; int SPMD = 1; int MAX_THREADS_PER_TEAM = 256; int WARP_SIZE = 64; /* * Function: recordError * Description: Updates error number and prints error messages */ void recordError(int* error , char *message, int iteration, int * array, unsigned long long *mask ){ (*error)++; if(mask == NULL) fprintf(stderr,"%s IS INCORRECT! Iteration: %d Value: %d\n", message, iteration, array[iteration]); else fprintf(stderr,"%s IS INCORRECT! Iteration: %d Value: %llx\n", message, iteration, mask[iteration]); } int main() { //Determine which GPU type (NVIDIA or AMD) char* nvidia= "sm"; char* aomp_gpu= getenv("AOMP_GPU"); int isAMDGPU = 1; if(aomp_gpu && strstr(aomp_gpu, nvidia) != NULL) isAMDGPU = 0; //Logic for correct shared variables - AMD vs NVIDIA GPU if(!isAMDGPU){ printf("%s\n", getenv("AOMP_GPU")); MAX_THREADS_PER_TEAM = 128; WARP_SIZE = 32; } int N = 128; int NN = 1024; int thread_num[NN]; int team_num[NN]; int default_dev[NN]; int warp_id[NN]; int lane_id[NN]; int smid[NN]; int is_spmd_mode[NN]; int master_thread_id[NN]; int num_teams[NN]; int num_threads[NN]; unsigned long long active_mask[NN]; unsigned long long mask = 0; int i; int correctTeamNum = -1; int correctNumTeams = -1; int correctWarpId = -1; int remainder = 0; int errors = 0; //Initialize arrays for (i=0; i<NN; i++) active_mask[i] = 0; for (i=0; i<NN; i++) thread_num[i]=team_num[i]=default_dev[i]=warp_id[i]=lane_id[i]=master_thread_id[i]=smid[i]=is_spmd_mode[i]=num_threads[i]=num_teams[i] = -1; fprintf(stderr,"#pragma omp target teams distribute parallel for thread_limit(4)\n"); #pragma omp target teams distribute parallel for thread_limit(4) { for (int j = 0; j< N; j++) { thread_num[j] = omp_get_thread_num(); num_threads[j] = omp_get_num_threads(); team_num[j] = omp_get_team_num(); num_teams[j] = omp_get_num_teams(); default_dev[j] = omp_get_default_device(); warp_id[j] = omp_ext_get_warp_id(); lane_id[j] = omp_ext_get_lane_id(); active_mask[j] = omp_ext_get_active_threads_mask(); smid[j] = omp_ext_get_smid(); master_thread_id[j] = omp_ext_get_master_thread_id(); is_spmd_mode[j] = omp_ext_is_spmd_mode(); } } fprintf(stderr," i thrd# team# dev# warp# lane# MastThrd smid SPMD num_threads num_teams ActiveMask\n"); for (i=0; i<N; i++) fprintf(stderr," %4d %4d %4d %4d %4d %4d %4d %4d %4d %10d %10d %16llx\n", i,thread_num[i],team_num[i],default_dev[i],warp_id[i],lane_id[i],master_thread_id[i],smid[i],is_spmd_mode[i],num_threads[i], num_teams[i],active_mask[i]); //Verify Results - #pragma omp target teams distribute parallel for thread_limit(4) for (i = 0; i < N; i++){ //check thread # if (thread_num[i] != i % THREAD_LIMIT) recordError(&errors, "THREAD NUMBER", i, thread_num, NULL); //check team # if (i % THREAD_LIMIT == 0){ correctTeamNum++; if(isAMDGPU) correctTeamNum = correctTeamNum % MAX_TEAMS; } if (team_num[i] != correctTeamNum) recordError(&errors, "TEAM NUMBER", i, team_num, NULL); //check device #, We use default device (0) for testing if (default_dev[i] != 0) recordError(&errors, "DEVICE NUMBER", i, default_dev, NULL); //check warp # if (warp_id[i] != 0) recordError(&errors, "WARP NUMBER", i, warp_id, NULL); //check lane # if (lane_id[i] != i % THREAD_LIMIT) recordError(&errors, "LANE NUMBER", i, lane_id, NULL); //check master thread # if (master_thread_id[i] != 0 ) recordError(&errors, "MASTER THREAD NUMBER", i, master_thread_id, NULL); //check SPMD mode # if (is_spmd_mode[i] != SPMD ) 
recordError(&errors, "SPMD NUMBER", i, is_spmd_mode, NULL); //check num threads if (num_threads[i] != THREAD_LIMIT ) recordError(&errors, "NUM THREADS", i, num_threads, NULL); //check num teams //If number of iterations is not divisible by THREAD_LIMIT get the ceiling if(N % THREAD_LIMIT != 0) correctNumTeams = ((N + num_threads[i]) / num_threads[i]); else correctNumTeams = N / THREAD_LIMIT; if (correctNumTeams > MAX_TEAMS && isAMDGPU) correctNumTeams = MAX_TEAMS; if (num_teams[i] != correctNumTeams) recordError(&errors, "NUM TEAMS", i, num_teams, NULL); //check active mask mask = 0; if(N % THREAD_LIMIT != 0){ remainder = N % THREAD_LIMIT; //set bit mask to proper value for (int j = 0 ; j < remainder; j++){ mask = mask << 1; mask = mask + 1; } } //Mask for last evenly divided iteration if (i < N - remainder){ mask = 0xf; } if (active_mask[i] != mask) recordError(&errors, "ACTIVE MASK", i, NULL, active_mask); } //Reset Arrays for (i=0; i<NN; i++) active_mask[i] = 0; for (i=0; i<NN; i++) thread_num[i]=team_num[i]=default_dev[i]=warp_id[i]=lane_id[i]=master_thread_id[i]=smid[i]=is_spmd_mode[i]=num_threads[i]=num_teams[i] = -1; fprintf(stderr,"#pragma omp target teams distribute parallel for\n"); #pragma omp target teams distribute parallel for { for (int j = 0; j< N; j++) { thread_num[j] = omp_get_thread_num(); num_threads[j] = omp_get_num_threads(); team_num[j] = omp_get_team_num(); num_teams[j] = omp_get_num_teams(); default_dev[j] = omp_get_default_device(); warp_id[j] = omp_ext_get_warp_id(); lane_id[j] = omp_ext_get_lane_id(); active_mask[j] = omp_ext_get_active_threads_mask(); smid[j] = omp_ext_get_smid(); master_thread_id[j] = omp_ext_get_master_thread_id(); is_spmd_mode[j] = omp_ext_is_spmd_mode(); } } fprintf(stderr," i thrd# team# dev# warp# lane# MastThrd smid SPMD num_threads num_teams ActiveMask\n"); for (i=0; i<N; i++) fprintf(stderr," %4d %4d %4d %4d %4d %4d %4d %4d %4d %10d %10d %16llx\n", i,thread_num[i],team_num[i],default_dev[i],warp_id[i],lane_id[i],master_thread_id[i],smid[i],is_spmd_mode[i],num_threads[i], num_teams[i],active_mask[i]); //Verify Results - #pragma omp target teams distribute parallel for correctTeamNum = -1; correctNumTeams = -1; //int correctWarpId = -1; //Verify Results for (i = 0; i < N; i++){ //check thread # if (thread_num[i] != i % MAX_THREADS_PER_TEAM) recordError(&errors, "THREAD NUMBER", i, thread_num, NULL); //check team # if (i % MAX_THREADS_PER_TEAM == 0){ correctTeamNum++; correctTeamNum = correctTeamNum % MAX_TEAMS; } if (team_num[i] != correctTeamNum) recordError(&errors, "TEAM NUMBER", i, team_num, NULL); //check device #, We use default device (0) for testing if (default_dev[i] != 0) recordError(&errors, "DEVICE NUMBER", i, default_dev, NULL); //check warp # if (i % WARP_SIZE == 0){ correctWarpId++; correctWarpId = correctWarpId % (MAX_THREADS_PER_TEAM/WARP_SIZE); } if (warp_id[i] != correctWarpId) recordError(&errors, "WARP NUMBER", i, warp_id, NULL); //check lane # if (lane_id[i] != i % WARP_SIZE) recordError(&errors, "LANE NUMBER", i, lane_id, NULL); //check master thread # if (master_thread_id[i] != MAX_THREADS_PER_TEAM - WARP_SIZE) recordError(&errors, "MASTER THREAD NUMBER", i, master_thread_id, NULL); //check SPMD mode # if (is_spmd_mode[i] != SPMD ) recordError(&errors, "SPMD NUMBER", i, is_spmd_mode, NULL); //check num threads if (num_threads[i] != MAX_THREADS_PER_TEAM ) recordError(&errors, "NUM THREADS", i, num_threads, NULL); //check num teams //If number of iterations is not divisible by MAX_THREADS_PER_TEAM get the ceiling 
if(N % MAX_THREADS_PER_TEAM != 0) correctNumTeams = ((N + num_threads[i]) / num_threads[i]); else correctNumTeams = N / MAX_THREADS_PER_TEAM; if (num_teams[i] != correctNumTeams) recordError(&errors, "NUM TEAMS", i, num_teams, NULL); //check active mask remainder = 0; mask = 0; //Set mask for 64 or fewer active threads in first warp if (N < WARP_SIZE + 1){ remainder = N % WARP_SIZE; } else remainder = (N % MAX_THREADS_PER_TEAM) % WARP_SIZE; //Set mask for warps with full (64) active threads if (i < N - remainder){ if(isAMDGPU) mask = 0xffffffffffffffff; else mask = 0xffffffff; } else{ //set mask for iterations with non full warps mask = 0; for (int j = 0 ; j < remainder; j++){ mask = mask << 1; mask = mask + 1; } } if (active_mask[i] != mask){ recordError(&errors, "ACTIVE MASK", i, NULL, active_mask); } } //Reset Arrays for (i=0; i<NN; i++) active_mask[i] = 0; for (i=0; i<NN; i++) thread_num[i]=team_num[i]=default_dev[i]=warp_id[i]=lane_id[i]=master_thread_id[i]=smid[i]=is_spmd_mode[i]=num_threads[i]=num_teams[i] = -1; fprintf(stderr,"#pragma omp target teams \n"); #pragma omp target teams { int j = omp_get_team_num(); thread_num[j] = omp_get_thread_num(); num_threads[j] = omp_get_num_threads(); team_num[j] = omp_get_team_num(); num_teams[j] = omp_get_num_teams(); default_dev[j] = omp_get_default_device(); warp_id[j] = omp_ext_get_warp_id(); lane_id[j] = omp_ext_get_lane_id(); active_mask[j] = omp_ext_get_active_threads_mask(); smid[j] = omp_ext_get_smid(); master_thread_id[j] = omp_ext_get_master_thread_id(); is_spmd_mode[j] = omp_ext_is_spmd_mode(); } fprintf(stderr," i thrd# team# dev# warp# lane# MastThrd smid SPMD num_threads num_teams ActiveMask\n"); for (i=0; i<N; i++) fprintf(stderr," %4d %4d %4d %4d %4d %4d %4d %4d %4d %10d %10d %16llx\n", i,thread_num[i],team_num[i],default_dev[i],warp_id[i],lane_id[i],master_thread_id[i],smid[i],is_spmd_mode[i],num_threads[i],num_teams[i],active_mask[i]); //Verify Results - #pragma omp target teams correctTeamNum = -1; correctNumTeams = -1; //Verify Results for (i = 0; i < N; i++){ //Only check iterations up to MAX_TEAMS if(i < MAX_TEAMS){ //check thread # if (thread_num[i] != 0) recordError(&errors, "THREAD NUMBER", i, thread_num, NULL); //check team # if (team_num[i] != i) recordError(&errors, "TEAM NUMBER", i, team_num, NULL); //check device #, We use default device (0) for testing if (default_dev[i] != 0) recordError(&errors, "DEVICE NUMBER", i, default_dev, NULL); //check warp # if (warp_id[i] != (MAX_THREADS_PER_TEAM - WARP_SIZE) / WARP_SIZE) recordError(&errors, "WARP NUMBER", i, warp_id, NULL); //check lane # if (lane_id[i] != 0) recordError(&errors, "LANE NUMBER", i, lane_id, NULL); //check master thread # if (master_thread_id[i] != MAX_THREADS_PER_TEAM - WARP_SIZE) recordError(&errors, "MASTER THREAD NUMBER", i, master_thread_id, NULL); //check SPMD mode # if (is_spmd_mode[i] != GENERIC ) recordError(&errors, "SPMD NUMBER", i, is_spmd_mode, NULL); //check num threads if (num_threads[i] != 1 ) recordError(&errors, "NUM THREADS", i, num_threads, NULL); //check num teams //If number of iterations is not divisible by MAX_THREADS_PER_TEAM get the ceiling if (num_teams[i] != MAX_TEAMS ) recordError(&errors, "NUM TEAMS", i, num_teams, NULL); //check active mask remainder = 0; mask = 1; if (active_mask[i] != mask){ recordError(&errors, "ACTIVE MASK", i, NULL, active_mask); } } else{ if(thread_num[i] != -1 || team_num[i] != -1 || default_dev[i] != -1 || warp_id[i] != -1 || lane_id[i] != -1 || master_thread_id[i] != -1 || is_spmd_mode[i] != -1 
|| num_threads[i] != -1 || num_teams[i] != -1 || active_mask[i] != 0){ fprintf(stderr, "Data after iteration %d is changed and should be untouched!!\n", MAX_TEAMS - 1); errors++; } } } //Print results and return total errors if(!errors){ fprintf(stderr, "Success\n"); return 0; } else { fprintf(stderr, "Fail\n"); fprintf(stderr, "Errors: %d\n", errors); return 1; } }
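/* Worked example of the active-mask check in the thread_limit(4) section above (added
 * for illustration): with N = 128 and THREAD_LIMIT = 4 the loop count divides evenly,
 * remainder stays 0 and every iteration is compared against the full four-lane mask 0xf.
 * If N were 130, remainder = 130 % 4 = 2 and the bit-building loop produces
 * mask = (0<<1)+1 = 0b1, then (0b1<<1)+1 = 0b11 = 0x3 for the last, partially filled
 * lane group, while iterations below N - remainder are still checked against 0xf. */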
GB_AxB_saxpy3_slice_balanced.c
//------------------------------------------------------------------------------ // GB_AxB_saxpy3_slice_balanced: construct balanced tasks for GB_AxB_saxpy3 //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If the mask is present but must be discarded, this function returns // GrB_NO_VALUE, to indicate that the analysis was terminated early. #include "GB_AxB_saxpy3.h" // control parameters for generating parallel tasks #define GB_NTASKS_PER_THREAD 2 #define GB_COSTLY 1.2 #define GB_FINE_WORK 2 #define GB_MWORK_ALPHA 0.01 #define GB_MWORK_BETA 0.10 #define GB_FREE_WORKSPACE \ { \ GB_WERK_POP (Fine_fl, int64_t) ; \ GB_WERK_POP (Fine_slice, int64_t) ; \ GB_WERK_POP (Coarse_Work, int64_t) ; \ GB_WERK_POP (Coarse_initial, int64_t) ; \ } #define GB_FREE_ALL \ { \ GB_FREE_WORKSPACE ; \ GB_FREE_WORK (&SaxpyTasks, SaxpyTasks_size) ; \ } //------------------------------------------------------------------------------ // GB_hash_table_size //------------------------------------------------------------------------------ // flmax is the max flop count for computing A*B(:,j), for any vector j that // this task computes. If the mask M is present, flmax also includes the // number of entries in M(:,j). GB_hash_table_size determines the hash table // size for this task, which is twice the smallest power of 2 larger than // flmax. If flmax is large enough, the hash_size is returned as cvlen, so // that Gustavson's method will be used instead of the Hash method. // By default, Gustavson vs Hash is selected automatically. AxB_method can be // selected via the descriptor or a global setting, as the non-default // GxB_AxB_GUSTAVSON or GxB_AxB_HASH settings, to enforce the selection of // either of those methods. However, if Hash is selected but the hash table // equals or exceeds cvlen, then Gustavson's method is used instead. 
static inline int64_t GB_hash_table_size ( int64_t flmax, // max flop count for any vector computed by this task int64_t cvlen, // vector length of C const GrB_Desc_Value AxB_method // Default, Gustavson, or Hash ) { int64_t hash_size ; if (AxB_method == GxB_AxB_GUSTAVSON || flmax >= cvlen/2) { //---------------------------------------------------------------------- // use Gustavson if selected explicitly or if flmax is large //---------------------------------------------------------------------- hash_size = cvlen ; } else { //---------------------------------------------------------------------- // flmax is small; consider hash vs Gustavson //---------------------------------------------------------------------- // hash_size = 2 * (smallest power of 2 >= flmax) hash_size = ((uint64_t) 2) << (GB_FLOOR_LOG2 (flmax) + 1) ; bool use_Gustavson ; if (AxB_method == GxB_AxB_HASH) { // always use Hash method, unless the hash_size >= cvlen use_Gustavson = (hash_size >= cvlen) ; } else { // default: auto selection: // use Gustavson's method if hash_size is too big use_Gustavson = (hash_size >= cvlen/12) ; } if (use_Gustavson) { hash_size = cvlen ; } } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- return (hash_size) ; } //------------------------------------------------------------------------------ // GB_create_coarse_task: create a single coarse task //------------------------------------------------------------------------------ // Compute the max flop count for any vector in a coarse task, determine the // hash table size, and construct the coarse task. static inline void GB_create_coarse_task ( int64_t kfirst, // coarse task consists of vectors kfirst:klast int64_t klast, GB_saxpy3task_struct *SaxpyTasks, int taskid, // taskid for this coarse task int64_t *Bflops, // size bnvec; cum sum of flop counts for vectors of B int64_t cvlen, // vector length of B and C double chunk, int nthreads_max, int64_t *Coarse_Work, // workspace for parallel reduction for flop count const GrB_Desc_Value AxB_method // Default, Gustavson, or Hash ) { //-------------------------------------------------------------------------- // find the max # of flops for any vector in this task //-------------------------------------------------------------------------- int64_t nk = klast - kfirst + 1 ; int nth = GB_nthreads (nk, chunk, nthreads_max) ; // each thread finds the max flop count for a subset of the vectors int tid ; #pragma omp parallel for num_threads(nth) schedule(static) for (tid = 0 ; tid < nth ; tid++) { int64_t my_flmax = 1, istart, iend ; GB_PARTITION (istart, iend, nk, tid, nth) ; for (int64_t i = istart ; i < iend ; i++) { int64_t kk = kfirst + i ; int64_t fl = Bflops [kk+1] - Bflops [kk] ; my_flmax = GB_IMAX (my_flmax, fl) ; } Coarse_Work [tid] = my_flmax ; } // combine results from each thread int64_t flmax = 1 ; for (tid = 0 ; tid < nth ; tid++) { flmax = GB_IMAX (flmax, Coarse_Work [tid]) ; } // check the parallel computation #ifdef GB_DEBUG int64_t flmax2 = 1 ; for (int64_t kk = kfirst ; kk <= klast ; kk++) { int64_t fl = Bflops [kk+1] - Bflops [kk] ; flmax2 = GB_IMAX (flmax2, fl) ; } ASSERT (flmax == flmax2) ; #endif //-------------------------------------------------------------------------- // define the coarse task //-------------------------------------------------------------------------- SaxpyTasks [taskid].start = kfirst ; SaxpyTasks [taskid].end = klast ; SaxpyTasks 
[taskid].vector = -1 ; SaxpyTasks [taskid].hsize = GB_hash_table_size (flmax, cvlen, AxB_method) ; SaxpyTasks [taskid].Hi = NULL ; // assigned later SaxpyTasks [taskid].Hf = NULL ; // assigned later SaxpyTasks [taskid].Hx = NULL ; // assigned later SaxpyTasks [taskid].my_cjnz = 0 ; // for fine tasks only SaxpyTasks [taskid].leader = taskid ; SaxpyTasks [taskid].team_size = 1 ; } //------------------------------------------------------------------------------ // GB_AxB_saxpy3_slice_balanced: create balanced tasks for saxpy3 //------------------------------------------------------------------------------ GrB_Info GB_AxB_saxpy3_slice_balanced ( // inputs GrB_Matrix C, // output matrix const GrB_Matrix M, // optional mask matrix const bool Mask_comp, // if true, use !M const GrB_Matrix A, // input matrix A const GrB_Matrix B, // input matrix B GrB_Desc_Value AxB_method, // Default, Gustavson, or Hash // outputs GB_saxpy3task_struct **SaxpyTasks_handle, size_t *SaxpyTasks_size_handle, bool *apply_mask, // if true, apply M during saxpy3 bool *M_in_place, // if true, use M in-place int *ntasks, // # of tasks created (coarse and fine) int *nfine, // # of fine tasks created int *nthreads, // # of threads to use GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; (*apply_mask) = false ; (*M_in_place) = false ; (*ntasks) = 0 ; (*nfine) = 0 ; (*nthreads) = 0 ; ASSERT_MATRIX_OK_OR_NULL (M, "M for saxpy3_slice_balanced A*B", GB0) ; ASSERT (!GB_PENDING (M)) ; ASSERT (GB_JUMBLED_OK (M)) ; ASSERT (!GB_ZOMBIES (M)) ; ASSERT_MATRIX_OK (A, "A for saxpy3_slice_balanced A*B", GB0) ; ASSERT (!GB_PENDING (A)) ; ASSERT (GB_JUMBLED_OK (A)) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT_MATRIX_OK (B, "B for saxpy3_slice_balanced A*B", GB0) ; ASSERT (!GB_PENDING (B)) ; ASSERT (GB_JUMBLED_OK (B)) ; ASSERT (!GB_ZOMBIES (B)) ; //-------------------------------------------------------------------------- // determine the # of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; //-------------------------------------------------------------------------- // define result and workspace //-------------------------------------------------------------------------- GB_saxpy3task_struct *restrict SaxpyTasks = NULL ; size_t SaxpyTasks_size = 0 ; GB_WERK_DECLARE (Coarse_initial, int64_t) ; // initial coarse tasks GB_WERK_DECLARE (Coarse_Work, int64_t) ; // workspace for flop counts GB_WERK_DECLARE (Fine_slice, int64_t) ; GB_WERK_DECLARE (Fine_fl, int64_t) ; // size max(nnz(B(:,j))) //-------------------------------------------------------------------------- // get A and B //-------------------------------------------------------------------------- const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int64_t avlen = A->vlen ; const int64_t anvec = A->nvec ; const bool A_is_hyper = GB_IS_HYPERSPARSE (A) ; const int64_t *restrict Bp = B->p ; const int64_t *restrict Bh = B->h ; const int8_t *restrict Bb = B->b ; const int64_t *restrict Bi = B->i ; const int64_t bvdim = B->vdim ; const int64_t bnz = GB_nnz_held (B) ; const int64_t bnvec = B->nvec ; const int64_t bvlen = B->vlen ; const bool B_is_hyper = GB_IS_HYPERSPARSE (B) ; int64_t cvlen = avlen ; int64_t cvdim = bvdim ; //-------------------------------------------------------------------------- // compute flop counts for
each vector of B and C //-------------------------------------------------------------------------- int64_t Mwork = 0 ; int64_t *restrict Bflops = C->p ; // use C->p as workspace for Bflops GB_OK (GB_AxB_saxpy3_flopcount (&Mwork, Bflops, M, Mask_comp, A, B, Context)) ; int64_t total_flops = Bflops [bnvec] ; double axbflops = total_flops - Mwork ; GBURBLE ("axbwork %g ", axbflops) ; if (Mwork > 0) GBURBLE ("mwork %g ", (double) Mwork) ; //-------------------------------------------------------------------------- // determine if the mask M should be applied, or done later //-------------------------------------------------------------------------- if (M == NULL) { //---------------------------------------------------------------------- // M is not present //---------------------------------------------------------------------- (*apply_mask) = false ; } else if (GB_IS_BITMAP (M) || GB_as_if_full (M)) { //---------------------------------------------------------------------- // M is present and full, bitmap, or sparse/hyper with all entries //---------------------------------------------------------------------- // Choose all-hash or all-Gustavson tasks, and apply M during saxpy3. (*apply_mask) = true ; // The work for M has not yet been added to Bflops. // Each vector M(:,j) has cvlen entries. Mwork = cvlen * cvdim ; if (!(AxB_method == GxB_AxB_HASH || AxB_method == GxB_AxB_GUSTAVSON)) { if (axbflops < (double) Mwork * GB_MWORK_BETA) { // The mask is too costly to scatter into the Hf workspace. // Leave it in place and use all-hash tasks. AxB_method = GxB_AxB_HASH ; } else { // Scatter M into Hf and use all-Gustavson tasks. AxB_method = GxB_AxB_GUSTAVSON ; } } if (AxB_method == GxB_AxB_HASH) { // Use the hash method for all tasks (except for those tasks which // require a hash table size >= cvlen; those tasks use Gustavson). // Do not scatter the mask into the Hf hash workspace. The work // for the mask is not accounted for in Bflops, so the hash tables // can be small. (*M_in_place) = true ; GBURBLE ("(use mask in-place) ") ; } else { // Use the Gustavson method for all tasks, and scatter M into the // fine Gustavson workspace. The work for M is not yet in the // Bflops cumulative sum. Add it now. ASSERT (AxB_method == GxB_AxB_GUSTAVSON) ; int nth = GB_nthreads (bnvec, chunk, nthreads_max) ; int64_t kk ; #pragma omp parallel for num_threads(nth) schedule(static) for (kk = 0 ; kk <= bnvec ; kk++) { Bflops [kk] += cvlen * kk ; } total_flops = Bflops [bnvec] ; GBURBLE ("(use mask) ") ; } } else if (axbflops < ((double) Mwork * GB_MWORK_ALPHA)) { //---------------------------------------------------------------------- // M is costly to use; apply it after C=A*B //---------------------------------------------------------------------- // Do not use M during the computation of A*B. Instead, compute C=A*B // and then apply the mask later. Tell the caller that the mask should // not be applied, so that it will be applied later in GB_mxm.
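// As a minimal standalone sketch, the two cost tests above reduce to
// comparing the multiply work against a fraction of the mask work, using
// the GB_MWORK_ALPHA and GB_MWORK_BETA constants defined at the top of this
// file (illustrative only; disabled so it does not affect compilation):
#if 0
#include <stdbool.h>

// true if C=A*B should be computed without M, with M applied afterwards
static bool demo_discard_mask (double axbflops, double mwork)
{
    return (axbflops < 0.01 * mwork) ;      // the GB_MWORK_ALPHA test
}

// true if a dense/bitmap mask should stay in place with all-hash tasks
static bool demo_mask_in_place (double axbflops, double mwork)
{
    return (axbflops < 0.10 * mwork) ;      // the GB_MWORK_BETA test
}
#endif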
(*apply_mask) = false ; GBURBLE ("(discard mask) ") ; GB_FREE_ALL ; return (GrB_NO_VALUE) ; } else { //---------------------------------------------------------------------- // use M during saxpy3 //---------------------------------------------------------------------- (*apply_mask) = true ; GBURBLE ("(use mask) ") ; } //-------------------------------------------------------------------------- // determine # of threads and # of initial coarse tasks //-------------------------------------------------------------------------- (*nthreads) = GB_nthreads ((double) total_flops, chunk, nthreads_max) ; int ntasks_initial = ((*nthreads) == 1) ? 1 : (GB_NTASKS_PER_THREAD * (*nthreads)) ; //-------------------------------------------------------------------------- // give preference to Gustavson when using few threads //-------------------------------------------------------------------------- if ((*nthreads) <= 8 && (!(AxB_method == GxB_AxB_HASH || AxB_method == GxB_AxB_GUSTAVSON))) { // Unless a specific method has been explicitly requested, see if // Gustavson should be used with a small number of threads. // Matrix-vector has a maximum intensity of 1, so this heuristic only // applies to GrB_mxm. double abnz = GB_nnz (A) + GB_nnz (B) + 1 ; double workspace = (double) ntasks_initial * (double) cvlen ; double intensity = total_flops / abnz ; GBURBLE ("(intensity: %0.3g workspace/(nnz(A)+nnz(B)): %0.3g", intensity, workspace / abnz) ; if (intensity >= 8 && workspace < abnz) { // work intensity is large, and Gustavson workspace is modest; // use Gustavson for all tasks AxB_method = GxB_AxB_GUSTAVSON ; GBURBLE (": select Gustavson) ") ; } else { // use default task creation: mix of Hash and Gustavson GBURBLE (") ") ; } } //-------------------------------------------------------------------------- // determine target task size //-------------------------------------------------------------------------- double target_task_size = ((double) total_flops) / ntasks_initial ; target_task_size = GB_IMAX (target_task_size, chunk) ; double target_fine_size = target_task_size / GB_FINE_WORK ; target_fine_size = GB_IMAX (target_fine_size, chunk) ; //-------------------------------------------------------------------------- // determine # of parallel tasks //-------------------------------------------------------------------------- int ncoarse = 0 ; // # of coarse tasks int max_bjnz = 0 ; // max (nnz (B (:,j))) of fine tasks // FUTURE: also use ultra-fine tasks that compute A(i1:i2,k)*B(k,j) if (ntasks_initial > 1) { //---------------------------------------------------------------------- // construct initial coarse tasks //---------------------------------------------------------------------- GB_WERK_PUSH (Coarse_initial, ntasks_initial + 1, int64_t) ; if (Coarse_initial == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } GB_pslice (Coarse_initial, Bflops, bnvec, ntasks_initial, true) ; //---------------------------------------------------------------------- // split the work into coarse and fine tasks //---------------------------------------------------------------------- for (int taskid = 0 ; taskid < ntasks_initial ; taskid++) { // get the initial coarse task int64_t kfirst = Coarse_initial [taskid] ; int64_t klast = Coarse_initial [taskid+1] ; int64_t task_ncols = klast - kfirst ; int64_t task_flops = Bflops [klast] - Bflops [kfirst] ; if (task_ncols == 0) { // This coarse task is empty, having been squeezed out by // costly vectors in adjacent coarse tasks.
} else if (task_flops > 2 * GB_COSTLY * target_task_size) { // This coarse task is too costly, because it contains one or // more costly vectors. Split its vectors into a mixture of // coarse and fine tasks. int64_t kcoarse_start = kfirst ; for (int64_t kk = kfirst ; kk < klast ; kk++) { // jflops = # of flops to compute a single vector A*B(:,j) // where j == GBH (Bh, kk) double jflops = Bflops [kk+1] - Bflops [kk] ; // bjnz = nnz (B (:,j)) int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]); if (jflops > GB_COSTLY * target_task_size && bjnz > 1) { // A*B(:,j) is costly; split it into 2 or more fine // tasks. First flush the prior coarse task, if any. if (kcoarse_start < kk) { // vectors kcoarse_start to kk-1 form a single // coarse task ncoarse++ ; } // next coarse task (if any) starts at kk+1 kcoarse_start = kk+1 ; // vector kk will be split into multiple fine tasks max_bjnz = GB_IMAX (max_bjnz, bjnz) ; int team_size = ceil (jflops / target_fine_size) ; (*nfine) += team_size ; } } // flush the last coarse task, if any if (kcoarse_start < klast) { // vectors kcoarse_start to klast-1 form a single // coarse task ncoarse++ ; } } else { // This coarse task is OK as-is. ncoarse++ ; } } } else { //---------------------------------------------------------------------- // entire computation in a single fine or coarse task //---------------------------------------------------------------------- if (bnvec == 1) { // If B is a single vector, and is computed by a single thread, // then a single fine task is used. (*nfine) = 1 ; ncoarse = 0 ; } else { // One thread uses a single coarse task if B is not a vector. (*nfine) = 0 ; ncoarse = 1 ; } } (*ntasks) = ncoarse + (*nfine) ; //-------------------------------------------------------------------------- // allocate the tasks, and workspace to construct fine tasks //-------------------------------------------------------------------------- SaxpyTasks = GB_MALLOC_WORK ((*ntasks), GB_saxpy3task_struct, &SaxpyTasks_size) ; GB_WERK_PUSH (Coarse_Work, nthreads_max, int64_t) ; if (max_bjnz > 0) { // also allocate workspace to construct fine tasks GB_WERK_PUSH (Fine_slice, (*ntasks)+1, int64_t) ; // Fine_fl will only fit on the Werk stack if max_bjnz is small, // but try anyway, in case it fits. It is placed at the top of the // Werk stack. GB_WERK_PUSH (Fine_fl, max_bjnz+1, int64_t) ; } if (SaxpyTasks == NULL || Coarse_Work == NULL || (max_bjnz > 0 && (Fine_slice == NULL || Fine_fl == NULL))) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } // clear SaxpyTasks memset (SaxpyTasks, 0, SaxpyTasks_size) ; //-------------------------------------------------------------------------- // create the tasks //-------------------------------------------------------------------------- if (ntasks_initial > 1) { //---------------------------------------------------------------------- // create the coarse and fine tasks //---------------------------------------------------------------------- int nf = 0 ; // fine tasks have task id 0:nfine-1 int nc = (*nfine) ; // coarse task ids are nfine:ntasks-1 for (int taskid = 0 ; taskid < ntasks_initial ; taskid++) { // get the initial coarse task int64_t kfirst = Coarse_initial [taskid] ; int64_t klast = Coarse_initial [taskid+1] ; int64_t task_ncols = klast - kfirst ; int64_t task_flops = Bflops [klast] - Bflops [kfirst] ; if (task_ncols == 0) { // This coarse task is empty, having been squeezed out by // costly vectors in adjacent coarse tasks.
} else if (task_flops > 2 * GB_COSTLY * target_task_size) { // This coarse task is too costly, because it contains one or // more costly vectors. Split its vectors into a mixture of // coarse and fine tasks. int64_t kcoarse_start = kfirst ; for (int64_t kk = kfirst ; kk < klast ; kk++) { // jflops = # of flops to compute a single vector A*B(:,j) double jflops = Bflops [kk+1] - Bflops [kk] ; // bjnz = nnz (B (:,j)) int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]); if (jflops > GB_COSTLY * target_task_size && bjnz > 1) { // A*B(:,j) is costly; split it into 2 or more fine // tasks. First flush the prior coarse task, if any. if (kcoarse_start < kk) { // kcoarse_start:kk-1 form a single coarse task GB_create_coarse_task (kcoarse_start, kk-1, SaxpyTasks, nc++, Bflops, cvlen, chunk, nthreads_max, Coarse_Work, AxB_method) ; } // next coarse task (if any) starts at kk+1 kcoarse_start = kk+1 ; // count the work for each entry B(k,j). Do not // include the work to scan M(:,j), since that will // be evenly divided between all tasks in this team. int64_t pB_start = GBP (Bp, kk, bvlen) ; int nth = GB_nthreads (bjnz, chunk, nthreads_max) ; int64_t s ; #pragma omp parallel for num_threads(nth) \ schedule(static) for (s = 0 ; s < bjnz ; s++) { // get B(k,j) Fine_fl [s] = 1 ; int64_t pB = pB_start + s ; if (!GBB (Bb, pB)) continue ; int64_t k = GBI (Bi, pB, bvlen) ; // fl = flop count for just A(:,k)*B(k,j) int64_t pA, pA_end ; int64_t pleft = 0 ; GB_lookup (A_is_hyper, Ah, Ap, avlen, &pleft, anvec-1, k, &pA, &pA_end) ; int64_t fl = pA_end - pA ; Fine_fl [s] = fl ; ASSERT (fl >= 0) ; } // cumulative sum of flops to compute A*B(:,j) GB_cumsum (Fine_fl, bjnz, NULL, nth, Context) ; // slice B(:,j) into fine tasks int team_size = ceil (jflops / target_fine_size) ; ASSERT (Fine_slice != NULL) ; GB_pslice (Fine_slice, Fine_fl, bjnz, team_size, false); // shared hash table for all fine tasks for A*B(:,j) int64_t hsize = GB_hash_table_size (jflops, cvlen, AxB_method) ; // construct the fine tasks for C(:,j)=A*B(:,j) int leader = nf ; for (int fid = 0 ; fid < team_size ; fid++) { int64_t pstart = Fine_slice [fid] ; int64_t pend = Fine_slice [fid+1] ; int64_t fl = Fine_fl [pend] - Fine_fl [pstart] ; SaxpyTasks [nf].start = pB_start + pstart ; SaxpyTasks [nf].end = pB_start + pend - 1 ; SaxpyTasks [nf].vector = kk ; SaxpyTasks [nf].hsize = hsize ; SaxpyTasks [nf].Hi = NULL ; // assigned later SaxpyTasks [nf].Hf = NULL ; // assigned later SaxpyTasks [nf].Hx = NULL ; // assigned later SaxpyTasks [nf].my_cjnz = 0 ; SaxpyTasks [nf].leader = leader ; SaxpyTasks [nf].team_size = team_size ; nf++ ; } } } // flush the last coarse task, if any if (kcoarse_start < klast) { // kcoarse_start:klast-1 form a single coarse task GB_create_coarse_task (kcoarse_start, klast-1, SaxpyTasks, nc++, Bflops, cvlen, chunk, nthreads_max, Coarse_Work, AxB_method) ; } } else { // This coarse task is OK as-is. 
GB_create_coarse_task (kfirst, klast-1, SaxpyTasks, nc++, Bflops, cvlen, chunk, nthreads_max, Coarse_Work, AxB_method) ; } } } else { //---------------------------------------------------------------------- // entire computation in a single fine or coarse task //---------------------------------------------------------------------- // create a single coarse task: hash or Gustavson GB_create_coarse_task (0, bnvec-1, SaxpyTasks, 0, Bflops, cvlen, 1, 1, Coarse_Work, AxB_method) ; if (bnvec == 1) { // convert the single coarse task into a single fine task SaxpyTasks [0].start = 0 ; // first entry in B(:,0) SaxpyTasks [0].end = bnz - 1 ; // last entry in B(:,0) SaxpyTasks [0].vector = 0 ; } } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORKSPACE ; (*SaxpyTasks_handle) = SaxpyTasks ; (*SaxpyTasks_size_handle) = SaxpyTasks_size ; return (GrB_SUCCESS) ; }
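//------------------------------------------------------------------------------
// balanced slicing: a standalone illustration
//------------------------------------------------------------------------------

// The initial coarse tasks above are carved out of the cumulative-sum array
// Bflops so that each task receives roughly total_flops / ntasks_initial of
// the work.  A minimal sketch of that idea using a plain binary search
// (GB_pslice itself is assumed to be more refined); illustrative only and
// disabled so it does not affect compilation:

#if 0
#include <stdio.h>
#include <stdint.h>

// smallest k with Work [k] >= target, for nondecreasing Work [0..n],
// assuming target <= Work [n]
static int64_t demo_trim (const int64_t *Work, int64_t n, int64_t target)
{
    int64_t lo = 0, hi = n ;
    while (lo < hi)
    {
        int64_t mid = (lo + hi) / 2 ;
        if (Work [mid] < target) lo = mid + 1 ; else hi = mid ;
    }
    return (lo) ;
}

int main (void)
{
    // Work [k] = cumulative flops for vectors 0..k-1, like Bflops
    int64_t Work [6] = { 0, 10, 20, 100, 110, 120 } ;
    int64_t n = 5, ntasks = 2, Slice [3] ;
    Slice [0] = 0 ;
    Slice [ntasks] = n ;
    for (int64_t t = 1 ; t < ntasks ; t++)
    {
        Slice [t] = demo_trim (Work, n, (t * Work [n]) / ntasks) ;
    }
    // task t handles vectors Slice [t] to Slice [t+1]-1; this prints
    // "0 3 5": vector 2 (80 flops) dominates, so the split point lands
    // just after it and the costly vector stays within a single task
    for (int64_t t = 0 ; t <= ntasks ; t++)
    {
        printf ("%lld ", (long long) Slice [t]) ;
    }
    printf ("\n") ;
    return (0) ;
}
#endif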
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/annotate.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/decorate.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/fx-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/log.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/opencl-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/splay-tree.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/utility.h" /* Define declarations. */ #define LeftShiftOperator 0xf5U #define RightShiftOperator 0xf6U #define LessThanEqualOperator 0xf7U #define GreaterThanEqualOperator 0xf8U #define EqualOperator 0xf9U #define NotEqualOperator 0xfaU #define LogicalAndOperator 0xfbU #define LogicalOrOperator 0xfcU #define ExponentialNotation 0xfdU struct _FxInfo { const Image *images; char *expression; FILE *file; SplayTreeInfo *colors, *symbols; CacheView **view; RandomInfo *random_info; ExceptionInfo *exception; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireFxInfo() allocates the FxInfo structure. 
% % The format of the AcquireFxInfo method is: % % FxInfo *AcquireFxInfo(Image *images,const char *expression) % % A description of each parameter follows: % % o images: the image sequence. % % o expression: the expression. % */ MagickExport FxInfo *AcquireFxInfo(const Image *images,const char *expression) { char fx_op[2]; const Image *next; FxInfo *fx_info; register ssize_t i; fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info)); (void) memset(fx_info,0,sizeof(*fx_info)); fx_info->exception=AcquireExceptionInfo(); fx_info->images=images; fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength( fx_info->images),sizeof(*fx_info->view)); if (fx_info->view == (CacheView **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); i=0; next=GetFirstImageInList(fx_info->images); for ( ; next != (Image *) NULL; next=next->next) { fx_info->view[i]=AcquireVirtualCacheView(next,fx_info->exception); i++; } fx_info->random_info=AcquireRandomInfo(); fx_info->expression=ConstantString(expression); fx_info->file=stderr; (void) SubstituteString(&fx_info->expression," ",""); /* compact string */ /* Force right-to-left associativity for unary negation. */ (void) SubstituteString(&fx_info->expression,"-","-1.0*"); (void) SubstituteString(&fx_info->expression,"^-1.0*","^-"); (void) SubstituteString(&fx_info->expression,"E-1.0*","E-"); (void) SubstituteString(&fx_info->expression,"e-1.0*","e-"); /* Convert compound to simple operators. */ fx_op[1]='\0'; *fx_op=(char) LeftShiftOperator; (void) SubstituteString(&fx_info->expression,"<<",fx_op); *fx_op=(char) RightShiftOperator; (void) SubstituteString(&fx_info->expression,">>",fx_op); *fx_op=(char) LessThanEqualOperator; (void) SubstituteString(&fx_info->expression,"<=",fx_op); *fx_op=(char) GreaterThanEqualOperator; (void) SubstituteString(&fx_info->expression,">=",fx_op); *fx_op=(char) EqualOperator; (void) SubstituteString(&fx_info->expression,"==",fx_op); *fx_op=(char) NotEqualOperator; (void) SubstituteString(&fx_info->expression,"!=",fx_op); *fx_op=(char) LogicalAndOperator; (void) SubstituteString(&fx_info->expression,"&&",fx_op); *fx_op=(char) LogicalOrOperator; (void) SubstituteString(&fx_info->expression,"||",fx_op); *fx_op=(char) ExponentialNotation; (void) SubstituteString(&fx_info->expression,"**",fx_op); return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d d N o i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AddNoiseImage() adds random noise to the image. % % The format of the AddNoiseImage method is: % % Image *AddNoiseImage(const Image *image,const NoiseType noise_type, % ExceptionInfo *exception) % Image *AddNoiseImageChannel(const Image *image,const ChannelType channel, % const NoiseType noise_type,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o noise_type: The type of noise: Uniform, Gaussian, Multiplicative, % Impulse, Laplacian, or Poisson. % % o exception: return any errors or warnings in this structure. 
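%
%  A typical call, as an illustrative sketch (GaussianNoise is one member of
%  the NoiseType enumeration; any of the noise types listed above works):
%
%    noise_image=AddNoiseImageChannel(image,DefaultChannels,GaussianNoise,
%      exception);
%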
% */ MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type, ExceptionInfo *exception) { Image *noise_image; noise_image=AddNoiseImageChannel(image,DefaultChannels,noise_type,exception); return(noise_image); } MagickExport Image *AddNoiseImageChannel(const Image *image, const ChannelType channel,const NoiseType noise_type,ExceptionInfo *exception) { #define AddNoiseImageTag "AddNoise/Image" CacheView *image_view, *noise_view; const char *option; double attenuate; Image *noise_image; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Initialize noise image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) noise_image=AccelerateAddNoiseImage(image,channel,noise_type,exception); if (noise_image != (Image *) NULL) return(noise_image); #endif noise_image=CloneImage(image,0,0,MagickTrue,exception); if (noise_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(noise_image,DirectClass) == MagickFalse) { InheritException(exception,&noise_image->exception); noise_image=DestroyImage(noise_image); return((Image *) NULL); } /* Add noise in each row. */ attenuate=1.0; option=GetImageArtifact(image,"attenuate"); if (option != (char *) NULL) attenuate=StringToDouble(option,(char **) NULL); status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireVirtualCacheView(image,exception); noise_view=AcquireAuthenticCacheView(noise_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,noise_image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict noise_indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1, exception); if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); noise_indexes=GetCacheViewAuthenticIndexQueue(noise_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(GenerateDifferentialNoise(random_info[id], GetPixelRed(p),noise_type,attenuate))); if (IsGrayColorspace(image->colorspace) != MagickFalse) { SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); } else { if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(GenerateDifferentialNoise( random_info[id],GetPixelGreen(p),noise_type,attenuate))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(GenerateDifferentialNoise( random_info[id],GetPixelBlue(p),noise_type,attenuate))); } if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(GenerateDifferentialNoise( 
random_info[id],GetPixelOpacity(p),noise_type,attenuate))); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(noise_indexes+x,ClampToQuantum( GenerateDifferentialNoise(random_info[id],GetPixelIndex( indexes+x),noise_type,attenuate))); p++; q++; } sync=SyncCacheViewAuthenticPixels(noise_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,AddNoiseImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } noise_view=DestroyCacheView(noise_view); image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) noise_image=DestroyImage(noise_image); return(noise_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l u e S h i f t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlueShiftImage() mutes the colors of the image to simulate a scene at % nighttime in the moonlight. % % The format of the BlueShiftImage method is: % % Image *BlueShiftImage(const Image *image,const double factor, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o factor: the shift factor. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlueShiftImage(const Image *image,const double factor, ExceptionInfo *exception) { #define BlueShiftImageTag "BlueShift/Image" CacheView *image_view, *shift_view; Image *shift_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Allocate blue shift image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); shift_image=CloneImage(image,0,0,MagickTrue,exception); if (shift_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(shift_image,DirectClass) == MagickFalse) { InheritException(exception,&shift_image->exception); shift_image=DestroyImage(shift_image); return((Image *) NULL); } /* Blue-shift DirectClass image. 
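%
%  A typical call, as an illustrative sketch (a factor of 1.5 gives a
%  moderate nighttime effect; larger factors mute the colors further):
%
%    shift_image=BlueShiftImage(image,1.5,exception);
%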
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); shift_view=AcquireAuthenticCacheView(shift_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,shift_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; Quantum quantum; register const PixelPacket *magick_restrict p; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { quantum=GetPixelRed(p); if (GetPixelGreen(p) < quantum) quantum=GetPixelGreen(p); if (GetPixelBlue(p) < quantum) quantum=GetPixelBlue(p); pixel.red=0.5*(GetPixelRed(p)+factor*quantum); pixel.green=0.5*(GetPixelGreen(p)+factor*quantum); pixel.blue=0.5*(GetPixelBlue(p)+factor*quantum); quantum=GetPixelRed(p); if (GetPixelGreen(p) > quantum) quantum=GetPixelGreen(p); if (GetPixelBlue(p) > quantum) quantum=GetPixelBlue(p); pixel.red=0.5*(pixel.red+factor*quantum); pixel.green=0.5*(pixel.green+factor*quantum); pixel.blue=0.5*(pixel.blue+factor*quantum); SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); p++; q++; } sync=SyncCacheViewAuthenticPixels(shift_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BlueShiftImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); shift_view=DestroyCacheView(shift_view); if (status == MagickFalse) shift_image=DestroyImage(shift_image); return(shift_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a r c o a l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CharcoalImage() creates a new image that is a copy of an existing one with % the edge highlighted. It allocates the memory necessary for the new Image % structure and returns a pointer to the new image. % % The format of the CharcoalImage method is: % % Image *CharcoalImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
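%
%  A typical call, as an illustrative sketch (a radius of 0.0 lets the
%  method choose a suitable radius from the given sigma):
%
%    charcoal_image=CharcoalImage(image,0.0,1.0,exception);
%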
% */ MagickExport Image *CharcoalImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *charcoal_image, *edge_image; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) return((Image *) NULL); charcoal_image=(Image *) NULL; status=ClampImage(edge_image); if (status != MagickFalse) charcoal_image=BlurImage(edge_image,radius,sigma,exception); edge_image=DestroyImage(edge_image); if (charcoal_image == (Image *) NULL) return((Image *) NULL); status=NormalizeImage(charcoal_image); if (status != MagickFalse) status=NegateImage(charcoal_image,MagickFalse); if (status != MagickFalse) status=GrayscaleImage(charcoal_image,image->intensity); if (status == MagickFalse) charcoal_image=DestroyImage(charcoal_image); return(charcoal_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorizeImage() blends the fill color with each pixel in the image. % A percentage blend is specified with opacity. Control the application % of different color components by specifying a different percentage for % each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue). % % The format of the ColorizeImage method is: % % Image *ColorizeImage(const Image *image,const char *opacity, % const PixelPacket colorize,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o opacity: A character string indicating the level of opacity as a % percentage. % % o colorize: A color value. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ColorizeImage(const Image *image,const char *opacity, const PixelPacket colorize,ExceptionInfo *exception) { #define ColorizeImageTag "Colorize/Image" CacheView *colorize_view, *image_view; GeometryInfo geometry_info; Image *colorize_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket pixel; MagickStatusType flags; ssize_t y; /* Allocate colorized image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); colorize_image=CloneImage(image,0,0,MagickTrue,exception); if (colorize_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(colorize_image,DirectClass) == MagickFalse) { InheritException(exception,&colorize_image->exception); colorize_image=DestroyImage(colorize_image); return((Image *) NULL); } if ((IsGrayColorspace(image->colorspace) != MagickFalse) || (IsPixelGray(&colorize) != MagickFalse)) (void) SetImageColorspace(colorize_image,sRGBColorspace); if ((colorize_image->matte == MagickFalse) && (colorize.opacity != OpaqueOpacity)) (void) SetImageAlphaChannel(colorize_image,OpaqueAlphaChannel); if (opacity == (const char *) NULL) return(colorize_image); /* Determine RGB values of the pen color. 
*/ flags=ParseGeometry(opacity,&geometry_info); pixel.red=geometry_info.rho; pixel.green=geometry_info.rho; pixel.blue=geometry_info.rho; pixel.opacity=(MagickRealType) OpaqueOpacity; if ((flags & SigmaValue) != 0) pixel.green=geometry_info.sigma; if ((flags & XiValue) != 0) pixel.blue=geometry_info.xi; if ((flags & PsiValue) != 0) pixel.opacity=geometry_info.psi; /* Colorize DirectClass image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); colorize_view=AcquireAuthenticCacheView(colorize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,colorize_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const PixelPacket *magick_restrict p; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(colorize_view,0,y,colorize_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,((GetPixelRed(p)*(100.0-pixel.red)+ colorize.red*pixel.red)/100.0)); SetPixelGreen(q,((GetPixelGreen(p)*(100.0-pixel.green)+ colorize.green*pixel.green)/100.0)); SetPixelBlue(q,((GetPixelBlue(p)*(100.0-pixel.blue)+ colorize.blue*pixel.blue)/100.0)); if (colorize_image->matte == MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); else SetPixelOpacity(q,((GetPixelOpacity(p)*(100.0-pixel.opacity)+ colorize.opacity*pixel.opacity)/100.0)); p++; q++; } sync=SyncCacheViewAuthenticPixels(colorize_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ColorizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); colorize_view=DestroyCacheView(colorize_view); if (status == MagickFalse) colorize_image=DestroyImage(colorize_image); return(colorize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r M a t r i x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorMatrixImage() applies color transformation to an image. This method % permits saturation changes, hue rotation, luminance to alpha, and various % other effects. Although variable-sized transformation matrices can be used, % typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA % (or RGBA with offsets). The matrix is similar to those used by Adobe Flash % except offsets are in column 6 rather than 5 (in support of CMYKA images) % and offsets are normalized (divide Flash offset by 255). % % The format of the ColorMatrixImage method is: % % Image *ColorMatrixImage(const Image *image, % const KernelInfo *color_matrix,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color_matrix: the color matrix. % % o exception: return any errors or warnings in this structure. 
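%
%  For example, under the normalization above a Flash offset of 51 becomes
%  51/255 = 0.2 in column 6.  A typical call, as an illustrative sketch
%  (color_matrix is a KernelInfo prepared by the caller):
%
%    color_image=ColorMatrixImage(image,color_matrix,exception);
%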
% */ MagickExport Image *ColorMatrixImage(const Image *image, const KernelInfo *color_matrix,ExceptionInfo *exception) { #define ColorMatrixImageTag "ColorMatrix/Image" CacheView *color_view, *image_view; double ColorMatrix[6][6] = { { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 }, { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 }, { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 } }; Image *color_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t u, v, y; /* Create color matrix. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); i=0; for (v=0; v < (ssize_t) color_matrix->height; v++) for (u=0; u < (ssize_t) color_matrix->width; u++) { if ((v < 6) && (u < 6)) ColorMatrix[v][u]=color_matrix->values[i]; i++; } /* Initialize color image. */ color_image=CloneImage(image,0,0,MagickTrue,exception); if (color_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(color_image,DirectClass) == MagickFalse) { InheritException(exception,&color_image->exception); color_image=DestroyImage(color_image); return((Image *) NULL); } if (image->debug != MagickFalse) { char format[MaxTextExtent], *message; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " ColorMatrix image with color matrix:"); message=AcquireString(""); for (v=0; v < 6; v++) { *message='\0'; (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < 6; u++) { (void) FormatLocaleString(format,MaxTextExtent,"%+f ", ColorMatrix[v][u]); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } /* ColorMatrix image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); color_view=AcquireAuthenticCacheView(color_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,color_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickRealType pixel; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; register IndexPacket *magick_restrict color_indexes; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); color_indexes=GetCacheViewAuthenticIndexQueue(color_view); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t v; size_t height; height=color_matrix->height > 6 ? 
6UL : color_matrix->height; for (v=0; v < (ssize_t) height; v++) { pixel=ColorMatrix[v][0]*GetPixelRed(p)+ColorMatrix[v][1]* GetPixelGreen(p)+ColorMatrix[v][2]*GetPixelBlue(p); if (image->matte != MagickFalse) pixel+=ColorMatrix[v][3]*(QuantumRange-GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) pixel+=ColorMatrix[v][4]*GetPixelIndex(indexes+x); pixel+=QuantumRange*ColorMatrix[v][5]; switch (v) { case 0: SetPixelRed(q,ClampToQuantum(pixel)); break; case 1: SetPixelGreen(q,ClampToQuantum(pixel)); break; case 2: SetPixelBlue(q,ClampToQuantum(pixel)); break; case 3: { if (image->matte != MagickFalse) SetPixelAlpha(q,ClampToQuantum(pixel)); break; } case 4: { if (image->colorspace == CMYKColorspace) SetPixelIndex(color_indexes+x,ClampToQuantum(pixel)); break; } } } p++; q++; } if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ColorMatrixImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } color_view=DestroyCacheView(color_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) color_image=DestroyImage(color_image); return(color_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyFxInfo() deallocates memory associated with an FxInfo structure. % % The format of the DestroyFxInfo method is: % % FxInfo *DestroyFxInfo(FxInfo *fx_info) % % A description of each parameter follows: % % o fx_info: the fx info. % */ MagickExport FxInfo *DestroyFxInfo(FxInfo *fx_info) { register ssize_t i; fx_info->exception=DestroyExceptionInfo(fx_info->exception); fx_info->expression=DestroyString(fx_info->expression); fx_info->symbols=DestroySplayTree(fx_info->symbols); fx_info->colors=DestroySplayTree(fx_info->colors); for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--) fx_info->view[i]=DestroyCacheView(fx_info->view[i]); fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view); fx_info->random_info=DestroyRandomInfo(fx_info->random_info); fx_info=(FxInfo *) RelinquishMagickMemory(fx_info); return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F x E v a l u a t e C h a n n e l E x p r e s s i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxEvaluateChannelExpression() evaluates an expression and returns the % results. % % The format of the FxEvaluateChannelExpression method is: % % MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info, % const ChannelType channel,const ssize_t x,const ssize_t y, % double *alpha,ExceptionInfo *exception) % MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,double *alpha, % ExceptionInfo *exception) % % A description of each parameter follows: % % o fx_info: the fx info. % % o channel: the channel. % % o x,y: the pixel position. % % o alpha: the result. % % o exception: return any errors or warnings in this structure.
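%
%  A typical sequence, as an illustrative sketch ("(r+g+b)/3.0" averages the
%  red, green, and blue channels of the current pixel):
%
%    fx_info=AcquireFxInfo(image,"(r+g+b)/3.0");
%    status=FxEvaluateChannelExpression(fx_info,RedChannel,x,y,&alpha,
%      exception);
%    fx_info=DestroyFxInfo(fx_info);
%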
% */ static double FxChannelStatistics(FxInfo *fx_info,const Image *image, ChannelType channel,const char *symbol,ExceptionInfo *exception) { char channel_symbol[MaxTextExtent], key[MaxTextExtent], statistic[MaxTextExtent]; const char *value; register const char *p; for (p=symbol; (*p != '.') && (*p != '\0'); p++) ; *channel_symbol='\0'; if (*p == '.') { ssize_t option; (void) CopyMagickString(channel_symbol,p+1,MaxTextExtent); option=ParseCommandOption(MagickChannelOptions,MagickTrue,channel_symbol); if (option >= 0) channel=(ChannelType) option; } (void) FormatLocaleString(key,MaxTextExtent,"%p.%.20g.%s",(void *) image, (double) channel,symbol); value=(const char *) GetValueFromSplayTree(fx_info->symbols,key); if (value != (const char *) NULL) return(QuantumScale*StringToDouble(value,(char **) NULL)); (void) DeleteNodeFromSplayTree(fx_info->symbols,key); if (LocaleNCompare(symbol,"depth",5) == 0) { size_t depth; depth=GetImageChannelDepth(image,channel,exception); (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",(double) depth); } if (LocaleNCompare(symbol,"kurtosis",8) == 0) { double kurtosis, skewness; (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness, exception); (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",kurtosis); } if (LocaleNCompare(symbol,"maxima",6) == 0) { double maxima, minima; (void) GetImageChannelRange(image,channel,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",maxima); } if (LocaleNCompare(symbol,"mean",4) == 0) { double mean, standard_deviation; (void) GetImageChannelMean(image,channel,&mean,&standard_deviation, exception); (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",mean); } if (LocaleNCompare(symbol,"minima",6) == 0) { double maxima, minima; (void) GetImageChannelRange(image,channel,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",minima); } if (LocaleNCompare(symbol,"skewness",8) == 0) { double kurtosis, skewness; (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness, exception); (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",skewness); } if (LocaleNCompare(symbol,"standard_deviation",18) == 0) { double mean, standard_deviation; (void) GetImageChannelMean(image,channel,&mean,&standard_deviation, exception); (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g", standard_deviation); } (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key), ConstantString(statistic)); return(QuantumScale*StringToDouble(statistic,(char **) NULL)); } static double FxEvaluateSubexpression(FxInfo *,const ChannelType,const ssize_t, const ssize_t,const char *,const size_t,double *,ExceptionInfo *); static inline MagickBooleanType IsFxFunction(const char *expression, const char *name,const size_t length) { int c; c=expression[length]; if ((LocaleNCompare(expression,name,length) == 0) && ((isspace(c) == 0) || (c == '('))) return(MagickTrue); return(MagickFalse); } static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta) { if (beta != 0) return(FxGCD(beta,alpha % beta)); return(alpha); } static inline const char *FxSubexpression(const char *expression, ExceptionInfo *exception) { const char *subexpression; register ssize_t level; level=0; subexpression=expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL))) { if (strchr("(",(int) *subexpression) != (char *) NULL) level++; else if (strchr(")",(int) *subexpression) != (char *) NULL) level--; 
subexpression++; } if (*subexpression == '\0') (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnbalancedParenthesis","`%s'",expression); return(subexpression); } static double FxGetSymbol(FxInfo *fx_info,const ChannelType channel, const ssize_t x,const ssize_t y,const char *expression,const size_t depth, ExceptionInfo *exception) { char *q, symbol[MaxTextExtent]; const char *p, *value; double alpha, beta; Image *image; MagickBooleanType status; MagickPixelPacket pixel; PointInfo point; register ssize_t i; size_t level; p=expression; i=GetImageIndexInList(fx_info->images); level=0; point.x=(double) x; point.y=(double) y; if (isalpha((int) ((unsigned char) *(p+1))) == 0) { char *subexpression; subexpression=AcquireString(expression); if (strchr("suv",(int) *p) != (char *) NULL) { switch (*p) { case 's': default: { i=GetImageIndexInList(fx_info->images); break; } case 'u': i=0; break; case 'v': i=1; break; } p++; if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); i=(ssize_t) alpha; if (*p != '\0') p++; } if (*p == '.') p++; } if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0)) { p++; if (*p == '{') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '{') level++; else if (*p == '}') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); point.x=alpha; point.y=beta; if (*p != '\0') p++; } else if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); point.x+=alpha; point.y+=beta; if (*p != '\0') p++; } if (*p == '.') p++; } subexpression=DestroyString(subexpression); } image=GetImageFromList(fx_info->images,i); if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "NoSuchImage","`%s'",expression); return(0.0); } i=GetImageIndexInList(image); GetMagickPixelPacket(image,&pixel); status=InterpolateMagickPixelPacket(image,fx_info->view[i],image->interpolate, point.x,point.y,&pixel,exception); (void) status; if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) && (LocaleCompare(p,"lightness") != 0)) { char name[MaxTextExtent]; (void) CopyMagickString(name,p,MaxTextExtent); for (q=name+(strlen(name)-1); q > name; q--) { if (*q == ')') break; if (*q == '.') { *q='\0'; break; } } if ((strlen(name) > 2) && (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL)) { MagickPixelPacket *color; color=(MagickPixelPacket *) GetValueFromSplayTree(fx_info->colors, name); if (color != (MagickPixelPacket *) NULL) { pixel=(*color); p+=strlen(name); } else if (QueryMagickColor(name,&pixel,fx_info->exception) != MagickFalse) { (void) AddValueToSplayTree(fx_info->colors,ConstantString(name), CloneMagickPixelPacket(&pixel)); p+=strlen(name); } } } (void) CopyMagickString(symbol,p,MaxTextExtent); StripString(symbol); if (*symbol == '\0') { switch (channel) { case RedChannel: return(QuantumScale*pixel.red); case GreenChannel: return(QuantumScale*pixel.green); 
case BlueChannel: return(QuantumScale*pixel.blue); case OpacityChannel: { double alpha; if (pixel.matte == MagickFalse) return(1.0); alpha=(double) (QuantumScale*GetPixelAlpha(&pixel)); return(alpha); } case IndexChannel: { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), ImageError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.index); } case DefaultChannels: return(QuantumScale*GetMagickPixelIntensity(image,&pixel)); default: break; } (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",p); return(0.0); } switch (*symbol) { case 'A': case 'a': { if (LocaleCompare(symbol,"a") == 0) return((double) (QuantumScale*GetPixelAlpha(&pixel))); break; } case 'B': case 'b': { if (LocaleCompare(symbol,"b") == 0) return(QuantumScale*pixel.blue); break; } case 'C': case 'c': { if (IsFxFunction(symbol,"channel",7) != MagickFalse) { GeometryInfo channel_info; MagickStatusType flags; flags=ParseGeometry(symbol+7,&channel_info); if (image->colorspace == CMYKColorspace) switch (channel) { case CyanChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case MagentaChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case YellowChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case OpacityChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } switch (channel) { case RedChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case GreenChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case BlueChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case OpacityChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case IndexChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } } if (LocaleCompare(symbol,"c") == 0) return(QuantumScale*pixel.red); break; } case 'D': case 'd': { if (LocaleNCompare(symbol,"depth",5) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'E': case 'e': { if (LocaleCompare(symbol,"extent") == 0) { if (image->extent != 0) return((double) image->extent); return((double) GetBlobSize(image)); } break; } case 'G': case 'g': { if (LocaleCompare(symbol,"g") == 0) return(QuantumScale*pixel.green); break; } case 'K': case 'k': { if (LocaleNCompare(symbol,"kurtosis",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"k") == 0) { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.index); } break; } case 'H': case 'h': { if (LocaleCompare(symbol,"h") == 0) return((double) image->rows); if (LocaleCompare(symbol,"hue") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green), ClampToQuantum(pixel.blue),&hue,&saturation,&lightness); return(hue); } break; } case 'I': case 'i': { if ((LocaleCompare(symbol,"image.depth") == 0) || (LocaleCompare(symbol,"image.minima") == 0) || (LocaleCompare(symbol,"image.maxima") == 0) || (LocaleCompare(symbol,"image.mean") == 0) || 
(LocaleCompare(symbol,"image.kurtosis") == 0) || (LocaleCompare(symbol,"image.skewness") == 0) || (LocaleCompare(symbol,"image.standard_deviation") == 0)) return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception)); if (LocaleCompare(symbol,"image.resolution.x") == 0) return(image->x_resolution); if (LocaleCompare(symbol,"image.resolution.y") == 0) return(image->y_resolution); if (LocaleCompare(symbol,"intensity") == 0) return(QuantumScale*GetMagickPixelIntensity(image,&pixel)); if (LocaleCompare(symbol,"i") == 0) return((double) x); break; } case 'J': case 'j': { if (LocaleCompare(symbol,"j") == 0) return((double) y); break; } case 'L': case 'l': { if (LocaleCompare(symbol,"lightness") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green), ClampToQuantum(pixel.blue),&hue,&saturation,&lightness); return(lightness); } if (LocaleCompare(symbol,"luma") == 0) { double luma; luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luma); } if (LocaleCompare(symbol,"luminance") == 0) { double luminance; luminance=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luminance); } break; } case 'M': case 'm': { if (LocaleNCompare(symbol,"maxima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"mean",4) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"minima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"m") == 0) return(QuantumScale*pixel.green); break; } case 'N': case 'n': { if (LocaleCompare(symbol,"n") == 0) return((double) GetImageListLength(fx_info->images)); break; } case 'O': case 'o': { if (LocaleCompare(symbol,"o") == 0) return(QuantumScale*pixel.opacity); break; } case 'P': case 'p': { if (LocaleCompare(symbol,"page.height") == 0) return((double) image->page.height); if (LocaleCompare(symbol,"page.width") == 0) return((double) image->page.width); if (LocaleCompare(symbol,"page.x") == 0) return((double) image->page.x); if (LocaleCompare(symbol,"page.y") == 0) return((double) image->page.y); if (LocaleCompare(symbol,"printsize.x") == 0) return(PerceptibleReciprocal(image->x_resolution)*image->columns); if (LocaleCompare(symbol,"printsize.y") == 0) return(PerceptibleReciprocal(image->y_resolution)*image->rows); break; } case 'Q': case 'q': { if (LocaleCompare(symbol,"quality") == 0) return((double) image->quality); break; } case 'R': case 'r': { if (LocaleCompare(symbol,"resolution.x") == 0) return(image->x_resolution); if (LocaleCompare(symbol,"resolution.y") == 0) return(image->y_resolution); if (LocaleCompare(symbol,"r") == 0) return(QuantumScale*pixel.red); break; } case 'S': case 's': { if (LocaleCompare(symbol,"saturation") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green), ClampToQuantum(pixel.blue),&hue,&saturation,&lightness); return(saturation); } if (LocaleNCompare(symbol,"skewness",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"standard_deviation",18) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'T': case 't': { if (LocaleCompare(symbol,"t") == 0) return((double) GetImageIndexInList(fx_info->images)); break; } case 'W': case 'w': { if (LocaleCompare(symbol,"w") == 0) return((double) image->columns); break; } case 'Y': case 'y': 
{ if (LocaleCompare(symbol,"y") == 0) return(QuantumScale*pixel.blue); break; } case 'Z': case 'z': { if (LocaleCompare(symbol,"z") == 0) { double depth; depth=(double) GetImageChannelDepth(image,channel,fx_info->exception); return(depth); } break; } default: break; } value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol); if (value != (const char *) NULL) return(StringToDouble(value,(char **) NULL)); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",symbol); return(0.0); } static const char *FxOperatorPrecedence(const char *expression, ExceptionInfo *exception) { typedef enum { UndefinedPrecedence, NullPrecedence, BitwiseComplementPrecedence, ExponentPrecedence, ExponentialNotationPrecedence, MultiplyPrecedence, AdditionPrecedence, ShiftPrecedence, RelationalPrecedence, EquivalencyPrecedence, BitwiseAndPrecedence, BitwiseOrPrecedence, LogicalAndPrecedence, LogicalOrPrecedence, TernaryPrecedence, AssignmentPrecedence, CommaPrecedence, SeparatorPrecedence } FxPrecedence; FxPrecedence precedence, target; register const char *subexpression; register int c; size_t level; c=(-1); level=0; subexpression=(const char *) NULL; target=NullPrecedence; while ((c != '\0') && (*expression != '\0')) { precedence=UndefinedPrecedence; if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@')) { expression++; continue; } switch (*expression) { case 'A': case 'a': { #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { expression+=5; break; } #endif if (IsFxFunction(expression,"atan2",5) != MagickFalse) { expression+=5; break; } break; } case 'E': case 'e': { if ((isdigit(c) != 0) && ((LocaleNCompare(expression,"E+",2) == 0) || (LocaleNCompare(expression,"E-",2) == 0))) { expression+=2; /* scientific notation */ break; } } case 'J': case 'j': { if ((IsFxFunction(expression,"j0",2) != MagickFalse) || (IsFxFunction(expression,"j1",2) != MagickFalse)) { expression+=2; break; } break; } case '#': { while (isxdigit((int) ((unsigned char) *(expression+1))) != 0) expression++; break; } default: break; } if ((c == (int) '{') || (c == (int) '[')) level++; else if ((c == (int) '}') || (c == (int) ']')) level--; if (level == 0) switch ((unsigned char) *expression) { case '~': case '!': { precedence=BitwiseComplementPrecedence; break; } case '^': case '@': { precedence=ExponentPrecedence; break; } default: { if (((c != 0) && ((isdigit(c) != 0) || (strchr(")",c) != (char *) NULL))) && (((islower((int) ((unsigned char) *expression)) != 0) || (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) || ((isdigit(c) == 0) && (isdigit((int) ((unsigned char) *expression)) != 0))) && (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL)) precedence=MultiplyPrecedence; break; } case '*': case '/': case '%': { precedence=MultiplyPrecedence; break; } case '+': case '-': { if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) || (isalpha(c) != 0)) precedence=AdditionPrecedence; break; } case LeftShiftOperator: case RightShiftOperator: { precedence=ShiftPrecedence; break; } case '<': case LessThanEqualOperator: case GreaterThanEqualOperator: case '>': { precedence=RelationalPrecedence; break; } case EqualOperator: case NotEqualOperator: { 
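        /*
          == and != bind at equivalency precedence; when evaluated below in
          FxEvaluateSubexpression they compare fuzzily, treating values
          within MagickEpsilon of each other as equal.
        */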
precedence=EquivalencyPrecedence; break; } case '&': { precedence=BitwiseAndPrecedence; break; } case '|': { precedence=BitwiseOrPrecedence; break; } case LogicalAndOperator: { precedence=LogicalAndPrecedence; break; } case LogicalOrOperator: { precedence=LogicalOrPrecedence; break; } case ExponentialNotation: { precedence=ExponentialNotationPrecedence; break; } case ':': case '?': { precedence=TernaryPrecedence; break; } case '=': { precedence=AssignmentPrecedence; break; } case ',': { precedence=CommaPrecedence; break; } case ';': { precedence=SeparatorPrecedence; break; } } if ((precedence == BitwiseComplementPrecedence) || (precedence == TernaryPrecedence) || (precedence == AssignmentPrecedence)) { if (precedence > target) { /* Right-to-left associativity. */ target=precedence; subexpression=expression; } } else if (precedence >= target) { /* Left-to-right associativity. */ target=precedence; subexpression=expression; } if (strchr("(",(int) *expression) != (char *) NULL) expression=FxSubexpression(expression,exception); c=(int) (*expression++); } return(subexpression); } static double FxEvaluateSubexpression(FxInfo *fx_info,const ChannelType channel, const ssize_t x,const ssize_t y,const char *expression,const size_t depth, double *beta,ExceptionInfo *exception) { #define FxMaxParenthesisDepth 58 #define FxMaxSubexpressionDepth 200 #define FxReturn(value) \ { \ subexpression=DestroyString(subexpression); \ return(value); \ } char *q, *subexpression; double alpha, gamma; register const char *p; *beta=0.0; subexpression=AcquireString(expression); *subexpression='\0'; if (depth > FxMaxSubexpressionDepth) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",expression); FxReturn(0.0); } if (exception->severity >= ErrorException) FxReturn(0.0); while (isspace((int) ((unsigned char) *expression)) != 0) expression++; if (*expression == '\0') FxReturn(0.0); p=FxOperatorPrecedence(expression,exception); if (p != (const char *) NULL) { (void) CopyMagickString(subexpression,expression,(size_t) (p-expression+1)); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); switch ((unsigned char) *p) { case '~': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) (~(size_t) *beta); FxReturn(*beta); } case '!': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta == 0.0 ? 
1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p, depth+1,beta,exception)); FxReturn(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(PerceptibleReciprocal(*beta)*alpha); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=fabs(floor((*beta)+0.5)); FxReturn(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha-(*beta)); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); FxReturn(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); FxReturn(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); FxReturn(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); FxReturn(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; FxReturn(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; FxReturn(*beta); } case '?': { double gamma; (void) CopyMagickString(subexpression,++p,MaxTextExtent); q=subexpression; p=StringToken(":",&q); if (q == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth+1,beta, exception); FxReturn(gamma); } case '=': { char numeric[MaxTextExtent]; q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); (void) FormatLocaleString(numeric,MaxTextExtent,"%.20g",(double) *beta); (void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression); (void) AddValueToSplayTree(fx_info->symbols,ConstantString( subexpression),ConstantString(numeric)); FxReturn(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1, beta,exception); FxReturn(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { if (depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); (void) CopyMagickString(subexpression,expression+1,MaxTextExtent); if (strlen(subexpression) != 0) subexpression[strlen(subexpression)-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); FxReturn(gamma); } switch (*expression) { case '+': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn((double) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (IsFxFunction(expression,"abs",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(acosh(alpha)); } #endif if (IsFxFunction(expression,"acos",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"airy",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(asinh(alpha)); } #endif if (IsFxFunction(expression,"asin",4) != MagickFalse) { 
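          /*
            Canonical unary-function pattern used throughout this switch:
            recursively evaluate the argument text that follows the function
            name, then apply the corresponding C math routine to the result.
          */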
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(asin(alpha)); } if (IsFxFunction(expression,"alt",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0); } if (IsFxFunction(expression,"atan2",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atanh(alpha)); } #endif if (IsFxFunction(expression,"atan",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'C': case 'c': { if (IsFxFunction(expression,"ceil",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(ceil(alpha)); } if (IsFxFunction(expression,"clamp",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha < 0.0) FxReturn(0.0); if (alpha > 1.0) FxReturn(1.0); FxReturn(alpha); } if (IsFxFunction(expression,"cosh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(cosh(alpha)); } if (IsFxFunction(expression,"cos",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'D': case 'd': { if (IsFxFunction(expression,"debug",5) != MagickFalse) { const char *type; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (fx_info->images->colorspace == CMYKColorspace) switch (channel) { case CyanChannel: type="cyan"; break; case MagentaChannel: type="magenta"; break; case YellowChannel: type="yellow"; break; case OpacityChannel: type="opacity"; break; case BlackChannel: type="black"; break; default: type="unknown"; break; } else switch (channel) { case RedChannel: type="red"; break; case GreenChannel: type="green"; break; case BlueChannel: type="blue"; break; case OpacityChannel: type="opacity"; break; default: type="unknown"; break; } *subexpression='\0'; if (strlen(expression) > 6) (void) CopyMagickString(subexpression,expression+6,MaxTextExtent); if (strlen(subexpression) > 1) subexpression[strlen(subexpression)-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file, "%s[%.20g,%.20g].%s: %s=%.*g\n",fx_info->images->filename, (double) x,(double) y,type,subexpression,GetMagickPrecision(), (double) alpha); FxReturn(0.0); } if (IsFxFunction(expression,"drc",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) FxReturn(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (IsFxFunction(expression,"erf",3) != MagickFalse) { 
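          /*
            Like acosh()/asinh()/atanh() above and j0()/j1() below, erf() is
            only recognized when the platform provides it (guarded by the
            MAGICKCORE_HAVE_* defines).
          */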
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(erf(alpha)); } #endif if (IsFxFunction(expression,"exp",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) FxReturn(2.7182818284590452354); break; } case 'F': case 'f': { if (IsFxFunction(expression,"floor",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha)); } break; } case 'G': case 'g': { if (IsFxFunction(expression,"gauss",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI); FxReturn(gamma); } if (IsFxFunction(expression,"gcd",3) != MagickFalse) { MagickOffsetType gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+ 0.5)); FxReturn((double) gcd); } if (LocaleCompare(expression,"g") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleCompare(expression,"hue") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"hypot",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(hypot(alpha,*beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'I': case 'i': { if (LocaleCompare(expression,"intensity") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"int",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"isnan",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn((double) !!IsNaN(alpha)); } if (LocaleCompare(expression,"i") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); #if defined(MAGICKCORE_HAVE_J0) if (IsFxFunction(expression,"j0",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"j1",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"jinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha)); FxReturn(gamma); } #endif break; } case 'L': case 'l': { if (IsFxFunction(expression,"ln",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(log(alpha)); } if (IsFxFunction(expression,"logtwo",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, 
depth+1,beta,exception); FxReturn(log10(alpha)/log10(2.0)); } if (IsFxFunction(expression,"log",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(log10(alpha)); } if (LocaleCompare(expression,"lightness") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression,"MaxRGB") == 0) FxReturn((double) QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (IsFxFunction(expression,"max",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (IsFxFunction(expression,"min",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha < *beta ? alpha : *beta); } if (IsFxFunction(expression,"mod",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); gamma=alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta); FxReturn(gamma); } if (LocaleCompare(expression,"m") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'N': case 'n': { if (IsFxFunction(expression,"not",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((double) (alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) FxReturn(1.0); if (LocaleCompare(expression,"o") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) FxReturn(MagickPHI); if (LocaleCompare(expression,"pi") == 0) FxReturn(MagickPI); if (IsFxFunction(expression,"pow",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) FxReturn((double) QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) FxReturn(QuantumScale); break; } case 'R': case 'r': { if (IsFxFunction(expression,"rand",4) != MagickFalse) { double alpha; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); FxReturn(alpha); } if (IsFxFunction(expression,"round",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha+0.5)); } if (LocaleCompare(expression,"r") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"sign",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(alpha < 0.0 ? 
-1.0 : 1.0); } if (IsFxFunction(expression,"sinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0) FxReturn(1.0); gamma=sin((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma); } if (IsFxFunction(expression,"sinh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sinh(alpha)); } if (IsFxFunction(expression,"sin",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(sin(alpha)); } if (IsFxFunction(expression,"sqrt",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sqrt(alpha)); } if (IsFxFunction(expression,"squish",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'T': case 't': { if (IsFxFunction(expression,"tanh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(tanh(alpha)); } if (IsFxFunction(expression,"tan",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) FxReturn(0.0); if (IsFxFunction(expression,"trunc",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha >= 0.0) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"t") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression,"u") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'V': case 'v': { if (LocaleCompare(expression,"v") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'W': case 'w': { if (IsFxFunction(expression,"while",5) != MagickFalse) { do { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); } while (fabs(alpha) >= MagickEpsilon); FxReturn(*beta); } if (LocaleCompare(expression,"w") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Y': case 'y': { if (LocaleCompare(expression,"y") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Z': case 'z': { if (LocaleCompare(expression,"z") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } default: break; } q=(char *) expression; alpha=InterpretSiPrefixValue(expression,&q); if (q == expression) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); FxReturn(alpha); } MagickExport MagickBooleanType FxEvaluateExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { MagickBooleanType status; status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception); return(status); } MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { FILE *file; MagickBooleanType status; file=fx_info->file; fx_info->file=(FILE *) NULL; status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception); fx_info->file=file; return(status); } MagickExport MagickBooleanType 
FxEvaluateChannelExpression(FxInfo *fx_info, const ChannelType channel,const ssize_t x,const ssize_t y,double *alpha, ExceptionInfo *exception) { double beta; beta=0.0; *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0, &beta,exception); return(exception->severity == OptionError ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxImage() applies a mathematical expression to the specified image. % % The format of the FxImage method is: % % Image *FxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % Image *FxImageChannel(const Image *image,const ChannelType channel, % const char *expression,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o expression: A mathematical expression. % % o exception: return any errors or warnings in this structure. % */ static FxInfo **DestroyFxThreadSet(FxInfo **fx_info) { register ssize_t i; assert(fx_info != (FxInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (fx_info[i] != (FxInfo *) NULL) fx_info[i]=DestroyFxInfo(fx_info[i]); fx_info=(FxInfo **) RelinquishMagickMemory(fx_info); return(fx_info); } static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression, ExceptionInfo *exception) { char *fx_expression; double alpha; FxInfo **fx_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info)); if (fx_info == (FxInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return((FxInfo **) NULL); } (void) memset(fx_info,0,number_threads*sizeof(*fx_info)); if (*expression != '@') fx_expression=ConstantString(expression); else fx_expression=FileToString(expression+1,~0UL,exception); for (i=0; i < (ssize_t) number_threads; i++) { MagickBooleanType status; fx_info[i]=AcquireFxInfo(image,fx_expression); if (fx_info[i] == (FxInfo *) NULL) break; status=FxPreprocessExpression(fx_info[i],&alpha,exception); if (status == MagickFalse) break; } fx_expression=DestroyString(fx_expression); if (i < (ssize_t) number_threads) fx_info=DestroyFxThreadSet(fx_info); return(fx_info); } MagickExport Image *FxImage(const Image *image,const char *expression, ExceptionInfo *exception) { Image *fx_image; fx_image=FxImageChannel(image,GrayChannel,expression,exception); return(fx_image); } MagickExport Image *FxImageChannel(const Image *image,const ChannelType channel, const char *expression,ExceptionInfo *exception) { #define FxImageTag "Fx/Image" CacheView *fx_view; FxInfo **magick_restrict fx_info; Image *fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (expression == (const char *) NULL) return(CloneImage(image,0,0,MagickTrue,exception)); fx_info=AcquireFxThreadSet(image,expression,exception); if (fx_info == (FxInfo **) NULL) return((Image *) NULL); fx_image=CloneImage(image,0,0,MagickTrue,exception); if (fx_image == (Image *) NULL) { fx_info=DestroyFxThreadSet(fx_info); return((Image *) NULL); } if 
(SetImageStorageClass(fx_image,DirectClass) == MagickFalse) { InheritException(exception,&fx_image->exception); fx_info=DestroyFxThreadSet(fx_info); fx_image=DestroyImage(fx_image); return((Image *) NULL); } /* Fx image. */ status=MagickTrue; progress=0; fx_view=AcquireAuthenticCacheView(fx_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,fx_image,fx_image->rows,1) #endif for (y=0; y < (ssize_t) fx_image->rows; y++) { const int id = GetOpenMPThreadId(); double alpha; register IndexPacket *magick_restrict fx_indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } fx_indexes=GetCacheViewAuthenticIndexQueue(fx_view); alpha=0.0; for (x=0; x < (ssize_t) fx_image->columns; x++) { if ((channel & RedChannel) != 0) { (void) FxEvaluateChannelExpression(fx_info[id],RedChannel,x,y, &alpha,exception); SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*alpha)); } if ((channel & GreenChannel) != 0) { (void) FxEvaluateChannelExpression(fx_info[id],GreenChannel,x,y, &alpha,exception); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*alpha)); } if ((channel & BlueChannel) != 0) { (void) FxEvaluateChannelExpression(fx_info[id],BlueChannel,x,y, &alpha,exception); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*alpha)); } if ((channel & OpacityChannel) != 0) { (void) FxEvaluateChannelExpression(fx_info[id],OpacityChannel,x,y, &alpha,exception); if (image->matte == MagickFalse) SetPixelOpacity(q,ClampToQuantum((MagickRealType) QuantumRange* alpha)); else SetPixelOpacity(q,ClampToQuantum((MagickRealType) (QuantumRange- QuantumRange*alpha))); } if (((channel & IndexChannel) != 0) && (fx_image->colorspace == CMYKColorspace)) { (void) FxEvaluateChannelExpression(fx_info[id],IndexChannel,x,y, &alpha,exception); SetPixelIndex(fx_indexes+x,ClampToQuantum((MagickRealType) QuantumRange*alpha)); } q++; } if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FxImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } fx_view=DestroyCacheView(fx_view); fx_info=DestroyFxThreadSet(fx_info); if (status == MagickFalse) fx_image=DestroyImage(fx_image); return(fx_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I m p l o d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ImplodeImage() creates a new image that is a copy of an existing % one with the image pixels "implode" by the specified percentage. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ImplodeImage method is: % % Image *ImplodeImage(const Image *image,const double amount, % ExceptionInfo *exception) % % A description of each parameter follows: % % o implode_image: Method ImplodeImage returns a pointer to the image % after it is implode. A null image is returned if there is a memory % shortage. % % o image: the image. % % o amount: Define the extent of the implosion. 
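%      A positive amount samples pixels from nearer the rim (implode); a
%      negative amount samples from nearer the center (explode).  A minimal
%      usage sketch (the 0.5 amount is illustrative):
%
%        implode_image=ImplodeImage(image,0.5,exception);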
% % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ImplodeImage(const Image *image,const double amount, ExceptionInfo *exception) { #define ImplodeImageTag "Implode/Image" CacheView *image_view, *implode_view; double radius; Image *implode_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; PointInfo center, scale; ssize_t y; /* Initialize implode image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); implode_image=CloneImage(image,0,0,MagickTrue,exception); if (implode_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(implode_image,DirectClass) == MagickFalse) { InheritException(exception,&implode_image->exception); implode_image=DestroyImage(implode_image); return((Image *) NULL); } if (implode_image->background_color.opacity != OpaqueOpacity) implode_image->matte=MagickTrue; /* Compute scaling factor. */ scale.x=1.0; scale.y=1.0; center.x=0.5*image->columns; center.y=0.5*image->rows; radius=center.x; if (image->columns > image->rows) scale.y=(double) image->columns/(double) image->rows; else if (image->columns < image->rows) { scale.x=(double) image->rows/(double) image->columns; radius=center.y; } /* Implode image. */ status=MagickTrue; progress=0; GetMagickPixelPacket(implode_image,&zero); image_view=AcquireVirtualCacheView(image,exception); implode_view=AcquireAuthenticCacheView(implode_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,implode_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double distance; MagickPixelPacket pixel; PointInfo delta; register IndexPacket *magick_restrict implode_indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } implode_indexes=GetCacheViewAuthenticIndexQueue(implode_view); delta.y=scale.y*(double) (y-center.y); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { /* Determine if the pixel is within an ellipse. */ delta.x=scale.x*(double) (x-center.x); distance=delta.x*delta.x+delta.y*delta.y; if (distance < (radius*radius)) { double factor; /* Implode the pixel. 
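            With d = delta.x*delta.x+delta.y*delta.y and r = radius, the
            factor computed below is sin(Pi*sqrt(d)/(2*r))^(-amount), and the
            source sample is interpolated at center+factor*delta/scale:
            amount > 0 pulls samples outward (implode), amount < 0 pulls
            them inward (explode).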
*/ factor=1.0; if (distance > 0.0) factor=pow(sin((double) (MagickPI*sqrt((double) distance)/ radius/2)),-amount); status=InterpolateMagickPixelPacket(image,image_view, UndefinedInterpolatePixel,(double) (factor*delta.x/scale.x+ center.x),(double) (factor*delta.y/scale.y+center.y),&pixel, exception); if (status == MagickFalse) break; SetPixelPacket(implode_image,&pixel,q,implode_indexes+x); } q++; } if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ImplodeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } implode_view=DestroyCacheView(implode_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) implode_image=DestroyImage(implode_image); return(implode_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o r p h I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The MorphImages() method requires a minimum of two images. The first % image is transformed into the second by a number of intervening images % as specified by frames. % % The format of the MorphImage method is: % % Image *MorphImages(const Image *image,const size_t number_frames, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_frames: Define the number of in-between image to generate. % The more in-between frames, the smoother the morph. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MorphImages(const Image *image, const size_t number_frames,ExceptionInfo *exception) { #define MorphImageTag "Morph/Image" double alpha, beta; Image *morph_image, *morph_images; MagickBooleanType status; MagickOffsetType scene; register const Image *next; register ssize_t i; ssize_t y; /* Clone first frame in sequence. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); morph_images=CloneImage(image,0,0,MagickTrue,exception); if (morph_images == (Image *) NULL) return((Image *) NULL); if (GetNextImageInList(image) == (Image *) NULL) { /* Morph single image. */ for (i=1; i < (ssize_t) number_frames; i++) { morph_image=CloneImage(image,0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) i, number_frames); if (proceed == MagickFalse) status=MagickFalse; } } return(GetFirstImageInList(morph_images)); } /* Morph image sequence. 
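    For each adjacent pair of frames, emit number_frames in-betweens: frame i
    uses beta=(i+1)/(number_frames+1) and alpha=1-beta, blending both
    geometry (via ResizeImage) and pixel values as alpha*current+beta*next.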
*/ status=MagickTrue; scene=0; next=image; for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next)) { for (i=0; i < (ssize_t) number_frames; i++) { CacheView *image_view, *morph_view; beta=(double) (i+1.0)/(double) (number_frames+1.0); alpha=1.0-beta; morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta* GetNextImageInList(next)->columns+0.5),(size_t) (alpha* next->rows+beta*GetNextImageInList(next)->rows+0.5), next->filter,next->blur,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } if (SetImageStorageClass(morph_image,DirectClass) == MagickFalse) { InheritException(exception,&morph_image->exception); morph_image=DestroyImage(morph_image); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns, morph_images->rows,GetNextImageInList(next)->filter, GetNextImageInList(next)->blur,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } image_view=AcquireVirtualCacheView(morph_image,exception); morph_view=AcquireAuthenticCacheView(morph_images,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(morph_image,morph_image,morph_image->rows,1) #endif for (y=0; y < (ssize_t) morph_images->rows; y++) { MagickBooleanType sync; register const PixelPacket *magick_restrict p; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1, exception); q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) morph_images->columns; x++) { SetPixelRed(q,ClampToQuantum(alpha* GetPixelRed(q)+beta*GetPixelRed(p))); SetPixelGreen(q,ClampToQuantum(alpha* GetPixelGreen(q)+beta*GetPixelGreen(p))); SetPixelBlue(q,ClampToQuantum(alpha* GetPixelBlue(q)+beta*GetPixelBlue(p))); SetPixelOpacity(q,ClampToQuantum(alpha* GetPixelOpacity(q)+beta*GetPixelOpacity(p))); p++; q++; } sync=SyncCacheViewAuthenticPixels(morph_view,exception); if (sync == MagickFalse) status=MagickFalse; } morph_view=DestroyCacheView(morph_view); image_view=DestroyCacheView(image_view); morph_image=DestroyImage(morph_image); } if (i < (ssize_t) number_frames) break; /* Clone last frame in sequence. 
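      After each run of in-betweens, append an unmodified clone of the
      successor frame, so the output list reads A, A/B blends, B, B/C
      blends, C, and so on.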
*/ morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphImageTag,scene, GetImageListLength(image)); if (proceed == MagickFalse) status=MagickFalse; } scene++; } if (GetNextImageInList(next) != (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } return(GetFirstImageInList(morph_images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P l a s m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PlasmaImage() initializes an image with plasma fractal values. The image % must be initialized with a base color and the random number generator % seeded before this method is called. % % The format of the PlasmaImage method is: % % MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment, % size_t attenuate,size_t depth) % % A description of each parameter follows: % % o image: the image. % % o segment: Define the region to apply plasma fractals values. % % o attenuate: Define the plasma attenuation factor. % % o depth: Limit the plasma recursion depth. % */ static inline Quantum PlasmaPixel(RandomInfo *random_info, const MagickRealType pixel,const double noise) { Quantum plasma; plasma=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)- noise/2.0); if (plasma <= 0) return((Quantum) 0); if (plasma >= QuantumRange) return(QuantumRange); return(plasma); } MagickExport MagickBooleanType PlasmaImageProxy(Image *image, CacheView *image_view,CacheView *u_view,CacheView *v_view, RandomInfo *random_info,const SegmentInfo *segment,size_t attenuate, size_t depth) { ExceptionInfo *exception; double plasma; PixelPacket u, v; ssize_t x, x_mid, y, y_mid; if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) && (fabs(segment->y2-segment->y1) <= MagickEpsilon)) return(MagickTrue); if (depth != 0) { MagickBooleanType status; SegmentInfo local_info; /* Divide the area into quadrants and recurse. */ depth--; attenuate++; x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5); y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5); local_info=(*segment); local_info.x2=(double) x_mid; local_info.y2=(double) y_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth); local_info=(*segment); local_info.y1=(double) y_mid; local_info.x2=(double) x_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth); local_info=(*segment); local_info.x1=(double) x_mid; local_info.y2=(double) y_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth); local_info=(*segment); local_info.x1=(double) x_mid; local_info.y1=(double) y_mid; status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth); return(status); } x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5); y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5); if ((fabs(segment->x1-x_mid) < MagickEpsilon) && (fabs(segment->x2-x_mid) < MagickEpsilon) && (fabs(segment->y1-y_mid) < MagickEpsilon) && (fabs(segment->y2-y_mid) < MagickEpsilon)) return(MagickFalse); /* Average pixels and apply plasma. 
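    The noise amplitude is plasma = QuantumRange/(2.0*attenuate);
    PlasmaPixel() perturbs the average of the two reference pixels by a
    uniform value in [-plasma/2,+plasma/2), and attenuate grows with
    recursion depth so the noise fades at finer scales.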
*/ exception=(&image->exception); plasma=(double) QuantumRange/(2.0*attenuate); if ((fabs(segment->x1-x_mid) > MagickEpsilon) || (fabs(segment->x2-x_mid) > MagickEpsilon)) { register PixelPacket *magick_restrict q; /* Left pixel. */ x=(ssize_t) ceil(segment->x1-0.5); (void) GetOneCacheViewVirtualPixel(u_view,x,(ssize_t) ceil(segment->y1-0.5),&u,exception); (void) GetOneCacheViewVirtualPixel(v_view,x,(ssize_t) ceil(segment->y2-0.5),&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0, plasma)); SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+ v.green)/2.0,plasma)); SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/ 2.0,plasma)); (void) SyncCacheViewAuthenticPixels(image_view,exception); if (fabs(segment->x1-segment->x2) > MagickEpsilon) { /* Right pixel. */ x=(ssize_t) ceil(segment->x2-0.5); (void) GetOneCacheViewVirtualPixel(u_view,x,(ssize_t) ceil(segment->y1-0.5),&u,exception); (void) GetOneCacheViewVirtualPixel(v_view,x,(ssize_t) ceil(segment->y2-0.5),&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/ 2.0,plasma)); SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+ v.green)/2.0,plasma)); SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+ v.blue)/2.0,plasma)); (void) SyncCacheViewAuthenticPixels(image_view,exception); } } if ((fabs(segment->y1-y_mid) > MagickEpsilon) || (fabs(segment->y2-y_mid) > MagickEpsilon)) { if ((fabs(segment->x1-x_mid) > MagickEpsilon) || (fabs(segment->y2-y_mid) > MagickEpsilon)) { register PixelPacket *magick_restrict q; /* Bottom pixel. */ y=(ssize_t) ceil(segment->y2-0.5); (void) GetOneCacheViewVirtualPixel(u_view,(ssize_t) ceil(segment->x1-0.5),y,&u,exception); (void) GetOneCacheViewVirtualPixel(v_view,(ssize_t) ceil(segment->x2-0.5),y,&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/ 2.0,plasma)); SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+ v.green)/2.0,plasma)); SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+ v.blue)/2.0,plasma)); (void) SyncCacheViewAuthenticPixels(image_view,exception); } if (fabs(segment->y1-segment->y2) > MagickEpsilon) { register PixelPacket *magick_restrict q; /* Top pixel. */ y=(ssize_t) ceil(segment->y1-0.5); (void) GetOneCacheViewVirtualPixel(u_view,(ssize_t) ceil(segment->x1-0.5),y,&u,exception); (void) GetOneCacheViewVirtualPixel(v_view,(ssize_t) ceil(segment->x2-0.5),y,&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+ v.red)/2.0,plasma)); SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+ v.green)/2.0,plasma)); SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+ v.blue)/2.0,plasma)); (void) SyncCacheViewAuthenticPixels(image_view,exception); } } if ((fabs(segment->x1-segment->x2) > MagickEpsilon) || (fabs(segment->y1-segment->y2) > MagickEpsilon)) { register PixelPacket *magick_restrict q; /* Middle pixel. 
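        The quadrant center blends the diagonal corners (x1,y1) and (x2,y2)
        with the same noise model; recursion bottoms out once the segment is
        smaller than 3x3 pixels.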
  */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      (void) GetOneCacheViewVirtualPixel(u_view,x,y,&u,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      (void) GetOneCacheViewVirtualPixel(v_view,x,y,&v,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickTrue);
      SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,
        plasma));
      SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
        v.green)/2.0,plasma));
      SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/
        2.0,plasma));
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
    }
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(MagickTrue);
  return(MagickFalse);
}

MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image_view=AcquireAuthenticCacheView(image,&image->exception);
  u_view=AcquireVirtualCacheView(image,&image->exception);
  v_view=AcquireVirtualCacheView(image,&image->exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P o l a r o i d I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PolaroidImage() simulates a Polaroid picture.
%
%  The format of the PolaroidImage method is:
%
%      Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
%        const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const double angle,ExceptionInfo *exception)
{
  const char
    *value;

  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  value=GetImageProperty(image,"Caption");
  if (value != (const char *) NULL)
    {
      char
        *caption;

      /*
        Generate caption image.
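        The caption text comes from the image's "Caption" property; it is
        word-wrapped by FormatMagickCaption() and its rendered rows are added
        to the overall Polaroid height.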
*/ caption_image=CloneImage(image,image->columns,1,MagickTrue,exception); if (caption_image == (Image *) NULL) return((Image *) NULL); caption=InterpretImageProperties((ImageInfo *) NULL,(Image *) image, value); if (caption != (char *) NULL) { char geometry[MaxTextExtent]; DrawInfo *annotate_info; MagickBooleanType status; ssize_t count; TypeMetric metrics; annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info); (void) CloneString(&annotate_info->text,caption); count=FormatMagickCaption(caption_image,annotate_info,MagickTrue, &metrics,&caption); status=SetImageExtent(caption_image,image->columns,(size_t) ((count+1)*(metrics.ascent-metrics.descent)+0.5)); if (status == MagickFalse) caption_image=DestroyImage(caption_image); else { caption_image->background_color=image->border_color; (void) SetImageBackgroundColor(caption_image); (void) CloneString(&annotate_info->text,caption); (void) FormatLocaleString(geometry,MaxTextExtent,"+0+%.20g", metrics.ascent); if (annotate_info->gravity == UndefinedGravity) (void) CloneString(&annotate_info->geometry,AcquireString( geometry)); (void) AnnotateImage(caption_image,annotate_info); height+=caption_image->rows; } annotate_info=DestroyDrawInfo(annotate_info); caption=DestroyString(caption); } } picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue, exception); if (picture_image == (Image *) NULL) { if (caption_image != (Image *) NULL) caption_image=DestroyImage(caption_image); return((Image *) NULL); } picture_image->background_color=image->border_color; (void) SetImageBackgroundColor(picture_image); (void) CompositeImage(picture_image,OverCompositeOp,image,quantum,quantum); if (caption_image != (Image *) NULL) { (void) CompositeImage(picture_image,OverCompositeOp,caption_image, quantum,(ssize_t) (image->rows+3*quantum/2)); caption_image=DestroyImage(caption_image); } (void) QueryColorDatabase("none",&picture_image->background_color,exception); (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel); rotate_image=RotateImage(picture_image,90.0,exception); picture_image=DestroyImage(picture_image); if (rotate_image == (Image *) NULL) return((Image *) NULL); picture_image=rotate_image; bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0* picture_image->columns,exception); picture_image=DestroyImage(picture_image); if (bend_image == (Image *) NULL) return((Image *) NULL); InheritException(&bend_image->exception,exception); picture_image=bend_image; rotate_image=RotateImage(picture_image,-90.0,exception); picture_image=DestroyImage(picture_image); if (rotate_image == (Image *) NULL) return((Image *) NULL); picture_image=rotate_image; picture_image->background_color=image->background_color; polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3, exception); if (polaroid_image == (Image *) NULL) { picture_image=DestroyImage(picture_image); return(picture_image); } flop_image=FlopImage(polaroid_image,exception); polaroid_image=DestroyImage(polaroid_image); if (flop_image == (Image *) NULL) { picture_image=DestroyImage(picture_image); return(picture_image); } polaroid_image=flop_image; (void) CompositeImage(polaroid_image,OverCompositeOp,picture_image, (ssize_t) (-0.01*picture_image->columns/2.0),0L); picture_image=DestroyImage(picture_image); (void) QueryColorDatabase("none",&polaroid_image->background_color,exception); rotate_image=RotateImage(polaroid_image,angle,exception); polaroid_image=DestroyImage(polaroid_image); if (rotate_image == (Image *) NULL) return((Image *) NULL); 
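  /*
    Adopt the rotated result, then trim the transparent margin the rotation
    introduced.
  */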
polaroid_image=rotate_image; trim_image=TrimImage(polaroid_image,exception); polaroid_image=DestroyImage(polaroid_image); if (trim_image == (Image *) NULL) return((Image *) NULL); polaroid_image=trim_image; return(polaroid_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p i a T o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SepiaToneImage() applies a special effect to the image, similar to the % effect achieved in a photo darkroom by sepia toning. Threshold ranges from % 0 to QuantumRange and is a measure of the extent of the sepia toning. A % threshold of 80% is a good starting point for a reasonable tone. % % The format of the SepiaToneImage method is: % % Image *SepiaToneImage(const Image *image,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: the tone threshold. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SepiaToneImage(const Image *image,const double threshold, ExceptionInfo *exception) { #define SepiaToneImageTag "SepiaTone/Image" CacheView *image_view, *sepia_view; Image *sepia_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Initialize sepia-toned image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); sepia_image=CloneImage(image,0,0,MagickTrue,exception); if (sepia_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(sepia_image,DirectClass) == MagickFalse) { InheritException(exception,&sepia_image->exception); sepia_image=DestroyImage(sepia_image); return((Image *) NULL); } /* Tone each row of the image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); sepia_view=AcquireAuthenticCacheView(sepia_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,sepia_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double intensity, tone; intensity=GetPixelIntensity(image,p); tone=intensity > threshold ? (double) QuantumRange : intensity+ (double) QuantumRange-threshold; SetPixelRed(q,ClampToQuantum(tone)); tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange : intensity+(double) QuantumRange-7.0*threshold/6.0; SetPixelGreen(q,ClampToQuantum(tone)); tone=intensity < (threshold/6.0) ?
0 : intensity-threshold/6.0; SetPixelBlue(q,ClampToQuantum(tone)); tone=threshold/7.0; if ((double) GetPixelGreen(q) < tone) SetPixelGreen(q,ClampToQuantum(tone)); if ((double) GetPixelBlue(q) < tone) SetPixelBlue(q,ClampToQuantum(tone)); SetPixelOpacity(q,GetPixelOpacity(p)); p++; q++; } if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SepiaToneImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sepia_view=DestroyCacheView(sepia_view); image_view=DestroyCacheView(image_view); (void) NormalizeImage(sepia_image); (void) ContrastImage(sepia_image,MagickTrue); if (status == MagickFalse) sepia_image=DestroyImage(sepia_image); return(sepia_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d o w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadowImage() simulates a shadow from the specified image and returns it. % % The format of the ShadowImage method is: % % Image *ShadowImage(const Image *image,const double opacity, % const double sigma,const ssize_t x_offset,const ssize_t y_offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o opacity: percentage transparency. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o x_offset: the shadow x-offset. % % o y_offset: the shadow y-offset. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShadowImage(const Image *image,const double opacity, const double sigma,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { #define ShadowImageTag "Shadow/Image" CacheView *image_view; Image *border_image, *clone_image, *shadow_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo border_info; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(clone_image,sRGBColorspace); (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod); clone_image->compose=OverCompositeOp; border_info.width=(size_t) floor(2.0*sigma+0.5); border_info.height=(size_t) floor(2.0*sigma+0.5); border_info.x=0; border_info.y=0; (void) QueryColorDatabase("none",&clone_image->border_color,exception); border_image=BorderImage(clone_image,&border_info,exception); clone_image=DestroyImage(clone_image); if (border_image == (Image *) NULL) return((Image *) NULL); if (border_image->matte == MagickFalse) (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel); /* Shadow image. 
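Every border pixel is repainted with the background color; when the image has an alpha channel, the new opacity is QuantumRange less the pixel's alpha scaled by the opacity percentage. The alpha channel is then Gaussian-blurred to soften the silhouette.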
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(border_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(border_image,border_image,border_image->rows,1) #endif for (y=0; y < (ssize_t) border_image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) border_image->columns; x++) { SetPixelRed(q,border_image->background_color.red); SetPixelGreen(q,border_image->background_color.green); SetPixelBlue(q,border_image->background_color.blue); if (border_image->matte == MagickFalse) SetPixelOpacity(q,border_image->background_color.opacity); else SetPixelOpacity(q,ClampToQuantum((double) (QuantumRange- GetPixelAlpha(q)*opacity/100.0))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ShadowImageTag,progress, border_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); shadow_image=BlurImageChannel(border_image,AlphaChannel,0.0,sigma,exception); border_image=DestroyImage(border_image); if (shadow_image == (Image *) NULL) return((Image *) NULL); if (shadow_image->page.width == 0) shadow_image->page.width=shadow_image->columns; if (shadow_image->page.height == 0) shadow_image->page.height=shadow_image->rows; shadow_image->page.width+=x_offset-(ssize_t) border_info.width; shadow_image->page.height+=y_offset-(ssize_t) border_info.height; shadow_image->page.x+=x_offset-(ssize_t) border_info.width; shadow_image->page.y+=y_offset-(ssize_t) border_info.height; return(shadow_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S k e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SketchImage() simulates a pencil sketch. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. Use a radius of 0 % and SketchImage() selects a suitable radius for you. Angle gives the angle % of the sketch. % % The format of the SketchImage method is: % % Image *SketchImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting % the center pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: Apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SketchImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { CacheView *random_view; Image *blend_image, *blur_image, *dodge_image, *random_image, *sketch_image; MagickBooleanType status; MagickPixelPacket zero; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Sketch image. 
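A frame of uniform random noise, at twice the source geometry, is motion-blurred along the requested angle, edge-extracted, normalized, negated, and scaled back to the original size; the result is color-dodge composited over a clone of the image and blended 20/80 with the original colors.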
*/ random_image=CloneImage(image,image->columns << 1,image->rows << 1, MagickTrue,exception); if (random_image == (Image *) NULL) return((Image *) NULL); status=MagickTrue; GetMagickPixelPacket(random_image,&zero); random_info=AcquireRandomInfoThreadSet(); random_view=AcquireAuthenticCacheView(random_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) random_image->rows; y++) { const int id = GetOpenMPThreadId(); MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(random_view); pixel=zero; for (x=0; x < (ssize_t) random_image->columns; x++) { pixel.red=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); pixel.green=pixel.red; pixel.blue=pixel.red; if (image->colorspace == CMYKColorspace) pixel.index=pixel.red; SetPixelPacket(random_image,&pixel,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse) status=MagickFalse; } random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) { random_view=DestroyCacheView(random_view); random_image=DestroyImage(random_image); return(random_image); } random_view=DestroyCacheView(random_view); blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception); random_image=DestroyImage(random_image); if (blur_image == (Image *) NULL) return((Image *) NULL); dodge_image=EdgeImage(blur_image,radius,exception); blur_image=DestroyImage(blur_image); if (dodge_image == (Image *) NULL) return((Image *) NULL); status=ClampImage(dodge_image); if (status != MagickFalse) status=NormalizeImage(dodge_image); if (status != MagickFalse) status=NegateImage(dodge_image,MagickFalse); if (status != MagickFalse) status=TransformImage(&dodge_image,(char *) NULL,"50%"); sketch_image=CloneImage(image,0,0,MagickTrue,exception); if (sketch_image == (Image *) NULL) { dodge_image=DestroyImage(dodge_image); return((Image *) NULL); } (void) CompositeImage(sketch_image,ColorDodgeCompositeOp,dodge_image,0,0); dodge_image=DestroyImage(dodge_image); blend_image=CloneImage(image,0,0,MagickTrue,exception); if (blend_image == (Image *) NULL) { sketch_image=DestroyImage(sketch_image); return((Image *) NULL); } (void) SetImageArtifact(blend_image,"compose:args","20x80"); (void) CompositeImage(sketch_image,BlendCompositeOp,blend_image,0,0); blend_image=DestroyImage(blend_image); return(sketch_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S o l a r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SolarizeImage() applies a special effect to the image, similar to the effect % achieved in a photo darkroom by selectively exposing areas of photo % sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a % measure of the extent of the solarization. 
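%
%  A minimal usage sketch (illustrative only, not part of the original API
%  docs; "image" is assumed to be an already-read image handle and error
%  checking is omitted):
%
%      (void) SolarizeImage(image,0.5*QuantumRange);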
% % The format of the SolarizeImage method is: % % MagickBooleanType SolarizeImage(Image *image,const double threshold) % MagickBooleanType SolarizeImageChannel(Image *image, % const ChannelType channel,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o threshold: Define the extent of the solarization. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SolarizeImage(Image *image, const double threshold) { MagickBooleanType status; status=SolarizeImageChannel(image,DefaultChannels,threshold, &image->exception); return(status); } MagickExport MagickBooleanType SolarizeImageChannel(Image *image, const ChannelType channel,const double threshold,ExceptionInfo *exception) { #define SolarizeImageTag "Solarize/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); if (image->storage_class == PseudoClass) { register ssize_t i; /* Solarize colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) if ((double) image->colormap[i].red > threshold) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((channel & GreenChannel) != 0) if ((double) image->colormap[i].green > threshold) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((channel & BlueChannel) != 0) if ((double) image->colormap[i].blue > threshold) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } } /* Solarize image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) if ((double) GetPixelRed(q) > threshold) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) if ((double) GetPixelGreen(q) > threshold) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) if ((double) GetPixelBlue(q) > threshold) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SolarizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t e g a n o I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SteganoImage() hides a digital watermark within the image. 
Recover % the hidden watermark later to prove the authenticity of an image. % Offset defines the start position within the image to hide the watermark. % % The format of the SteganoImage method is: % % Image *SteganoImage(const Image *image,const Image *watermark, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o watermark: the watermark image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SteganoImage(const Image *image,const Image *watermark, ExceptionInfo *exception) { #define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0) #define SetBit(alpha,i,set) (alpha)=(Quantum) ((set) != 0 ? (size_t) (alpha) \ | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i))) #define SteganoImageTag "Stegano/Image" CacheView *stegano_view, *watermark_view; Image *stegano_image; int c; MagickBooleanType status; PixelPacket pixel; register PixelPacket *q; register ssize_t x; size_t depth, one; ssize_t i, j, k, y; /* Initialize steganographic image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(watermark != (const Image *) NULL); assert(watermark->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); one=1UL; stegano_image=CloneImage(image,0,0,MagickTrue,exception); if (stegano_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(stegano_image,DirectClass) == MagickFalse) { InheritException(exception,&stegano_image->exception); stegano_image=DestroyImage(stegano_image); return((Image *) NULL); } stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH; /* Hide watermark in low-order bits of image.
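Each bit plane i of the watermark intensity is written into bit j of successive red, green, and blue samples, walking the pixel offset k across the image; j advances to the next low-order bit whenever k wraps back to the starting offset.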
*/ c=0; i=0; j=0; depth=stegano_image->depth; k=image->offset; status=MagickTrue; watermark_view=AcquireVirtualCacheView(watermark,exception); stegano_view=AcquireAuthenticCacheView(stegano_image,exception); for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--) { for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++) { for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++) { (void) GetOneCacheViewVirtualPixel(watermark_view,x,y,&pixel,exception); if ((k/(ssize_t) stegano_image->columns) >= (ssize_t) stegano_image->rows) break; q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t) stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1, exception); if (q == (PixelPacket *) NULL) break; switch (c) { case 0: { SetBit(GetPixelRed(q),j,GetBit(ClampToQuantum(GetPixelIntensity( image,&pixel)),i)); break; } case 1: { SetBit(GetPixelGreen(q),j,GetBit(ClampToQuantum(GetPixelIntensity( image,&pixel)),i)); break; } case 2: { SetBit(GetPixelBlue(q),j,GetBit(ClampToQuantum(GetPixelIntensity( image,&pixel)),i)); break; } } if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse) break; c++; if (c == 3) c=0; k++; if (k == (ssize_t) (stegano_image->columns*stegano_image->columns)) k=0; if (k == image->offset) j++; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType) (depth-i),depth); if (proceed == MagickFalse) status=MagickFalse; } } stegano_view=DestroyCacheView(stegano_view); watermark_view=DestroyCacheView(watermark_view); if (stegano_image->storage_class == PseudoClass) (void) SyncImage(stegano_image); if (status == MagickFalse) stegano_image=DestroyImage(stegano_image); return(stegano_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t e r e o A n a g l y p h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StereoAnaglyphImage() combines two images and produces a single image that % is the composite of a left and right image of a stereo pair. Special % red-green stereo glasses are required to view this effect. % % The format of the StereoAnaglyphImage method is: % % Image *StereoImage(const Image *left_image,const Image *right_image, % ExceptionInfo *exception) % Image *StereoAnaglyphImage(const Image *left_image, % const Image *right_image,const ssize_t x_offset,const ssize_t y_offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o left_image: the left image. % % o right_image: the right image. % % o exception: return any errors or warnings in this structure. % % o x_offset: amount, in pixels, by which the left image is offset to the % right of the right image. % % o y_offset: amount, in pixels, by which the left image is offset to the % bottom of the right image. 
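%
%  A minimal usage sketch (illustrative only; "left_image", "right_image",
%  and "exception" are assumed handles, the two images must have identical
%  dimensions, and error checking is omitted):
%
%      Image *anaglyph_image=StereoAnaglyphImage(left_image,right_image,0,0,
%        exception);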
% % */ MagickExport Image *StereoImage(const Image *left_image, const Image *right_image,ExceptionInfo *exception) { return(StereoAnaglyphImage(left_image,right_image,0,0,exception)); } MagickExport Image *StereoAnaglyphImage(const Image *left_image, const Image *right_image,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { #define StereoImageTag "Stereo/Image" const Image *image; Image *stereo_image; MagickBooleanType status; ssize_t y; assert(left_image != (const Image *) NULL); assert(left_image->signature == MagickCoreSignature); if (left_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", left_image->filename); assert(right_image != (const Image *) NULL); assert(right_image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=left_image; if ((left_image->columns != right_image->columns) || (left_image->rows != right_image->rows)) ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer"); /* Initialize stereo image attributes. */ stereo_image=CloneImage(left_image,left_image->columns,left_image->rows, MagickTrue,exception); if (stereo_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(stereo_image,DirectClass) == MagickFalse) { InheritException(exception,&stereo_image->exception); stereo_image=DestroyImage(stereo_image); return((Image *) NULL); } (void) SetImageColorspace(stereo_image,sRGBColorspace); /* Copy left image to red channel and right image to blue channel. */ status=MagickTrue; for (y=0; y < (ssize_t) stereo_image->rows; y++) { register const PixelPacket *magick_restrict p, *magick_restrict q; register ssize_t x; register PixelPacket *magick_restrict r; p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1, exception); q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception); r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception); if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) stereo_image->columns; x++) { SetPixelRed(r,GetPixelRed(p)); SetPixelGreen(r,GetPixelGreen(q)); SetPixelBlue(r,GetPixelBlue(q)); SetPixelOpacity(r,(GetPixelOpacity(p)+q->opacity)/2); p++; q++; r++; } if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse) break; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y, stereo_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } if (status == MagickFalse) stereo_image=DestroyImage(stereo_image); return(stereo_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S w i r l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SwirlImage() swirls the pixels about the center of the image, where % degrees indicates the sweep of the arc through which each pixel is moved. % You get a more dramatic effect as the degrees move from 1 to 360. % % The format of the SwirlImage method is: % % Image *SwirlImage(const Image *image,double degrees, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o degrees: Define the tightness of the swirling effect. % % o exception: return any errors or warnings in this structure. 
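%
%  A minimal usage sketch (illustrative only; "image" and "exception" are
%  assumed handles and error checking is omitted):
%
%      Image *swirl_image=SwirlImage(image,90.0,exception);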
% */ MagickExport Image *SwirlImage(const Image *image,double degrees, ExceptionInfo *exception) { #define SwirlImageTag "Swirl/Image" CacheView *image_view, *swirl_view; double radius; Image *swirl_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; PointInfo center, scale; ssize_t y; /* Initialize swirl image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); swirl_image=CloneImage(image,0,0,MagickTrue,exception); if (swirl_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(swirl_image,DirectClass) == MagickFalse) { InheritException(exception,&swirl_image->exception); swirl_image=DestroyImage(swirl_image); return((Image *) NULL); } if (swirl_image->background_color.opacity != OpaqueOpacity) swirl_image->matte=MagickTrue; /* Compute scaling factor. */ center.x=(double) image->columns/2.0; center.y=(double) image->rows/2.0; radius=MagickMax(center.x,center.y); scale.x=1.0; scale.y=1.0; if (image->columns > image->rows) scale.y=(double) image->columns/(double) image->rows; else if (image->columns < image->rows) scale.x=(double) image->rows/(double) image->columns; degrees=(double) DegreesToRadians(degrees); /* Swirl image. */ status=MagickTrue; progress=0; GetMagickPixelPacket(swirl_image,&zero); image_view=AcquireVirtualCacheView(image,exception); swirl_view=AcquireAuthenticCacheView(swirl_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,swirl_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double distance; MagickPixelPacket pixel; PointInfo delta; register IndexPacket *magick_restrict swirl_indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } swirl_indexes=GetCacheViewAuthenticIndexQueue(swirl_view); delta.y=scale.y*(double) (y-center.y); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { /* Determine if the pixel is within an ellipse. */ delta.x=scale.x*(double) (x-center.x); distance=delta.x*delta.x+delta.y*delta.y; if (distance < (radius*radius)) { double cosine, factor, sine; /* Swirl the pixel. 
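The swirl angle decays with distance from the center: factor=1-sqrt(distance)/radius, and the source coordinate is rotated about the center by degrees*factor*factor before interpolation.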
*/ factor=1.0-sqrt(distance)/radius; sine=sin((double) (degrees*factor*factor)); cosine=cos((double) (degrees*factor*factor)); status=InterpolateMagickPixelPacket(image,image_view, UndefinedInterpolatePixel,(double) ((cosine*delta.x-sine*delta.y)/ scale.x+center.x),(double) ((sine*delta.x+cosine*delta.y)/scale.y+ center.y),&pixel,exception); if (status == MagickFalse) break; SetPixelPacket(swirl_image,&pixel,q,swirl_indexes+x); } q++; } if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SwirlImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } swirl_view=DestroyCacheView(swirl_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) swirl_image=DestroyImage(swirl_image); return(swirl_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TintImage() applies a color vector to each pixel in the image. The length % of the vector is 0 for black and white and at its maximum for the midtones. % The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5)))) % % The format of the TintImage method is: % % Image *TintImage(const Image *image,const char *opacity, % const PixelPacket tint,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o opacity: a geometry string of percentages, red[,green,blue,opacity], % that controls how strongly the tint color is applied to each channel. % % o tint: A color value used for tinting. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *TintImage(const Image *image,const char *opacity, const PixelPacket tint,ExceptionInfo *exception) { #define TintImageTag "Tint/Image" CacheView *image_view, *tint_view; GeometryInfo geometry_info; Image *tint_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket color_vector, pixel; MagickStatusType flags; ssize_t y; /* Allocate tint image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); tint_image=CloneImage(image,0,0,MagickTrue,exception); if (tint_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(tint_image,DirectClass) == MagickFalse) { InheritException(exception,&tint_image->exception); tint_image=DestroyImage(tint_image); return((Image *) NULL); } if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsPixelGray(&tint) == MagickFalse)) (void) SetImageColorspace(tint_image,sRGBColorspace); if (opacity == (const char *) NULL) return(tint_image); /* Determine RGB values of the tint color.
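The opacity argument is parsed as a geometry string whose rho, sigma, xi, and psi terms supply the red, green, blue, and opacity percentages (rho alone covers all three channels); each percentage scales the corresponding tint channel, less the tint intensity, to form the color vector.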
*/ flags=ParseGeometry(opacity,&geometry_info); pixel.red=geometry_info.rho; pixel.green=geometry_info.rho; pixel.blue=geometry_info.rho; pixel.opacity=(MagickRealType) OpaqueOpacity; if ((flags & SigmaValue) != 0) pixel.green=geometry_info.sigma; if ((flags & XiValue) != 0) pixel.blue=geometry_info.xi; if ((flags & PsiValue) != 0) pixel.opacity=geometry_info.psi; color_vector.red=(MagickRealType) (pixel.red*tint.red/100.0- PixelPacketIntensity(&tint)); color_vector.green=(MagickRealType) (pixel.green*tint.green/100.0- PixelPacketIntensity(&tint)); color_vector.blue=(MagickRealType) (pixel.blue*tint.blue/100.0- PixelPacketIntensity(&tint)); /* Tint image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); tint_view=AcquireAuthenticCacheView(tint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,tint_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double weight; MagickPixelPacket pixel; weight=QuantumScale*GetPixelRed(p)-0.5; pixel.red=(MagickRealType) GetPixelRed(p)+color_vector.red*(1.0-(4.0* (weight*weight))); SetPixelRed(q,ClampToQuantum(pixel.red)); weight=QuantumScale*GetPixelGreen(p)-0.5; pixel.green=(MagickRealType) GetPixelGreen(p)+color_vector.green*(1.0- (4.0*(weight*weight))); SetPixelGreen(q,ClampToQuantum(pixel.green)); weight=QuantumScale*GetPixelBlue(p)-0.5; pixel.blue=(MagickRealType) GetPixelBlue(p)+color_vector.blue*(1.0-(4.0* (weight*weight))); SetPixelBlue(q,ClampToQuantum(pixel.blue)); SetPixelOpacity(q,GetPixelOpacity(p)); p++; q++; } if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TintImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } tint_view=DestroyCacheView(tint_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) tint_image=DestroyImage(tint_image); return(tint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % V i g n e t t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % VignetteImage() softens the edges of the image in vignette style. % % The format of the VignetteImage method is: % % Image *VignetteImage(const Image *image,const double radius, % const double sigma,const ssize_t x,const ssize_t y, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o x, y: Define the x and y ellipse offset. % % o exception: return any errors or warnings in this structure. 
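%
%  A minimal usage sketch (illustrative only; "image" and "exception" are
%  assumed handles, error checking is omitted, and a radius of 0.0 asks the
%  blur to select a suitable radius from sigma):
%
%      Image *vignette_image=VignetteImage(image,0.0,10.0,5,5,exception);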
% */ MagickExport Image *VignetteImage(const Image *image,const double radius, const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception) { char ellipse[MaxTextExtent]; DrawInfo *draw_info; Image *blur_image, *canvas_image, *oval_image, *vignette_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(canvas_image,DirectClass) == MagickFalse) { InheritException(exception,&canvas_image->exception); canvas_image=DestroyImage(canvas_image); return((Image *) NULL); } canvas_image->matte=MagickTrue; oval_image=CloneImage(canvas_image,canvas_image->columns,canvas_image->rows, MagickTrue,exception); if (oval_image == (Image *) NULL) { canvas_image=DestroyImage(canvas_image); return((Image *) NULL); } (void) QueryColorDatabase("#000000",&oval_image->background_color,exception); (void) SetImageBackgroundColor(oval_image); draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL); (void) QueryColorDatabase("#ffffff",&draw_info->fill,exception); (void) QueryColorDatabase("#ffffff",&draw_info->stroke,exception); (void) FormatLocaleString(ellipse,MaxTextExtent, "ellipse %g,%g,%g,%g,0.0,360.0",image->columns/2.0, image->rows/2.0,image->columns/2.0-x,image->rows/2.0-y); draw_info->primitive=AcquireString(ellipse); (void) DrawImage(oval_image,draw_info); draw_info=DestroyDrawInfo(draw_info); blur_image=BlurImage(oval_image,radius,sigma,exception); oval_image=DestroyImage(oval_image); if (blur_image == (Image *) NULL) { canvas_image=DestroyImage(canvas_image); return((Image *) NULL); } blur_image->matte=MagickFalse; (void) CompositeImage(canvas_image,CopyOpacityCompositeOp,blur_image,0,0); blur_image=DestroyImage(blur_image); vignette_image=MergeImageLayers(canvas_image,FlattenLayer,exception); canvas_image=DestroyImage(canvas_image); if (vignette_image != (Image *) NULL) (void) TransformImageColorspace(vignette_image,image->colorspace); return(vignette_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W a v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WaveImage() creates a "ripple" effect in the image by shifting the pixels % vertically along a sine wave whose amplitude and wavelength are specified % by the given parameters. % % The format of the WaveImage method is: % % Image *WaveImage(const Image *image,const double amplitude, % const double wave_length,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o amplitude, wave_length: Define the amplitude and wave length of the % sine wave. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *WaveImage(const Image *image,const double amplitude, const double wave_length,ExceptionInfo *exception) { #define WaveImageTag "Wave/Image" CacheView *image_view, *wave_view; float *sine_map; Image *wave_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; register ssize_t i; ssize_t y; /* Initialize wave image attributes.
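The canvas is grown by 2*fabs(amplitude) rows so displaced pixels remain inside the frame; column x is later shifted vertically by fabs(amplitude)+amplitude*sin((2.0*MagickPI*x)/wave_length).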
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); wave_image=CloneImage(image,image->columns,(size_t) (image->rows+2.0* fabs(amplitude)),MagickTrue,exception); if (wave_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(wave_image,DirectClass) == MagickFalse) { InheritException(exception,&wave_image->exception); wave_image=DestroyImage(wave_image); return((Image *) NULL); } if (wave_image->background_color.opacity != OpaqueOpacity) wave_image->matte=MagickTrue; /* Allocate sine map. */ sine_map=(float *) AcquireQuantumMemory((size_t) wave_image->columns, sizeof(*sine_map)); if (sine_map == (float *) NULL) { wave_image=DestroyImage(wave_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) wave_image->columns; i++) sine_map[i]=(float) fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/wave_length)); /* Wave image. */ status=MagickTrue; progress=0; GetMagickPixelPacket(wave_image,&zero); image_view=AcquireVirtualCacheView(image,exception); wave_view=AcquireAuthenticCacheView(wave_image,exception); (void) SetCacheViewVirtualPixelMethod(image_view, BackgroundVirtualPixelMethod); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,wave_image,wave_image->rows,1) #endif for (y=0; y < (ssize_t) wave_image->rows; y++) { MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(wave_view); pixel=zero; for (x=0; x < (ssize_t) wave_image->columns; x++) { status=InterpolateMagickPixelPacket(image,image_view, UndefinedInterpolatePixel,(double) x,(double) (y-sine_map[x]),&pixel, exception); if (status == MagickFalse) break; SetPixelPacket(wave_image,&pixel,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,WaveImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } wave_view=DestroyCacheView(wave_view); image_view=DestroyCacheView(image_view); sine_map=(float *) RelinquishMagickMemory(sine_map); if (status == MagickFalse) wave_image=DestroyImage(wave_image); return(wave_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W a v e l e t D e n o i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WaveletDenoiseImage() removes noise from the image using a wavelet % transform. The wavelet transform is a fast hierarchical scheme for % processing an image using a set of consecutive lowpass and high_pass filters, % followed by a decimation. This results in a decomposition into different % scales which can be regarded as different “frequency bands”, determined by % the mother wavelet. Adapted from dcraw.c by David Coffin. 
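%
%  For example, a call such as the following (an illustrative sketch; "image"
%  and "exception" are assumed handles, the threshold value is merely a
%  plausible starting point, and error checking is omitted):
%
%      Image *denoise_image=WaveletDenoiseImage(image,0.1*QuantumRange,0.0,
%        exception);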
% % The format of the WaveletDenoiseImage method is: % % Image *WaveletDenoiseImage(const Image *image,const double threshold, % const double softness,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: set the threshold for smoothing. % % o softness: attenuate the smoothing threshold. % % o exception: return any errors or warnings in this structure. % */ static inline void HatTransform(const float *magick_restrict pixels, const size_t stride,const size_t extent,const size_t scale,float *kernel) { const float *magick_restrict p, *magick_restrict q, *magick_restrict r; register ssize_t i; p=pixels; q=pixels+scale*stride, r=pixels+scale*stride; for (i=0; i < (ssize_t) scale; i++) { kernel[i]=0.25f*(*p+(*p)+(*q)+(*r)); p+=stride; q-=stride; r+=stride; } for ( ; i < (ssize_t) (extent-scale); i++) { kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride)); p+=stride; } q=p-scale*stride; r=pixels+stride*(extent-2); for ( ; i < (ssize_t) extent; i++) { kernel[i]=0.25f*(*p+(*p)+(*q)+(*r)); p+=stride; q+=stride; r-=stride; } } MagickExport Image *WaveletDenoiseImage(const Image *image, const double threshold,const double softness,ExceptionInfo *exception) { CacheView *image_view, *noise_view; float *kernel, *pixels; Image *noise_image; MagickBooleanType status; MagickSizeType number_pixels; MemoryInfo *pixels_info; size_t max_channels; ssize_t channel; static const double noise_levels[]= { 0.8002, 0.2735, 0.1202, 0.0585, 0.0291, 0.0152, 0.0080, 0.0044 }; /* Initialize noise image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); noise_image=(Image *) NULL; #if defined(MAGICKCORE_OPENCL_SUPPORT) noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception); if (noise_image != (Image *) NULL) return(noise_image); #endif noise_image=CloneImage(image,0,0,MagickTrue,exception); if (noise_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(noise_image,DirectClass) == MagickFalse) { noise_image=DestroyImage(noise_image); return((Image *) NULL); } if (AcquireMagickResource(WidthResource,3*image->columns) == MagickFalse) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); pixels_info=AcquireVirtualMemory(3*image->columns,image->rows* sizeof(*pixels)); kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1, GetOpenMPMaximumThreads()*sizeof(*kernel)); if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL)) { if (kernel != (float *) NULL) kernel=(float *) RelinquishMagickMemory(kernel); if (pixels_info != (MemoryInfo *) NULL) pixels_info=RelinquishVirtualMemory(pixels_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=(float *) GetVirtualMemoryBlob(pixels_info); status=MagickTrue; number_pixels=image->columns*image->rows; max_channels=(size_t) (image->colorspace == CMYKColorspace ? 4 : 3); image_view=AcquireAuthenticCacheView(image,exception); noise_view=AcquireAuthenticCacheView(noise_image,exception); for (channel=0; channel < (ssize_t) max_channels; channel++) { register ssize_t i; size_t high_pass, low_pass; ssize_t level, y; if (status == MagickFalse) continue; /* Copy channel from image to wavelet pixel array. 
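The working buffer holds three planes of columns*rows floats: plane 0 accumulates the detail (high-pass) residual while planes 1 and 2 alternate as low-pass scratch from level to level.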
*/ i=0; for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; ssize_t x; p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; break; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { switch (channel) { case 0: pixels[i]=(float) GetPixelRed(p); break; case 1: pixels[i]=(float) GetPixelGreen(p); break; case 2: pixels[i]=(float) GetPixelBlue(p); break; case 3: pixels[i]=(float) indexes[x]; break; default: break; } i++; p++; } } /* Low-pass filter outputs are called the approximation kernel and high-pass outputs are referred to as the detail kernels. The detail kernels have high values in the noisy parts of the signal. */ high_pass=0; for (level=0; level < 5; level++) { double magnitude; ssize_t x, y; low_pass=(size_t) (number_pixels*((level & 0x01)+1)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register float *magick_restrict p, *magick_restrict q; register ssize_t x; p=kernel+id*image->columns; q=pixels+y*image->columns; HatTransform(q+high_pass,1,image->columns,(size_t) (1UL << level),p); q+=low_pass; for (x=0; x < (ssize_t) image->columns; x++) *q++=(*p++); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) \ magick_number_threads(image,image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register float *magick_restrict p, *magick_restrict q; register ssize_t y; p=kernel+id*image->rows; q=pixels+x+low_pass; HatTransform(q,image->columns,image->rows,(size_t) (1UL << level),p); for (y=0; y < (ssize_t) image->rows; y++) { *q=(*p++); q+=image->columns; } } /* To threshold, each coefficient is compared to a threshold value and attenuated / shrunk by some factor. */ magnitude=threshold*noise_levels[level]; for (i=0; i < (ssize_t) number_pixels; ++i) { pixels[high_pass+i]-=pixels[low_pass+i]; if (pixels[high_pass+i] < -magnitude) pixels[high_pass+i]+=magnitude-softness*magnitude; else if (pixels[high_pass+i] > magnitude) pixels[high_pass+i]-=magnitude-softness*magnitude; else pixels[high_pass+i]*=softness; if (high_pass != 0) pixels[i]+=pixels[high_pass+i]; } high_pass=low_pass; } /* Reconstruct image from the thresholded wavelet kernel.
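Each output sample is the accumulated detail pixels[i] plus the coarsest smooth band pixels[low_pass+i], clamped back into the quantum range of the channel being processed.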
*/ i=0; for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register IndexPacket *magick_restrict noise_indexes; register PixelPacket *magick_restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; break; } noise_indexes=GetCacheViewAuthenticIndexQueue(noise_view); for (x=0; x < (ssize_t) image->columns; x++) { float pixel; pixel=pixels[i]+pixels[low_pass+i]; switch (channel) { case 0: SetPixelRed(q,ClampToQuantum(pixel)); break; case 1: SetPixelGreen(q,ClampToQuantum(pixel)); break; case 2: SetPixelBlue(q,ClampToQuantum(pixel)); break; case 3: SetPixelIndex(noise_indexes+x,ClampToQuantum(pixel)); break; default: break; } i++; q++; } sync=SyncCacheViewAuthenticPixels(noise_view,exception); if (sync == MagickFalse) status=MagickFalse; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType) channel,max_channels); if (proceed == MagickFalse) status=MagickFalse; } } noise_view=DestroyCacheView(noise_view); image_view=DestroyCacheView(image_view); kernel=(float *) RelinquishMagickMemory(kernel); pixels_info=RelinquishVirtualMemory(pixels_info); return(noise_image); }
SplineR2RAdoptor.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign // Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign // Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory // // File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign ////////////////////////////////////////////////////////////////////////////////////// #ifndef QMCPLUSPLUS_EINSPLINE_R2RSOA_ADOPTOR_H #define QMCPLUSPLUS_EINSPLINE_R2RSOA_ADOPTOR_H #include <memory> #include <OhmmsSoA/Container.h> #include <spline2/MultiBspline.hpp> #include <spline2/MultiBsplineEval.hpp> #include "QMCWaveFunctions/BsplineFactory/SplineAdoptorBase.h" #include "QMCWaveFunctions/BsplineFactory/contraction_helper.hpp" #include "Utilities/FairDivide.h" namespace qmcplusplus { /** adoptor class to match ST real spline with TT real SPOs * @tparam ST precision of spline * @tparam TT precision of SPOs * @tparam D dimension * * Requires temporary storage and multiplication by the sign of the real part of the phase * Internal storage ST type arrays are aligned and padded. */ template<typename ST, typename TT> struct SplineR2RSoA : public SplineAdoptorBase<ST, 3> { static const int D = 3; bool IsGamma; using Base = SplineAdoptorBase<ST, 3>; using SplineType = typename bspline_traits<ST, 3>::SplineType; using BCType = typename bspline_traits<ST, 3>::BCType; using PointType = typename Base::PointType; using SingleSplineType = typename Base::SingleSplineType; using vContainer_type = Vector<ST, aligned_allocator<ST>>; using gContainer_type = VectorSoaContainer<ST, 3>; using hContainer_type = VectorSoaContainer<ST, 6>; using ghContainer_type = VectorSoaContainer<ST, 10>; using Base::first_spo; using Base::last_spo; using SplineAdoptorBase<ST, D>::HalfG; using Base::GGt; using Base::kPoints; using Base::offset; using Base::PrimLattice; ///multi bspline set std::shared_ptr<MultiBspline<ST>> SplineInst; vContainer_type myV; vContainer_type myL; gContainer_type myG; hContainer_type myH; ghContainer_type mygH; ///thread private ratios for reduction when using nested threading, numVP x numThread Matrix<TT> ratios_private; SplineR2RSoA() : Base() { this->is_complex = false; this->is_soa_ready = true; this->AdoptorName = "SplineR2RSoAAdoptor"; this->KeyWord = "SplineR2RSoA"; } inline void resizeStorage(size_t n, size_t nvals) { Base::init_base(n); const size_t npad = getAlignedSize<ST>(n); myV.resize(npad); myG.resize(npad); myL.resize(npad); myH.resize(npad); mygH.resize(npad); IsGamma = ((HalfG[0] == 0) && (HalfG[1] == 0) && (HalfG[2] == 0)); } void bcast_tables(Communicate* comm) { chunked_bcast(comm, SplineInst->getSplinePtr()); } void gather_tables(Communicate* comm) { if (comm->size() == 1) return; const int Nbands = kPoints.size(); const int Nbandgroups = comm->size(); offset.resize(Nbandgroups + 1, 0); FairDivideLow(Nbands, Nbandgroups, offset); gatherv(comm, SplineInst->getSplinePtr(), SplineInst->getSplinePtr()->z_stride, offset); } template<typename GT, typename BCT> void create_spline(GT& xyz_g, BCT& xyz_bc) { GGt = dot(transpose(PrimLattice.G), PrimLattice.G); SplineInst = std::make_shared<MultiBspline<ST>>(); SplineInst->create(xyz_g, xyz_bc,
myV.size()); app_log() << "MEMORY " << SplineInst->sizeInByte() / (1 << 20) << " MB allocated " << "for the coefficients in 3D spline orbital representation" << std::endl; } inline void flush_zero() { SplineInst->flush_zero(); } inline void set_spline(SingleSplineType* spline_r, SingleSplineType* spline_i, int twist, int ispline, int level) { SplineInst->copy_spline(spline_r, ispline); } bool read_splines(hdf_archive& h5f) { std::ostringstream o; o << "spline_" << SplineAdoptorBase<ST, D>::MyIndex; einspline_engine<SplineType> bigtable(SplineInst->getSplinePtr()); return h5f.readEntry(bigtable, o.str().c_str()); //"spline_0"); } bool write_splines(hdf_archive& h5f) { std::ostringstream o; o << "spline_" << SplineAdoptorBase<ST, D>::MyIndex; einspline_engine<SplineType> bigtable(SplineInst->getSplinePtr()); return h5f.writeEntry(bigtable, o.str().c_str()); //"spline_0"); } /** convert position in PrimLattice unit and return sign */ inline int convertPos(const PointType& r, PointType& ru) { ru = PrimLattice.toUnit(r); int bc_sign = 0; for (int i = 0; i < D; i++) if (-std::numeric_limits<ST>::epsilon() < ru[i] && ru[i] < 0) ru[i] = ST(0.0); else { ST img = std::floor(ru[i]); ru[i] -= img; bc_sign += HalfG[i] * (int)img; } return bc_sign; } template<typename VV> inline void assign_v(int bc_sign, const vContainer_type& myV, VV& psi, int first, int last) const { // protect last last = last > kPoints.size() ? kPoints.size() : last; const ST signed_one = (bc_sign & 1) ? -1 : 1; #pragma omp simd for (size_t j = first; j < last; ++j) psi[first_spo + j] = signed_one * myV[j]; } template<typename VV> inline void evaluate_v(const ParticleSet& P, const int iat, VV& psi) { const PointType& r = P.activeR(iat); PointType ru; int bc_sign = convertPos(r, ru); #pragma omp parallel { int first, last; FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last); spline2::evaluate3d(SplineInst->getSplinePtr(), ru, myV, first, last); assign_v(bc_sign, myV, psi, first, last); } } template<typename VV, typename RT> inline void evaluateDetRatios(const VirtualParticleSet& VP, VV& psi, const VV& psiinv, std::vector<RT>& ratios) { const bool need_resize = ratios_private.rows() < VP.getTotalNum(); #pragma omp parallel { int tid = omp_get_thread_num(); // initialize thread private ratios if (need_resize) { if (tid == 0) // just like #pragma omp master, but one fewer call to the runtime ratios_private.resize(VP.getTotalNum(), omp_get_num_threads()); #pragma omp barrier } int first, last; FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), tid, first, last); const int last_real = kPoints.size() < last ? kPoints.size() : last; for (int iat = 0; iat < VP.getTotalNum(); ++iat) { const PointType& r = VP.activeR(iat); PointType ru; int bc_sign = convertPos(r, ru); spline2::evaluate3d(SplineInst->getSplinePtr(), ru, myV, first, last); assign_v(bc_sign, myV, psi, first, last_real); ratios_private[iat][tid] = simd::dot(psi.data() + first, psiinv.data() + first, last_real - first); } } // do the reduction manually for (int iat = 0; iat < VP.getTotalNum(); ++iat) { ratios[iat] = TT(0); for (int tid = 0; tid < ratios_private.cols(); tid++) ratios[iat] += ratios_private[iat][tid]; } } template<typename VV, typename GV> inline void assign_vgl(int bc_sign, VV& psi, GV& dpsi, VV& d2psi, int first, int last) const { // protect last last = last > kPoints.size() ? kPoints.size() : last; const ST signed_one = (bc_sign & 1) ? 
-1 : 1; const ST g00 = PrimLattice.G(0), g01 = PrimLattice.G(1), g02 = PrimLattice.G(2), g10 = PrimLattice.G(3), g11 = PrimLattice.G(4), g12 = PrimLattice.G(5), g20 = PrimLattice.G(6), g21 = PrimLattice.G(7), g22 = PrimLattice.G(8); const ST symGG[6] = {GGt[0], GGt[1] + GGt[3], GGt[2] + GGt[6], GGt[4], GGt[5] + GGt[7], GGt[8]}; const ST* restrict g0 = myG.data(0); const ST* restrict g1 = myG.data(1); const ST* restrict g2 = myG.data(2); const ST* restrict h00 = myH.data(0); const ST* restrict h01 = myH.data(1); const ST* restrict h02 = myH.data(2); const ST* restrict h11 = myH.data(3); const ST* restrict h12 = myH.data(4); const ST* restrict h22 = myH.data(5); #pragma omp simd for (size_t j = first; j < last; ++j) { const size_t psiIndex = first_spo + j; psi[psiIndex] = signed_one * myV[j]; dpsi[psiIndex][0] = signed_one * (g00 * g0[j] + g01 * g1[j] + g02 * g2[j]); dpsi[psiIndex][1] = signed_one * (g10 * g0[j] + g11 * g1[j] + g12 * g2[j]); dpsi[psiIndex][2] = signed_one * (g20 * g0[j] + g21 * g1[j] + g22 * g2[j]); d2psi[psiIndex] = signed_one * SymTrace(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], symGG); } } /** assign_vgl_from_l can be used when myL is precomputed and myV,myG,myL in cartesian */ template<typename VV, typename GV> inline void assign_vgl_from_l(int bc_sign, VV& psi, GV& dpsi, VV& d2psi) { const ST signed_one = (bc_sign & 1) ? -1 : 1; const ST* restrict g0 = myG.data(0); const ST* restrict g1 = myG.data(1); const ST* restrict g2 = myG.data(2); #pragma omp simd for (int psiIndex = first_spo; psiIndex < last_spo; ++psiIndex) { const size_t j = psiIndex - first_spo; psi[psiIndex] = signed_one * myV[j]; dpsi[psiIndex][0] = signed_one * g0[j]; dpsi[psiIndex][1] = signed_one * g1[j]; dpsi[psiIndex][2] = signed_one * g2[j]; d2psi[psiIndex] = signed_one * myL[j]; } } template<typename VV, typename GV> inline void evaluate_vgl(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, VV& d2psi) { const PointType& r = P.activeR(iat); PointType ru; int bc_sign = convertPos(r, ru); #pragma omp parallel { int first, last; FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last); spline2::evaluate3d_vgh(SplineInst->getSplinePtr(), ru, myV, myG, myH, first, last); assign_vgl(bc_sign, psi, dpsi, d2psi, first, last); } } template<typename VV, typename GV> inline void mw_evaluate_vgl(const std::vector<SplineR2RSoA*>& sa_list, const std::vector<ParticleSet*>& P_list, int iat, const std::vector<VV*>& psi_v_list, const std::vector<GV*>& dpsi_v_list, const std::vector<VV*>& d2psi_v_list) { #pragma omp parallel for for (int iw = 0; iw < sa_list.size(); iw++) sa_list[iw]->evaluate_vgl(*P_list[iw], iat, *psi_v_list[iw], *dpsi_v_list[iw], *d2psi_v_list[iw]); } template<typename VV, typename GV, typename GGV> void assign_vgh(int bc_sign, VV& psi, GV& dpsi, GGV& grad_grad_psi, int first, int last) const { // protect last last = last > kPoints.size() ? kPoints.size() : last; const ST signed_one = (bc_sign & 1) ? 
-1 : 1; const ST g00 = PrimLattice.G(0), g01 = PrimLattice.G(1), g02 = PrimLattice.G(2), g10 = PrimLattice.G(3), g11 = PrimLattice.G(4), g12 = PrimLattice.G(5), g20 = PrimLattice.G(6), g21 = PrimLattice.G(7), g22 = PrimLattice.G(8); const ST* restrict g0 = myG.data(0); const ST* restrict g1 = myG.data(1); const ST* restrict g2 = myG.data(2); const ST* restrict h00 = myH.data(0); const ST* restrict h01 = myH.data(1); const ST* restrict h02 = myH.data(2); const ST* restrict h11 = myH.data(3); const ST* restrict h12 = myH.data(4); const ST* restrict h22 = myH.data(5); #pragma omp simd for (size_t j = first; j < last; ++j) { //dot(PrimLattice.G,myG[j]) const ST dX_r = g00 * g0[j] + g01 * g1[j] + g02 * g2[j]; const ST dY_r = g10 * g0[j] + g11 * g1[j] + g12 * g2[j]; const ST dZ_r = g20 * g0[j] + g21 * g1[j] + g22 * g2[j]; const size_t psiIndex = j + first_spo; psi[psiIndex] = signed_one * myV[j]; dpsi[psiIndex][0] = signed_one * dX_r; dpsi[psiIndex][1] = signed_one * dY_r; dpsi[psiIndex][2] = signed_one * dZ_r; const ST h_xx_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g00, g01, g02, g00, g01, g02); const ST h_xy_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g00, g01, g02, g10, g11, g12); const ST h_xz_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g00, g01, g02, g20, g21, g22); const ST h_yx_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g10, g11, g12, g00, g01, g02); const ST h_yy_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g10, g11, g12, g10, g11, g12); const ST h_yz_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g10, g11, g12, g20, g21, g22); const ST h_zx_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g20, g21, g22, g00, g01, g02); const ST h_zy_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g20, g21, g22, g10, g11, g12); const ST h_zz_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g20, g21, g22, g20, g21, g22); grad_grad_psi[psiIndex][0] = signed_one * h_xx_r; grad_grad_psi[psiIndex][1] = signed_one * h_xy_r; grad_grad_psi[psiIndex][2] = signed_one * h_xz_r; grad_grad_psi[psiIndex][3] = signed_one * h_yx_r; grad_grad_psi[psiIndex][4] = signed_one * h_yy_r; grad_grad_psi[psiIndex][5] = signed_one * h_yz_r; grad_grad_psi[psiIndex][6] = signed_one * h_zx_r; grad_grad_psi[psiIndex][7] = signed_one * h_zy_r; grad_grad_psi[psiIndex][8] = signed_one * h_zz_r; } } template<typename VV, typename GV, typename GGV, typename GGGV> void assign_vghgh(int bc_sign, VV& psi, GV& dpsi, GGV& grad_grad_psi, GGGV& grad_grad_grad_psi, int first = 0, int last = -1) const { // protect last last = last < 0 ? kPoints.size() : (last > kPoints.size() ? kPoints.size() : last); const ST signed_one = (bc_sign & 1) ? 
-1 : 1; const ST g00 = PrimLattice.G(0), g01 = PrimLattice.G(1), g02 = PrimLattice.G(2), g10 = PrimLattice.G(3), g11 = PrimLattice.G(4), g12 = PrimLattice.G(5), g20 = PrimLattice.G(6), g21 = PrimLattice.G(7), g22 = PrimLattice.G(8); const ST* restrict g0 = myG.data(0); const ST* restrict g1 = myG.data(1); const ST* restrict g2 = myG.data(2); const ST* restrict h00 = myH.data(0); const ST* restrict h01 = myH.data(1); const ST* restrict h02 = myH.data(2); const ST* restrict h11 = myH.data(3); const ST* restrict h12 = myH.data(4); const ST* restrict h22 = myH.data(5); const ST* restrict gh000 = mygH.data(0); const ST* restrict gh001 = mygH.data(1); const ST* restrict gh002 = mygH.data(2); const ST* restrict gh011 = mygH.data(3); const ST* restrict gh012 = mygH.data(4); const ST* restrict gh022 = mygH.data(5); const ST* restrict gh111 = mygH.data(6); const ST* restrict gh112 = mygH.data(7); const ST* restrict gh122 = mygH.data(8); const ST* restrict gh222 = mygH.data(9); //SIMD doesn't work quite right yet. Comment out until further debugging. //#pragma omp simd for (size_t j = first; j < last; ++j) { const ST val_r = myV[j]; //dot(PrimLattice.G,myG[j]) const ST dX_r = g00 * g0[j] + g01 * g1[j] + g02 * g2[j]; const ST dY_r = g10 * g0[j] + g11 * g1[j] + g12 * g2[j]; const ST dZ_r = g20 * g0[j] + g21 * g1[j] + g22 * g2[j]; const size_t psiIndex = j + first_spo; psi[psiIndex] = signed_one * val_r; dpsi[psiIndex][0] = signed_one * dX_r; dpsi[psiIndex][1] = signed_one * dY_r; dpsi[psiIndex][2] = signed_one * dZ_r; //intermediates for computation of hessian. \partial_i \partial_j phi in cartesian coordinates. const ST f_xx_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g00, g01, g02, g00, g01, g02); const ST f_xy_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g00, g01, g02, g10, g11, g12); const ST f_xz_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g00, g01, g02, g20, g21, g22); const ST f_yy_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g10, g11, g12, g10, g11, g12); const ST f_yz_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g10, g11, g12, g20, g21, g22); const ST f_zz_r = v_m_v(h00[j], h01[j], h02[j], h11[j], h12[j], h22[j], g20, g21, g22, g20, g21, g22); /* const ST h_xx_r=f_xx_r; const ST h_xy_r=f_xy_r+(kX*dY_i+kY*dX_i)-kX*kY*val_r; const ST h_xz_r=f_xz_r+(kX*dZ_i+kZ*dX_i)-kX*kZ*val_r; const ST h_yy_r=f_yy_r+2*kY*dY_i-kY*kY*val_r; const ST h_yz_r=f_yz_r+(kY*dZ_i+kZ*dY_i)-kY*kZ*val_r; const ST h_zz_r=f_zz_r+2*kZ*dZ_i-kZ*kZ*val_r; */ grad_grad_psi[psiIndex][0] = f_xx_r * signed_one; grad_grad_psi[psiIndex][1] = f_xy_r * signed_one; grad_grad_psi[psiIndex][2] = f_xz_r * signed_one; grad_grad_psi[psiIndex][4] = f_yy_r * signed_one; grad_grad_psi[psiIndex][5] = f_yz_r * signed_one; grad_grad_psi[psiIndex][8] = f_zz_r * signed_one; //symmetry: grad_grad_psi[psiIndex][3] = grad_grad_psi[psiIndex][1]; grad_grad_psi[psiIndex][6] = grad_grad_psi[psiIndex][2]; grad_grad_psi[psiIndex][7] = grad_grad_psi[psiIndex][5]; //These are the real and imaginary components of the third SPO derivative. _xxx denotes // third derivative w.r.t. x, _xyz, a derivative with resepect to x,y, and z, and so on. 
const ST f3_xxx_r = t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g00, g01, g02, g00, g01, g02, g00, g01, g02); const ST f3_xxy_r = t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g00, g01, g02, g00, g01, g02, g10, g11, g12); const ST f3_xxz_r = t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g00, g01, g02, g00, g01, g02, g20, g21, g22); const ST f3_xyy_r = t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g00, g01, g02, g10, g11, g12, g10, g11, g12); const ST f3_xyz_r = t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g00, g01, g02, g10, g11, g12, g20, g21, g22); const ST f3_xzz_r = t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g00, g01, g02, g20, g21, g22, g20, g21, g22); const ST f3_yyy_r = t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g10, g11, g12, g10, g11, g12, g10, g11, g12); const ST f3_yyz_r = t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g10, g11, g12, g10, g11, g12, g20, g21, g22); const ST f3_yzz_r = t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g10, g11, g12, g20, g21, g22, g20, g21, g22); const ST f3_zzz_r = t3_contract(gh000[j], gh001[j], gh002[j], gh011[j], gh012[j], gh022[j], gh111[j], gh112[j], gh122[j], gh222[j], g20, g21, g22, g20, g21, g22, g20, g21, g22); //Here is where we build up the components of the physical hessian gradient, namely, d^3/dx^3(e^{-ik*r}\phi(r) /* const ST gh_xxx_r= f3_xxx_r + 3*kX*f_xx_i - 3*kX*kX*dX_r - kX*kX*kX*val_i; const ST gh_xxy_r= f3_xxy_r +(kY*f_xx_i+2*kX*f_xy_i) - (kX*kX*dY_r+2*kX*kY*dX_r)-kX*kX*kY*val_i; const ST gh_xxz_r= f3_xxz_r +(kZ*f_xx_i+2*kX*f_xz_i) - (kX*kX*dZ_r+2*kX*kZ*dX_r)-kX*kX*kZ*val_i; const ST gh_xyy_r= f3_xyy_r +(2*kY*f_xy_i+kX*f_yy_i) - (2*kX*kY*dY_r+kY*kY*dX_r)-kX*kY*kY*val_i; const ST gh_xyz_r= f3_xyz_r +(kX*f_yz_i+kY*f_xz_i+kZ*f_xy_i)-(kX*kY*dZ_r+kY*kZ*dX_r+kZ*kX*dY_r) - kX*kY*kZ*val_i; const ST gh_xzz_r= f3_xzz_r +(2*kZ*f_xz_i+kX*f_zz_i) - (2*kX*kZ*dZ_r+kZ*kZ*dX_r)-kX*kZ*kZ*val_i; const ST gh_yyy_r= f3_yyy_r + 3*kY*f_yy_i - 3*kY*kY*dY_r - kY*kY*kY*val_i; const ST gh_yyz_r= f3_yyz_r +(kZ*f_yy_i+2*kY*f_yz_i) - (kY*kY*dZ_r+2*kY*kZ*dY_r)-kY*kY*kZ*val_i; const ST gh_yzz_r= f3_yzz_r +(2*kZ*f_yz_i+kY*f_zz_i) - (2*kY*kZ*dZ_r+kZ*kZ*dY_r)-kY*kZ*kZ*val_i; const ST gh_zzz_r= f3_zzz_r + 3*kZ*f_zz_i - 3*kZ*kZ*dZ_r - kZ*kZ*kZ*val_i;*/ //[x][xx] //These are the unique entries grad_grad_grad_psi[psiIndex][0][0] = signed_one * f3_xxx_r; grad_grad_grad_psi[psiIndex][0][1] = signed_one * f3_xxy_r; grad_grad_grad_psi[psiIndex][0][2] = signed_one * f3_xxz_r; grad_grad_grad_psi[psiIndex][0][4] = signed_one * f3_xyy_r; grad_grad_grad_psi[psiIndex][0][5] = signed_one * f3_xyz_r; grad_grad_grad_psi[psiIndex][0][8] = signed_one * f3_xzz_r; //filling in the symmetric terms. 
Filling out the xij terms grad_grad_grad_psi[psiIndex][0][3] = grad_grad_grad_psi[psiIndex][0][1]; grad_grad_grad_psi[psiIndex][0][6] = grad_grad_grad_psi[psiIndex][0][2]; grad_grad_grad_psi[psiIndex][0][7] = grad_grad_grad_psi[psiIndex][0][5]; //Now for everything that's a permutation of the above: grad_grad_grad_psi[psiIndex][1][0] = grad_grad_grad_psi[psiIndex][0][1]; grad_grad_grad_psi[psiIndex][1][1] = grad_grad_grad_psi[psiIndex][0][4]; grad_grad_grad_psi[psiIndex][1][2] = grad_grad_grad_psi[psiIndex][0][5]; grad_grad_grad_psi[psiIndex][1][3] = grad_grad_grad_psi[psiIndex][0][4]; grad_grad_grad_psi[psiIndex][1][6] = grad_grad_grad_psi[psiIndex][0][5]; grad_grad_grad_psi[psiIndex][2][0] = grad_grad_grad_psi[psiIndex][0][2]; grad_grad_grad_psi[psiIndex][2][1] = grad_grad_grad_psi[psiIndex][0][5]; grad_grad_grad_psi[psiIndex][2][2] = grad_grad_grad_psi[psiIndex][0][8]; grad_grad_grad_psi[psiIndex][2][3] = grad_grad_grad_psi[psiIndex][0][5]; grad_grad_grad_psi[psiIndex][2][6] = grad_grad_grad_psi[psiIndex][0][8]; grad_grad_grad_psi[psiIndex][1][4] = signed_one * f3_yyy_r; grad_grad_grad_psi[psiIndex][1][5] = signed_one * f3_yyz_r; grad_grad_grad_psi[psiIndex][1][8] = signed_one * f3_yzz_r; grad_grad_grad_psi[psiIndex][1][7] = grad_grad_grad_psi[psiIndex][1][5]; grad_grad_grad_psi[psiIndex][2][4] = grad_grad_grad_psi[psiIndex][1][5]; grad_grad_grad_psi[psiIndex][2][5] = grad_grad_grad_psi[psiIndex][1][8]; grad_grad_grad_psi[psiIndex][2][7] = grad_grad_grad_psi[psiIndex][1][8]; grad_grad_grad_psi[psiIndex][2][8] = signed_one * f3_zzz_r; } } template<typename VV, typename GV, typename GGV> void evaluate_vgh(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, GGV& grad_grad_psi) { const PointType& r = P.activeR(iat); PointType ru; int bc_sign = convertPos(r, ru); #pragma omp parallel { int first, last; FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last); spline2::evaluate3d_vgh(SplineInst->getSplinePtr(), ru, myV, myG, myH, first, last); assign_vgh(bc_sign, psi, dpsi, grad_grad_psi, first, last); } } template<typename VV, typename GV, typename GGV, typename GGGV> void evaluate_vghgh(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, GGV& grad_grad_psi, GGGV& grad_grad_grad_psi) { const PointType& r = P.activeR(iat); PointType ru; int bc_sign = convertPos(r, ru); #pragma omp parallel { int first, last; FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last); spline2::evaluate3d_vghgh(SplineInst->getSplinePtr(), ru, myV, myG, myH, mygH, first, last); assign_vghgh(bc_sign, psi, dpsi, grad_grad_psi, grad_grad_grad_psi, first, last); } } }; } // namespace qmcplusplus #endif
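// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): assign_vgl and
// assign_vgh above convert spline derivatives taken in unit-cell (crystal)
// coordinates into Cartesian components using the row-major 3x3 matrix
// G = PrimLattice.G, i.e. grad_cart[i] = sum_k G(3*i + k) * grad_u[k].
// The helper below isolates just that contraction; transform_gradient, G and
// grad_u are names assumed for this sketch, not QMCPACK identifiers.
// ---------------------------------------------------------------------------
#include <array>

template<typename T>
std::array<T, 3> transform_gradient(const std::array<T, 9>& G,
                                    const std::array<T, 3>& grad_u)
{
  std::array<T, 3> grad_cart;
  for (int i = 0; i < 3; ++i)
    grad_cart[i] = G[3 * i + 0] * grad_u[0]   // same contraction as
                 + G[3 * i + 1] * grad_u[1]   // dpsi[psiIndex][i] =
                 + G[3 * i + 2] * grad_u[2];  //   gi0*g0[j] + gi1*g1[j] + gi2*g2[j]
  return grad_cart;
}
// With G the identity, grad_cart equals grad_u; in general this is the chain
// rule d(phi)/dr_i = sum_k G(i,k) * d(phi)/du_k for the mapping ru = toUnit(r).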
stream.c
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <unistd.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. 
 *       This means that each array must be at least 1 GB, or 128M elements.
 *
 *  Version 5.10 increases the default array size from 2 million
 *     elements to 10 million elements in response to the increasing
 *     size of L3 caches.  The new default size is large enough for caches
 *     up to 20 MB.
 *  Version 5.10 changes the loop index variables from "register int"
 *     to "ssize_t", which allows array indices >2^32 (4 billion)
 *     on properly configured 64-bit systems.  Additional compiler options
 *     (such as "-mcmodel=medium") may be required for large memory runs.
 *
 *  Array size can be set at compile time without modifying the source
 *     code for the (many) compilers that support preprocessor definitions
 *     on the compile line.  E.g.,
 *           gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
 *     will override the default size of 10M with a new size of 100M elements
 *     per array.
 */
#ifndef STREAM_ARRAY_SIZE
#   define STREAM_ARRAY_SIZE 14000000
#endif

/*  2) STREAM runs each kernel "NTIMES" times and reports the *best* result
 *         for any iteration after the first, therefore the minimum value
 *         for NTIMES is 2.
 *      There are no rules on maximum allowable values for NTIMES, but
 *         values larger than the default are unlikely to noticeably
 *         increase the reported performance.
 *      NTIMES can also be set on the compile line without changing the source
 *         code using, for example, "-DNTIMES=7".
 */
#ifdef NTIMES
#if NTIMES<=1
#   define NTIMES 100
#endif
#endif
#ifndef NTIMES
#   define NTIMES 100
#endif

/*  Users are allowed to modify the "OFFSET" variable, which *may* change the
 *         relative alignment of the arrays (though compilers may change the
 *         effective offset by making the arrays non-contiguous on some systems).
 *      Use of non-zero values for OFFSET can be especially helpful if the
 *         STREAM_ARRAY_SIZE is set to a value close to a large power of 2.
 *      OFFSET can also be set on the compile line without changing the source
 *         code using, for example, "-DOFFSET=56".
 */
#ifndef OFFSET
#   define OFFSET 0
#endif

/*
 *  3) Compile the code with optimization.  Many compilers generate
 *       unreasonably bad code before the optimizer tightens things up.
 *     If the results are unreasonably good, on the other hand, the
 *       optimizer might be too smart for me!
 *
 *     For a simple single-core version, try compiling with:
 *            cc -O stream.c -o stream
 *     This is known to work on many, many systems....
 *
 *     To use multiple cores, you need to tell the compiler to obey the OpenMP
 *       directives in the code.  This varies by compiler, but a common example is
 *            gcc -O -fopenmp stream.c -o stream_omp
 *       The environment variable OMP_NUM_THREADS allows runtime control of the
 *         number of threads/cores used when the resulting "stream_omp" program
 *         is executed.
 *
 *     To run with single-precision variables and arithmetic, simply add
 *         -DSTREAM_TYPE=float
 *     to the compile line.
 *     Note that this changes the minimum array sizes required --- see (1) above.
 *
 *     The preprocessor directive "TUNED" does not do much -- it simply causes the
 *       code to call separate functions to execute each kernel.  Trivial versions
 *       of these functions are provided, but they are *not* tuned -- they just
 *       provide predefined interfaces to be replaced with tuned code.
 *
 *
 *  4) Optional: Mail the results to mccalpin@cs.virginia.edu
 *     Be sure to include info that will help me understand:
 *      a) the computer hardware configuration (e.g., processor model, memory type)
 *      b) the compiler name/version and compilation flags
 *      c) any run-time information (such as OMP_NUM_THREADS)
 *      d) all of the output from the test case.
 *
 *  Thanks!
 *
 *-----------------------------------------------------------------------*/

# define HLINE "-------------------------------------------------------------\n"

# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif

#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif

static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
                   b[STREAM_ARRAY_SIZE+OFFSET],
                   c[STREAM_ARRAY_SIZE+OFFSET];

static double avgtime[4] = {0}, maxtime[4] = {0},
              mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};

static char *label[4] = {"Copy: ", "Scale: ",
                         "Add: ", "Triad: "};

static double bytes[4] = {
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
    };

extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif

int main()
    {
    int    quantum, checktick();
    int    BytesPerWord;
    int    k;
    ssize_t  j;
    STREAM_TYPE  scalar;
    double  t, times[4][NTIMES];

    /* --- SETUP --- determine precision and check timing --- */

    printf(HLINE);
    printf("STREAM version $Revision: 5.10 $\n");
    printf(HLINE);
    BytesPerWord = sizeof(STREAM_TYPE);
    printf("This system uses %d bytes per array element.\n", BytesPerWord);

    printf(HLINE);
#ifdef N
    printf("***** WARNING: ******\n");
    printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
    printf(" This version of the code uses the preprocessor variable STREAM_ARRAY_SIZE to control the array size\n");
    printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
    printf("***** WARNING: ******\n");
#endif

    printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
    printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
        BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
        BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
    printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
        (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
        (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
    printf("Each kernel will be executed %d times.\n", NTIMES);
    printf(" The *best* time for each kernel (excluding the first iteration)\n");
    printf(" will be used to compute the reported bandwidth.\n");

#ifdef _OPENMP
    printf(HLINE);
#pragma omp parallel
    {
#pragma omp master
        {
        k = omp_get_num_threads();
        printf ("Number of Threads requested = %i\n",k);
        }
    }
#endif

#ifdef _OPENMP
    k = 0;
#pragma omp parallel
#pragma omp atomic
        k++;
    printf ("Number of Threads counted = %i\n",k);
#endif

    /* Get initial value for system clock.
*/ #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; } printf(HLINE); if ( (quantum = checktick()) >= 1) printf("Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); else { printf("Your clock granularity appears to be " "less than one microsecond.\n"); quantum = 1; } t = mysecond(); #pragma omp parallel for for (j = 0; j < STREAM_ARRAY_SIZE; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order" " of %d microseconds.\n", (int) t ); printf(" (= %d clock ticks)\n", (int) (t/quantum) ); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar = 3.0; for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED tuned_STREAM_Copy(); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]; #endif times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) b[j] = scalar*c[j]; #endif times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]+b[j]; #endif times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) a[j] = b[j]+scalar*c[j]; #endif times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Best Rate MB/s Avg time Min time Max time\n"); for (j=0; j<4; j++) { avgtime[j] = avgtime[j]/(double)(NTIMES-1); printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j], 1.0E-06 * bytes[j]/mintime[j], avgtime[j], mintime[j], maxtime[j]); } printf(HLINE); /* --- Check Results --- */ checkSTREAMresults(); printf(HLINE); return 0; } # define M 20 int checktick() { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while( ((t2=mysecond()) - t1) < 1.0E-6 ) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. * This result will be our estimate (in microseconds) for the * clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1])); minDelta = MIN(minDelta, MAX(Delta,0)); } return(minDelta); } /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. */ #include <time.h> #include <sys/time.h> double mysecond() { struct timeval tv; gettimeofday(&tv, NULL); return ((double) tv.tv_sec + (double) tv.tv_usec * 1.e-6); } #ifndef abs #define abs(a) ((a) >= 0 ? 
(a) : -(a)) #endif void checkSTREAMresults () { STREAM_TYPE aj,bj,cj,scalar; STREAM_TYPE aSumErr,bSumErr,cSumErr; STREAM_TYPE aAvgErr,bAvgErr,cAvgErr; double epsilon; ssize_t j; int k,ierr,err; /* reproduce initialization */ aj = 1.0; bj = 2.0; cj = 0.0; /* a[] is modified during timing check */ aj = 2.0E0 * aj; /* now execute timing loop */ scalar = 3.0; for (k=0; k<NTIMES; k++) { cj = aj; bj = scalar*cj; cj = aj+bj; aj = bj+scalar*cj; } /* accumulate deltas between observed and expected results */ aSumErr = 0.0; bSumErr = 0.0; cSumErr = 0.0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { aSumErr += abs(a[j] - aj); bSumErr += abs(b[j] - bj); cSumErr += abs(c[j] - cj); // if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN } aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; if (sizeof(STREAM_TYPE) == 4) { epsilon = 1.e-6; } else if (sizeof(STREAM_TYPE) == 8) { epsilon = 1.e-13; } else { printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE)); epsilon = 1.e-6; } err = 0; if (abs(aAvgErr/aj) > epsilon) { err++; printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(a[j]/aj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,aj,a[j],abs((aj-a[j])/aAvgErr)); } #endif } } printf(" For array a[], %d errors were found.\n",ierr); } if (abs(bAvgErr/bj) > epsilon) { err++; printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj); printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(b[j]/bj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,bj,b[j],abs((bj-b[j])/bAvgErr)); } #endif } } printf(" For array b[], %d errors were found.\n",ierr); } if (abs(cAvgErr/cj) > epsilon) { err++; printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj); printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(c[j]/cj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,cj,c[j],abs((cj-c[j])/cAvgErr)); } #endif } } printf(" For array c[], %d errors were found.\n",ierr); } if (err == 0) { printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon); } #ifdef VERBOSE printf ("Results Validation Verbose Results: \n"); printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj); printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]); printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj)); #endif } #ifdef TUNED /* stubs for "tuned" versions of the kernels */ void tuned_STREAM_Copy() { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]; } void tuned_STREAM_Scale(STREAM_TYPE scalar) { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) b[j] = scalar*c[j]; } void tuned_STREAM_Add() { ssize_t j; 
#pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]+b[j]; } void tuned_STREAM_Triad(STREAM_TYPE scalar) { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) a[j] = b[j]+scalar*c[j]; } /* end of stubs for the "tuned" versions of the kernels */ #endif
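/*---------------------------------------------------------------------------
 * Illustrative sketch (not part of the original benchmark): how the summary
 * loop above turns timings into the "Best Rate MB/s" column.  Each kernel
 * moves a fixed number of bytes per iteration (two arrays' worth for Copy
 * and Scale, three for Add and Triad -- see the bytes[4] table), and the
 * rate uses the minimum time over the NTIMES-1 timed iterations.  The helper
 * name stream_mbps is an assumption for this sketch, not part of stream.c.
 *---------------------------------------------------------------------------*/
static double stream_mbps(double bytes_moved, double best_time)
{
    /* STREAM reports decimal megabytes (1 MB = 10^6 bytes), matching the
     * 1.0E-06 * bytes[j]/mintime[j] expression in main(). */
    return 1.0E-06 * bytes_moved / best_time;
}
/* Example: a Triad over STREAM_ARRAY_SIZE = 14000000 doubles moves
 * 3 * 8 * 14000000 = 336e6 bytes; a best time of 0.021 s reports
 * stream_mbps(336.0e6, 0.021) = 16000 MB/s. */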
omp_alloc.c
// RUN: %libomp-compile-and-run
// REQUIRES: openmp-5.0
#include <stdio.h>
#include <stdint.h>
#include <omp.h>
#include "omp_testsuite.h"

#define ARRAY_SIZE 10000

int test_omp_alloc() {
  int err;
  int i, j;
  int *shared_array;
  const omp_allocator_t *allocator;
  const omp_allocator_t *test_allocator;
  // Currently, only default memory allocator is implemented
  const omp_allocator_t *allocators[] = {
      omp_default_mem_alloc,
  };
  err = 0;
  for (i = 0; i < sizeof(allocators) / sizeof(allocators[0]); ++i) {
    allocator = allocators[i];
    // report the allocator under test before installing it as the default
    printf("Using %p allocator\n", allocator);
    omp_set_default_allocator(allocator);
    test_allocator = omp_get_default_allocator();
    if (test_allocator != allocator) {
      printf("error: omp_set|get_default_allocator() not working\n");
      return 0;
    }
    shared_array = (int *)omp_alloc(sizeof(int) * ARRAY_SIZE, test_allocator);
    if (shared_array == NULL) {
      printf("error: shared_array is NULL\n");
      return 0;
    }
    for (j = 0; j < ARRAY_SIZE; ++j) {
      shared_array[j] = j;
    }
#pragma omp parallel shared(shared_array)
    {
      int i;
      int tid = omp_get_thread_num();
      int *private_array =
          (int *)omp_alloc(sizeof(int) * ARRAY_SIZE, omp_default_mem_alloc);
      if (private_array == NULL) {
        // count the failure and skip the checks instead of dereferencing NULL
        printf("error: thread %d private_array is NULL\n", tid);
#pragma omp atomic
        err++;
      } else {
        for (i = 0; i < ARRAY_SIZE; ++i) {
          private_array[i] = shared_array[i] + tid;
        }
        for (i = 0; i < ARRAY_SIZE; ++i) {
          if (private_array[i] != i + tid) {
            printf("error: thread %d element %d is %d instead of %d\n", tid, i,
                   private_array[i], i + tid);
#pragma omp atomic
            err++;
          }
        }
        omp_free(private_array, omp_default_mem_alloc);
      }
    } /* end of parallel */
    omp_free(shared_array, test_allocator);
  }
  return !err;
}

int main() {
  int i;
  int num_failed = 0;
  for (i = 0; i < REPETITIONS; i++) {
    if (!test_omp_alloc()) {
      num_failed++;
    }
  }
  return num_failed;
}
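// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original test): the omp_alloc /
// omp_free round trip exercised above, reduced to its core.  The function
// name alloc_roundtrip is an assumption for this sketch; omp_alloc, omp_free
// and omp_default_mem_alloc are the OpenMP 5.0 memory-management API the
// test itself uses.
// ---------------------------------------------------------------------------
#include <stddef.h>

static int alloc_roundtrip(size_t n) {
  // Request n ints from the default memory allocator; NULL signals failure,
  // which the test above counts as an error.
  int *p = (int *)omp_alloc(sizeof(int) * n, omp_default_mem_alloc);
  if (p == NULL)
    return 0;
  for (size_t i = 0; i < n; ++i)
    p[i] = (int)i;
  // Memory must be released through the same allocator it was obtained from.
  omp_free(p, omp_default_mem_alloc);
  return 1;
}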
tree_shap.h
/** * Fast recursive computation of SHAP values in trees. * See https://arxiv.org/abs/1802.03888 for details. * Scott Lundberg, 2018 (independent algorithm courtesy of Hugh Chen 2018) * * Fast TreeSHAP algorithm v1 and Fast TreeSHAP algorithm v2. * See https://arxiv.org/abs/2109.09847 for details. * Jilei Yang, 2021 */ #include <algorithm> #include <iostream> #include <fstream> #include <stdio.h> #include <cmath> #include <ctime> #if defined(_WIN32) || defined(WIN32) #include <malloc.h> #elif defined(__MVS__) #include <stdlib.h> #else #include <alloca.h> #endif using namespace std; typedef double tfloat; typedef tfloat (* transform_f)(const tfloat margin, const tfloat y); namespace FEATURE_DEPENDENCE { const unsigned independent = 0; const unsigned tree_path_dependent = 1; const unsigned global_path_dependent = 2; } namespace ALGORITHM { const unsigned v0 = 0; const unsigned v1 = 1; const unsigned v2 = 2; const unsigned v2_1 = 3; const unsigned v2_2 = 4; } struct TreeEnsemble { int *children_left; int *children_right; int *children_default; int *features; tfloat *thresholds; tfloat *values; tfloat *node_sample_weights; unsigned max_depth; unsigned tree_limit; tfloat *base_offset; unsigned max_nodes; unsigned num_outputs; TreeEnsemble() {} TreeEnsemble(int *children_left, int *children_right, int *children_default, int *features, tfloat *thresholds, tfloat *values, tfloat *node_sample_weights, unsigned max_depth, unsigned tree_limit, tfloat *base_offset, unsigned max_nodes, unsigned num_outputs) : children_left(children_left), children_right(children_right), children_default(children_default), features(features), thresholds(thresholds), values(values), node_sample_weights(node_sample_weights), max_depth(max_depth), tree_limit(tree_limit), base_offset(base_offset), max_nodes(max_nodes), num_outputs(num_outputs) {} void get_tree(TreeEnsemble &tree, const unsigned i) const { const unsigned d = i * max_nodes; tree.children_left = children_left + d; tree.children_right = children_right + d; tree.children_default = children_default + d; tree.features = features + d; tree.thresholds = thresholds + d; tree.values = values + d * num_outputs; tree.node_sample_weights = node_sample_weights + d; tree.max_depth = max_depth; tree.tree_limit = 1; tree.base_offset = base_offset; tree.max_nodes = max_nodes; tree.num_outputs = num_outputs; } bool is_leaf(unsigned pos)const { return children_left[pos] < 0; } void allocate(unsigned tree_limit_in, unsigned max_nodes_in, unsigned num_outputs_in) { tree_limit = tree_limit_in; max_nodes = max_nodes_in; num_outputs = num_outputs_in; children_left = new int[tree_limit * max_nodes]; children_right = new int[tree_limit * max_nodes]; children_default = new int[tree_limit * max_nodes]; features = new int[tree_limit * max_nodes]; thresholds = new tfloat[tree_limit * max_nodes]; values = new tfloat[tree_limit * max_nodes * num_outputs]; node_sample_weights = new tfloat[tree_limit * max_nodes]; } void free() { delete[] children_left; delete[] children_right; delete[] children_default; delete[] features; delete[] thresholds; delete[] values; delete[] node_sample_weights; } }; struct ExplanationDataset { tfloat *X; bool *X_missing; tfloat *y; tfloat *R; bool *R_missing; unsigned num_X; unsigned M; unsigned num_R; ExplanationDataset() {} ExplanationDataset(tfloat *X, bool *X_missing, tfloat *y, tfloat *R, bool *R_missing, unsigned num_X, unsigned M, unsigned num_R) : X(X), X_missing(X_missing), y(y), R(R), R_missing(R_missing), num_X(num_X), M(M), num_R(num_R) {} void 
get_x_instance(ExplanationDataset &instance, const unsigned i) const { instance.M = M; instance.X = X + i * M; instance.X_missing = X_missing + i * M; instance.num_X = 1; } }; // data we keep about our decision path // note that pweight is included for convenience and is not tied with the other attributes // the pweight of the i'th path element is the permuation weight of paths with i-1 ones in them struct PathElement { int feature_index; tfloat zero_fraction; tfloat one_fraction; tfloat pweight; PathElement() {} PathElement(int i, tfloat z, tfloat o, tfloat w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {} }; inline tfloat logistic_transform(const tfloat margin, const tfloat y) { return 1 / (1 + exp(-margin)); } inline tfloat logistic_nlogloss_transform(const tfloat margin, const tfloat y) { return log(1 + exp(margin)) - y * margin; // y is in {0, 1} } inline tfloat squared_loss_transform(const tfloat margin, const tfloat y) { return (margin - y) * (margin - y); } namespace MODEL_TRANSFORM { const unsigned identity = 0; const unsigned logistic = 1; const unsigned logistic_nlogloss = 2; const unsigned squared_loss = 3; } inline transform_f get_transform(unsigned model_transform) { transform_f transform = NULL; switch (model_transform) { case MODEL_TRANSFORM::logistic: transform = logistic_transform; break; case MODEL_TRANSFORM::logistic_nlogloss: transform = logistic_nlogloss_transform; break; case MODEL_TRANSFORM::squared_loss: transform = squared_loss_transform; break; } return transform; } inline tfloat *tree_predict(unsigned i, const TreeEnsemble &trees, const tfloat *x, const bool *x_missing) { const unsigned offset = i * trees.max_nodes; unsigned node = 0; while (true) { const unsigned pos = offset + node; const unsigned feature = trees.features[pos]; // we hit a leaf so return a pointer to the values if (trees.is_leaf(pos)) { return trees.values + pos * trees.num_outputs; } // otherwise we are at an internal node and need to recurse if (x_missing[feature]) { node = trees.children_default[pos]; } else if (x[feature] <= trees.thresholds[pos]) { node = trees.children_left[pos]; } else { node = trees.children_right[pos]; } } } inline void dense_tree_predict(tfloat *out, const TreeEnsemble &trees, const ExplanationDataset &data, unsigned model_transform) { tfloat *row_out = out; const tfloat *x = data.X; const bool *x_missing = data.X_missing; // see what transform (if any) we have transform_f transform = get_transform(model_transform); for (unsigned i = 0; i < data.num_X; ++i) { // add the base offset for (unsigned k = 0; k < trees.num_outputs; ++k) { row_out[k] += trees.base_offset[k]; } // add the leaf values from each tree for (unsigned j = 0; j < trees.tree_limit; ++j) { const tfloat *leaf_value = tree_predict(j, trees, x, x_missing); for (unsigned k = 0; k < trees.num_outputs; ++k) { row_out[k] += leaf_value[k]; } } // apply any needed transform if (transform != NULL) { const tfloat y_i = data.y == NULL ? 
0 : data.y[i]; for (unsigned k = 0; k < trees.num_outputs; ++k) { row_out[k] = transform(row_out[k], y_i); } } x += data.M; x_missing += data.M; row_out += trees.num_outputs; } } inline void tree_update_weights(unsigned i, TreeEnsemble &trees, const tfloat *x, const bool *x_missing) { const unsigned offset = i * trees.max_nodes; unsigned node = 0; while (true) { const unsigned pos = offset + node; const unsigned feature = trees.features[pos]; // Record that a sample passed through this node trees.node_sample_weights[pos] += 1.0; // we hit a leaf so return a pointer to the values if (trees.children_left[pos] < 0) break; // otherwise we are at an internal node and need to recurse if (x_missing[feature]) { node = trees.children_default[pos]; } else if (x[feature] <= trees.thresholds[pos]) { node = trees.children_left[pos]; } else { node = trees.children_right[pos]; } } } inline void dense_tree_update_weights(TreeEnsemble &trees, const ExplanationDataset &data) { const tfloat *x = data.X; const bool *x_missing = data.X_missing; for (unsigned i = 0; i < data.num_X; ++i) { // add the leaf values from each tree for (unsigned j = 0; j < trees.tree_limit; ++j) { tree_update_weights(j, trees, x, x_missing); } x += data.M; x_missing += data.M; } } inline void tree_saabas(tfloat *out, const TreeEnsemble &tree, const ExplanationDataset &data) { unsigned curr_node = 0; unsigned next_node = 0; while (true) { // we hit a leaf and are done if (tree.children_left[curr_node] < 0) return; // otherwise we are at an internal node and need to recurse const unsigned feature = tree.features[curr_node]; if (data.X_missing[feature]) { next_node = tree.children_default[curr_node]; } else if (data.X[feature] <= tree.thresholds[curr_node]) { next_node = tree.children_left[curr_node]; } else { next_node = tree.children_right[curr_node]; } // assign credit to this feature as the difference in values at the current node vs. the next node for (unsigned i = 0; i < tree.num_outputs; ++i) { out[feature * tree.num_outputs + i] += tree.values[next_node * tree.num_outputs + i] - tree.values[curr_node * tree.num_outputs + i]; } curr_node = next_node; } } /** * This runs Tree SHAP with a per tree path conditional dependence assumption. */ inline void dense_tree_saabas(tfloat *out_contribs, const TreeEnsemble& trees, const ExplanationDataset &data) { tfloat *instance_out_contribs; TreeEnsemble tree; ExplanationDataset instance; // build explanation for each sample for (unsigned i = 0; i < data.num_X; ++i) { instance_out_contribs = out_contribs + i * (data.M + 1) * trees.num_outputs; data.get_x_instance(instance, i); // aggregate the effect of explaining each tree // (this works because of the linearity property of Shapley values) for (unsigned j = 0; j < trees.tree_limit; ++j) { trees.get_tree(tree, j); tree_saabas(instance_out_contribs, tree, instance); } // apply the base offset to the bias term for (unsigned j = 0; j < trees.num_outputs; ++j) { instance_out_contribs[data.M * trees.num_outputs + j] += trees.base_offset[j]; } } } // extend our decision path with a fraction of one and zero extensions inline void extend_path(PathElement *unique_path, unsigned unique_depth, tfloat zero_fraction, tfloat one_fraction, int feature_index) { unique_path[unique_depth].feature_index = feature_index; unique_path[unique_depth].zero_fraction = zero_fraction; unique_path[unique_depth].one_fraction = one_fraction; unique_path[unique_depth].pweight = (unique_depth == 0 ? 
1.0f : 0.0f); for (int i = unique_depth - 1; i >= 0; i--) { unique_path[i + 1].pweight += one_fraction * unique_path[i].pweight * (i + 1) / static_cast<tfloat>(unique_depth + 1); unique_path[i].pweight = zero_fraction * unique_path[i].pweight * (unique_depth - i) / static_cast<tfloat>(unique_depth + 1); } } // undo a previous extension of the decision path inline void unwind_path(PathElement *unique_path, unsigned unique_depth, unsigned path_index) { const tfloat one_fraction = unique_path[path_index].one_fraction; const tfloat zero_fraction = unique_path[path_index].zero_fraction; tfloat next_one_portion = unique_path[unique_depth].pweight; for (int i = unique_depth - 1; i >= 0; --i) { if (one_fraction != 0) { const tfloat tmp = unique_path[i].pweight; unique_path[i].pweight = next_one_portion * (unique_depth + 1) / static_cast<tfloat>((i + 1) * one_fraction); next_one_portion = tmp - unique_path[i].pweight * zero_fraction * (unique_depth - i) / static_cast<tfloat>(unique_depth + 1); } else { unique_path[i].pweight = (unique_path[i].pweight * (unique_depth + 1)) / static_cast<tfloat>(zero_fraction * (unique_depth - i)); } } for (unsigned i = path_index; i < unique_depth; ++i) { unique_path[i].feature_index = unique_path[i+1].feature_index; unique_path[i].zero_fraction = unique_path[i+1].zero_fraction; unique_path[i].one_fraction = unique_path[i+1].one_fraction; } } // determine what the total permuation weight would be if // we unwound a previous extension in the decision path inline tfloat unwound_path_sum(const PathElement *unique_path, unsigned unique_depth, unsigned path_index) { const tfloat one_fraction = unique_path[path_index].one_fraction; const tfloat zero_fraction = unique_path[path_index].zero_fraction; tfloat next_one_portion = unique_path[unique_depth].pweight; tfloat total = 0; if (one_fraction != 0) { for (int i = unique_depth - 1; i >= 0; --i) { const tfloat tmp = next_one_portion / static_cast<tfloat>((i + 1) * one_fraction); total += tmp; next_one_portion = unique_path[i].pweight - tmp * zero_fraction * (unique_depth - i); } } else { for (int i = unique_depth - 1; i >= 0; --i) { total += unique_path[i].pweight / (zero_fraction * (unique_depth - i)); } } return total * (unique_depth + 1); } // recursive computation of SHAP values for a decision tree inline void tree_shap_recursive(const unsigned num_outputs, const int *children_left, const int *children_right, const int *children_default, const int *features, const tfloat *thresholds, const tfloat *values, const tfloat *node_sample_weight, const tfloat *x, const bool *x_missing, tfloat *phi, unsigned node_index, unsigned unique_depth, PathElement *parent_unique_path, tfloat parent_zero_fraction, tfloat parent_one_fraction, int parent_feature_index, int condition, unsigned condition_feature, tfloat condition_fraction) { // stop if we have no weight coming down to us if (condition_fraction == 0) return; // extend the unique path PathElement *unique_path = parent_unique_path + unique_depth + 1; std::copy(parent_unique_path, parent_unique_path + unique_depth + 1, unique_path); if (condition == 0 || condition_feature != static_cast<unsigned>(parent_feature_index)) { extend_path(unique_path, unique_depth, parent_zero_fraction, parent_one_fraction, parent_feature_index); } const unsigned split_index = features[node_index]; // leaf node if (children_right[node_index] < 0) { for (unsigned i = 1; i <= unique_depth; ++i) { const tfloat w = unwound_path_sum(unique_path, unique_depth, i); const PathElement &el = unique_path[i]; const 
unsigned phi_offset = el.feature_index * num_outputs; const unsigned values_offset = node_index * num_outputs; const tfloat scale = w * (el.one_fraction - el.zero_fraction) * condition_fraction; for (unsigned j = 0; j < num_outputs; ++j) { phi[phi_offset + j] += scale * values[values_offset + j]; } } // internal node } else { // find which branch is "hot" (meaning x would follow it) unsigned hot_index = 0; if (x_missing[split_index]) { hot_index = children_default[node_index]; } else if (x[split_index] <= thresholds[node_index]) { hot_index = children_left[node_index]; } else { hot_index = children_right[node_index]; } const unsigned cold_index = (static_cast<int>(hot_index) == children_left[node_index] ? children_right[node_index] : children_left[node_index]); const tfloat w = node_sample_weight[node_index]; const tfloat hot_zero_fraction = node_sample_weight[hot_index] / w; const tfloat cold_zero_fraction = node_sample_weight[cold_index] / w; tfloat incoming_zero_fraction = 1; tfloat incoming_one_fraction = 1; // see if we have already split on this feature, // if so we undo that split so we can redo it for this node unsigned path_index = 0; for (; path_index <= unique_depth; ++path_index) { if (static_cast<unsigned>(unique_path[path_index].feature_index) == split_index) break; } if (path_index != unique_depth + 1) { incoming_zero_fraction = unique_path[path_index].zero_fraction; incoming_one_fraction = unique_path[path_index].one_fraction; unwind_path(unique_path, unique_depth, path_index); unique_depth -= 1; } // divide up the condition_fraction among the recursive calls tfloat hot_condition_fraction = condition_fraction; tfloat cold_condition_fraction = condition_fraction; if (condition > 0 && split_index == condition_feature) { cold_condition_fraction = 0; unique_depth -= 1; } else if (condition < 0 && split_index == condition_feature) { hot_condition_fraction *= hot_zero_fraction; cold_condition_fraction *= cold_zero_fraction; unique_depth -= 1; } tree_shap_recursive( num_outputs, children_left, children_right, children_default, features, thresholds, values, node_sample_weight, x, x_missing, phi, hot_index, unique_depth + 1, unique_path, hot_zero_fraction * incoming_zero_fraction, incoming_one_fraction, split_index, condition, condition_feature, hot_condition_fraction ); tree_shap_recursive( num_outputs, children_left, children_right, children_default, features, thresholds, values, node_sample_weight, x, x_missing, phi, cold_index, unique_depth + 1, unique_path, cold_zero_fraction * incoming_zero_fraction, 0, split_index, condition, condition_feature, cold_condition_fraction ); } } inline int compute_expectations(TreeEnsemble &tree, int i = 0, int depth = 0) { unsigned max_depth = 0; if (tree.children_right[i] >= 0) { const unsigned li = tree.children_left[i]; const unsigned ri = tree.children_right[i]; const unsigned depth_left = compute_expectations(tree, li, depth + 1); const unsigned depth_right = compute_expectations(tree, ri, depth + 1); const tfloat left_weight = tree.node_sample_weights[li]; const tfloat right_weight = tree.node_sample_weights[ri]; const unsigned li_offset = li * tree.num_outputs; const unsigned ri_offset = ri * tree.num_outputs; const unsigned i_offset = i * tree.num_outputs; for (unsigned j = 0; j < tree.num_outputs; ++j) { if ((left_weight == 0) && (right_weight == 0)) { tree.values[i_offset + j] = 0.0; } else { const tfloat v = (left_weight * tree.values[li_offset + j] + right_weight * tree.values[ri_offset + j]) / (left_weight + right_weight); 
tree.values[i_offset + j] = v; } } max_depth = std::max(depth_left, depth_right) + 1; } if (depth == 0) tree.max_depth = max_depth; return max_depth; } inline void tree_shap(const TreeEnsemble& tree, const ExplanationDataset &data, tfloat *out_contribs, int condition, unsigned condition_feature) { // update the reference value with the expected value of the tree's predictions if (condition == 0) { for (unsigned j = 0; j < tree.num_outputs; ++j) { out_contribs[data.M * tree.num_outputs + j] += tree.values[j]; } } // Pre-allocate space for the unique path data const unsigned maxd = tree.max_depth + 2; // need a bit more space than the max depth PathElement *unique_path_data = new PathElement[(maxd * (maxd + 1)) / 2]; tree_shap_recursive( tree.num_outputs, tree.children_left, tree.children_right, tree.children_default, tree.features, tree.thresholds, tree.values, tree.node_sample_weights, data.X, data.X_missing, out_contribs, 0, 0, unique_path_data, 1, 1, -1, condition, condition_feature, 1 ); delete[] unique_path_data; } // extend our decision path with a fraction of one and zero extensions // update unique_path and pweights for the feature of the last split inline void extend_path_v1(PathElement *unique_path, tfloat *pweights, unsigned unique_depth, unsigned unique_depth_pweights, tfloat zero_fraction, tfloat one_fraction, int feature_index) { unique_path[unique_depth].feature_index = feature_index; unique_path[unique_depth].zero_fraction = zero_fraction; unique_path[unique_depth].one_fraction = one_fraction; if (one_fraction != 0) { // extend pweights iff the feature of the last split satisfies the threshold pweights[unique_depth_pweights] = (unique_depth_pweights == 0 ? 1.0f : 0.0f); for (int i = unique_depth_pweights - 1; i >= 0; i--) { pweights[i + 1] += pweights[i] * (i + 1) / static_cast<tfloat>(unique_depth + 1); pweights[i] *= zero_fraction * (unique_depth - i) / static_cast<tfloat>(unique_depth + 1); } } else { for (int i = unique_depth_pweights - 1; i >= 0; i--) { pweights[i] *= (unique_depth - i) / static_cast<tfloat>(unique_depth + 1); } } } // undo a previous extension of the decision path inline void unwind_path_v1(PathElement *unique_path, tfloat *pweights, unsigned unique_depth, unsigned unique_depth_pweights, unsigned path_index) { const tfloat one_fraction = unique_path[path_index].one_fraction; const tfloat zero_fraction = unique_path[path_index].zero_fraction; tfloat next_one_portion = pweights[unique_depth_pweights]; if (one_fraction != 0) { // shrink pweights iff the feature satisfies the threshold for (int i = unique_depth_pweights - 1; i >= 0; --i) { const tfloat tmp = pweights[i]; pweights[i] = next_one_portion * (unique_depth + 1) / static_cast<tfloat>(i + 1); next_one_portion = tmp - pweights[i] * zero_fraction * (unique_depth - i) / static_cast<tfloat>(unique_depth + 1); } } else { for (int i = unique_depth_pweights; i >= 0; --i) { pweights[i] *= (unique_depth + 1) / static_cast<tfloat>(unique_depth - i); } } for (unsigned i = path_index; i < unique_depth; ++i) { unique_path[i].feature_index = unique_path[i+1].feature_index; unique_path[i].zero_fraction = unique_path[i+1].zero_fraction; unique_path[i].one_fraction = unique_path[i+1].one_fraction; } } // determine what the total permuation weight would be if // we unwound a previous extension in the decision path (for feature satisfying the threshold) inline tfloat unwound_path_sum_v1(const PathElement *unique_path, const tfloat *pweights, unsigned unique_depth, unsigned unique_depth_pweights, unsigned path_index) 
{ tfloat total = 0; const tfloat zero_fraction = unique_path[path_index].zero_fraction; tfloat next_one_portion = pweights[unique_depth_pweights]; for (int i = unique_depth_pweights - 1; i >= 0; --i) { const tfloat tmp = next_one_portion / static_cast<tfloat>(i + 1); total += tmp; next_one_portion = pweights[i] - tmp * zero_fraction * (unique_depth - i); } return total * (unique_depth + 1); } // determine what the total permuation weight would be if // we unwound a previous extension in the decision path (for features not satisfying the thresholds) inline tfloat unwound_path_sum_zero_v1(const tfloat *pweights, unsigned unique_depth, unsigned unique_depth_pweights) { tfloat total = 0; if (unique_depth > unique_depth_pweights) { for (int i = unique_depth_pweights; i >= 0; --i) { total += pweights[i] / static_cast<tfloat>(unique_depth - i); } } return total * (unique_depth + 1); } // recursive computation of SHAP values for a decision tree inline void tree_shap_recursive_v1(const unsigned num_outputs, const int *children_left, const int *children_right, const int *children_default, const int *features, const tfloat *thresholds, const tfloat *values, const tfloat *node_sample_weight, const tfloat *x, const bool *x_missing, tfloat *phi, unsigned node_index, unsigned unique_depth, unsigned unique_depth_pweights, PathElement *parent_unique_path, tfloat *parent_pweights, tfloat pweights_residual, tfloat parent_zero_fraction, tfloat parent_one_fraction, int parent_feature_index, int condition, unsigned condition_feature, tfloat condition_fraction) { // stop if we have no weight coming down to us if (condition_fraction == 0) return; // extend the unique path PathElement *unique_path = parent_unique_path + unique_depth + 1; std::copy(parent_unique_path, parent_unique_path + unique_depth + 1, unique_path); tfloat *pweights = parent_pweights + unique_depth_pweights + 1; std::copy(parent_pweights, parent_pweights + unique_depth_pweights + 1, pweights); if (condition == 0 || condition_feature != static_cast<unsigned>(parent_feature_index)) { extend_path_v1(unique_path, pweights, unique_depth, unique_depth_pweights, parent_zero_fraction, parent_one_fraction, parent_feature_index); // update pweights_residual iff the feature of the last split does not satisfy the threshold if (parent_one_fraction != 1) { pweights_residual *= parent_zero_fraction; unique_depth_pweights -= 1; } } const unsigned split_index = features[node_index]; // leaf node if (children_right[node_index] < 0) { const unsigned values_offset = node_index * num_outputs; unsigned values_nonzero_ind = 0; unsigned values_nonzero_count = 0; for (unsigned j = 0; j < num_outputs; ++j) { if (values[values_offset + j] != 0) { values_nonzero_ind = j; values_nonzero_count++; } } // pre-calculate w_zero for all features not satisfying the thresholds const tfloat w_zero = unwound_path_sum_zero_v1(pweights, unique_depth, unique_depth_pweights); const tfloat scale_zero = -w_zero * pweights_residual * condition_fraction; tfloat scale; for (unsigned i = 1; i <= unique_depth; ++i) { const PathElement &el = unique_path[i]; const unsigned phi_offset = el.feature_index * num_outputs; // update contributions to SHAP values for features satisfying the thresholds and not satisfying the thresholds separately if (el.one_fraction != 0) { const tfloat w = unwound_path_sum_v1(unique_path, pweights, unique_depth, unique_depth_pweights, i); scale = w * pweights_residual * (1 - el.zero_fraction) * condition_fraction; } else { scale = scale_zero; } if (values_nonzero_count 
== 1) { phi[phi_offset + values_nonzero_ind] += scale * values[values_offset + values_nonzero_ind]; } else { for (unsigned j = 0; j < num_outputs; ++j) { phi[phi_offset + j] += scale * values[values_offset + j]; } } } // internal node } else { // find which branch is "hot" (meaning x would follow it) unsigned hot_index = 0; if (x_missing[split_index]) { hot_index = children_default[node_index]; } else if (x[split_index] <= thresholds[node_index]) { hot_index = children_left[node_index]; } else { hot_index = children_right[node_index]; } const unsigned cold_index = (static_cast<int>(hot_index) == children_left[node_index] ? children_right[node_index] : children_left[node_index]); const tfloat w = node_sample_weight[node_index]; const tfloat hot_zero_fraction = node_sample_weight[hot_index] / w; const tfloat cold_zero_fraction = node_sample_weight[cold_index] / w; tfloat incoming_zero_fraction = 1; tfloat incoming_one_fraction = 1; // see if we have already split on this feature, // if so we undo that split so we can redo it for this node unsigned path_index = 0; for (; path_index <= unique_depth; ++path_index) { if (static_cast<unsigned>(unique_path[path_index].feature_index) == split_index) break; } if (path_index != unique_depth + 1) { incoming_zero_fraction = unique_path[path_index].zero_fraction; incoming_one_fraction = unique_path[path_index].one_fraction; unwind_path_v1(unique_path, pweights, unique_depth, unique_depth_pweights, path_index); unique_depth -= 1; // update pweights_residual iff the duplicated feature does not satisfy the threshold if (incoming_one_fraction != 0.) { unique_depth_pweights -= 1; } else { pweights_residual /= incoming_zero_fraction; } } // divide up the condition_fraction among the recursive calls tfloat hot_condition_fraction = condition_fraction; tfloat cold_condition_fraction = condition_fraction; if (condition > 0 && split_index == condition_feature) { cold_condition_fraction = 0; unique_depth -= 1; unique_depth_pweights -= 1; } else if (condition < 0 && split_index == condition_feature) { hot_condition_fraction *= hot_zero_fraction; cold_condition_fraction *= cold_zero_fraction; unique_depth -= 1; unique_depth_pweights -= 1; } tree_shap_recursive_v1( num_outputs, children_left, children_right, children_default, features, thresholds, values, node_sample_weight, x, x_missing, phi, hot_index, unique_depth + 1, unique_depth_pweights + 1, unique_path, pweights, pweights_residual, hot_zero_fraction * incoming_zero_fraction, incoming_one_fraction, split_index, condition, condition_feature, hot_condition_fraction ); tree_shap_recursive_v1( num_outputs, children_left, children_right, children_default, features, thresholds, values, node_sample_weight, x, x_missing, phi, cold_index, unique_depth + 1, unique_depth_pweights + 1, unique_path, pweights, pweights_residual, cold_zero_fraction * incoming_zero_fraction, 0, split_index, condition, condition_feature, cold_condition_fraction ); } } inline void tree_shap_v1(const TreeEnsemble& tree, const ExplanationDataset &data, tfloat *out_contribs, int condition, unsigned condition_feature) { // update the reference value with the expected value of the tree's predictions if (condition == 0) { for (unsigned j = 0; j < tree.num_outputs; ++j) { out_contribs[data.M * tree.num_outputs + j] += tree.values[j]; } } // pre-allocate space for the unique path data and pweights const unsigned maxd = tree.max_depth + 2; // need a bit more space than the max depth PathElement *unique_path_data = new PathElement[(maxd * (maxd + 1)) / 
// recursive computation of combination_sum (matrix S) for a decision tree
inline void compute_combination_sum_recursive_v2(const int *children_left,
                                                 const int *children_right,
                                                 const int *features,
                                                 const tfloat *node_sample_weight,
                                                 const int max_depth, tfloat *combination_sum,
                                                 int *duplicated_node, unsigned node_index,
                                                 unsigned unique_depth,
                                                 int *parent_unique_depth_pweights,
                                                 PathElement *parent_unique_path,
                                                 tfloat *parent_pweights,
                                                 tfloat parent_zero_fraction,
                                                 int parent_feature_index, int *leaf_count) {
  // extend the unique path
  PathElement *unique_path = parent_unique_path + unique_depth;
  std::copy(parent_unique_path, parent_unique_path + unique_depth, unique_path);
  unique_path[unique_depth].feature_index = parent_feature_index;
  unique_path[unique_depth].zero_fraction = parent_zero_fraction;

  unsigned l;
  int *unique_depth_pweights;
  tfloat *pweights;
  tfloat *t_pweights;

  // extend pweights and update unique_depth_pweights
  if (unique_depth == 0) {
    l = 1;
    unique_depth_pweights = parent_unique_depth_pweights;
    unique_depth_pweights[0] = 0;
    pweights = parent_pweights;
    pweights[0] = 1.0f;
  } else {
    l = static_cast<int>(1 << (unique_depth - 1));
    unique_depth_pweights = parent_unique_depth_pweights + l;
    std::copy(parent_unique_depth_pweights, parent_unique_depth_pweights + l,
              unique_depth_pweights);
    std::copy(parent_unique_depth_pweights, parent_unique_depth_pweights + l,
              unique_depth_pweights + l);
    pweights = parent_pweights + l * (max_depth + 1);
    std::copy(parent_pweights, parent_pweights + l * (max_depth + 1), pweights);
    std::copy(parent_pweights, parent_pweights + l * (max_depth + 1),
              pweights + l * (max_depth + 1));
    for (unsigned t = 0; t < l; t++) {
      t_pweights = pweights + t * (max_depth + 1);
      for (int i = unique_depth_pweights[t] - 1; i >= 0; i--) {
        t_pweights[i] *= (unique_depth - i) / static_cast<tfloat>(unique_depth + 1);
      }
      unique_depth_pweights[t] -= 1;
    }
    for (unsigned t = l; t < 2 * l; t++) {
      t_pweights = pweights + t * (max_depth + 1);
      t_pweights[unique_depth_pweights[t]] = 0.0f;
      for (int i = unique_depth_pweights[t] - 1; i >= 0; i--) {
        t_pweights[i + 1] += t_pweights[i] * (i + 1) / static_cast<tfloat>(unique_depth + 1);
        t_pweights[i] *= parent_zero_fraction * (unique_depth - i) /
                         static_cast<tfloat>(unique_depth + 1);
      }
    }
  }

  const unsigned split_index = features[node_index];

  // leaf node
  if (children_right[node_index] < 0) {
    // calculate one row of combination_sum for the current path
    tfloat *leaf_combination_sum =
      combination_sum + leaf_count[0] * static_cast<int>(1 << max_depth);
    for (unsigned t = 0; t < 2 * l - 1; t++) {
      leaf_combination_sum[t] = 0;
      t_pweights = pweights + t * (max_depth + 1);
      for (int i = unique_depth_pweights[t]; i >= 0; i--) {
        leaf_combination_sum[t] += t_pweights[i] / static_cast<tfloat>(unique_depth - i);
      }
      leaf_combination_sum[t] *= (unique_depth + 1);
    }
    leaf_count[0] += 1;

  // internal node
  } else {
    const unsigned left_index = children_left[node_index];
    const unsigned right_index = children_right[node_index];
    const tfloat w = node_sample_weight[node_index];
    const tfloat left_zero_fraction = node_sample_weight[left_index] / w;
    const tfloat right_zero_fraction = node_sample_weight[right_index] / w;
    tfloat incoming_zero_fraction = 1;

    // see if we have already split on this feature,
    // if so we undo that split so we can redo it for this node
    unsigned path_index = 0;
    for (; path_index <= unique_depth; ++path_index) {
      if (static_cast<unsigned>(unique_path[path_index].feature_index) == split_index) break;
    }
    if (path_index != unique_depth + 1) {
      duplicated_node[node_index] = path_index; // record node index of duplicated feature
      incoming_zero_fraction = unique_path[path_index].zero_fraction;

      // shrink pweights and unique_path, and update unique_depth_pweights,
      // given the duplicated feature
      unsigned p = static_cast<int>(1 << (path_index - 1));
      unsigned t = 0;
      tfloat *k_pweights;
      for (unsigned j = 0; j < 2 * l; j += 2 * p) {
        for (unsigned k = j; k < j + p; k++) {
          t_pweights = pweights + t * (max_depth + 1);
          k_pweights = pweights + k * (max_depth + 1);
          for (int i = unique_depth_pweights[k]; i >= 0; i--) {
            t_pweights[i] = k_pweights[i] * (unique_depth + 1) /
                            static_cast<tfloat>(unique_depth - i);
          }
          unique_depth_pweights[t] = unique_depth_pweights[k];
          t += 1;
        }
      }
      for (unsigned i = path_index; i < unique_depth; ++i) {
        unique_path[i].feature_index = unique_path[i + 1].feature_index;
        unique_path[i].zero_fraction = unique_path[i + 1].zero_fraction;
      }
      unique_depth -= 1;
    } else {
      duplicated_node[node_index] = -1;
    }

    for (unsigned t = 0; t < 2 * l; t++) {
      unique_depth_pweights[t] += 1;
    }

    compute_combination_sum_recursive_v2(
      children_left, children_right, features, node_sample_weight, max_depth, combination_sum,
      duplicated_node, left_index, unique_depth + 1, unique_depth_pweights, unique_path,
      pweights, incoming_zero_fraction * left_zero_fraction, split_index, leaf_count
    );

    compute_combination_sum_recursive_v2(
      children_left, children_right, features, node_sample_weight, max_depth, combination_sum,
      duplicated_node, right_index, unique_depth + 1, unique_depth_pweights, unique_path,
      pweights, incoming_zero_fraction * right_zero_fraction, split_index, leaf_count
    );
  }
}
// computation of combination_sum (matrix S) for a decision tree
inline void compute_combination_sum_v2(const TreeEnsemble& tree, tfloat *combination_sum,
                                       int *duplicated_node) {
  // Pre-allocate space for the unique path data, pweights and unique_depth_pweights
  const unsigned max_combinations = static_cast<int>(1 << tree.max_depth);
  int *unique_depth_pweights = new int[2 * max_combinations];
  tfloat *pweights = new tfloat[2 * max_combinations * (tree.max_depth + 1)];
  PathElement *unique_path_data =
    new PathElement[(tree.max_depth + 1) * (tree.max_depth + 2) / 2];
  int *leaf_count = new int[1];
  leaf_count[0] = 0;

  compute_combination_sum_recursive_v2(
    tree.children_left, tree.children_right, tree.features, tree.node_sample_weights,
    tree.max_depth, combination_sum, duplicated_node, 0, 0, unique_depth_pweights,
    unique_path_data, pweights, 1, -1, leaf_count
  );

  delete[] unique_depth_pweights;
  delete[] pweights;
  delete[] unique_path_data;
  delete[] leaf_count;
}
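// Note (added comment): combination_sum is laid out as one row of 2^max_depth entries per
// leaf, in the order leaves are visited (leaf_count is the running row index), and
// duplicated_node stores, for each internal node, the unique-path position of an earlier
// split on the same feature (or -1). tree_shap_recursive_v2 below replays those recorded
// decisions instead of re-scanning the path for duplicates.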
// recursive computation of SHAP values for a decision tree
inline void tree_shap_recursive_v2(const unsigned num_outputs, const int *children_left,
                                   const int *children_right, const int *children_default,
                                   const int *features, const tfloat *thresholds,
                                   const tfloat *values, const tfloat *node_sample_weight,
                                   const int max_depth, const tfloat *combination_sum,
                                   const int *duplicated_node, const tfloat *x,
                                   const bool *x_missing, tfloat *phi, unsigned node_index,
                                   unsigned unique_depth, PathElement *parent_unique_path,
                                   tfloat pweights_residual, tfloat parent_zero_fraction,
                                   tfloat parent_one_fraction, int parent_feature_index,
                                   int *leaf_count) {
  // extend the unique path
  PathElement *unique_path = parent_unique_path + unique_depth;
  std::copy(parent_unique_path, parent_unique_path + unique_depth, unique_path);
  unique_path[unique_depth].feature_index = parent_feature_index;
  unique_path[unique_depth].zero_fraction = parent_zero_fraction;
  unique_path[unique_depth].one_fraction = parent_one_fraction;

  // update pweights_residual iff the feature of the last split does not satisfy the threshold
  if (parent_one_fraction != 1) {
    pweights_residual *= parent_zero_fraction;
  }

  const unsigned split_index = features[node_index];

  // leaf node
  if (children_right[node_index] < 0) {
    const tfloat *leaf_combination_sum = combination_sum + leaf_count[0] * (1 << max_depth);
    // use combination_sum_ind to search in the row of combination_sum
    // corresponding to the current path
    unsigned combination_sum_ind = 0;
    for (unsigned i = 1; i <= unique_depth; ++i) {
      if (unique_path[i].one_fraction != 0) {
        combination_sum_ind += 1 << (i - 1);
      }
    }

    // update contributions to SHAP values for features satisfying the thresholds
    // and not satisfying the thresholds separately
    const unsigned values_offset = node_index * num_outputs;
    unsigned values_nonzero_ind = 0;
    unsigned values_nonzero_count = 0;
    for (unsigned j = 0; j < num_outputs; ++j) {
      if (values[values_offset + j] != 0) {
        values_nonzero_ind = j;
        values_nonzero_count++;
      }
    }
    const tfloat scale_zero = -leaf_combination_sum[combination_sum_ind] * pweights_residual;
    for (unsigned i = 1; i <= unique_depth; ++i) {
      const PathElement &el = unique_path[i];
      const unsigned phi_offset = el.feature_index * num_outputs;
      const tfloat scale = (el.one_fraction != 0) ?
        leaf_combination_sum[combination_sum_ind - (1 << (i - 1))] *
          pweights_residual * (1 - el.zero_fraction) : scale_zero;
      if (values_nonzero_count == 1) {
        phi[phi_offset + values_nonzero_ind] += scale * values[values_offset + values_nonzero_ind];
      } else {
        for (unsigned j = 0; j < num_outputs; ++j) {
          phi[phi_offset + j] += scale * values[values_offset + j];
        }
      }
    }
    leaf_count[0] += 1;

  // internal node
  } else {
    const unsigned left_index = children_left[node_index];
    const unsigned right_index = children_right[node_index];
    const tfloat w = node_sample_weight[node_index];
    const tfloat left_zero_fraction = node_sample_weight[left_index] / w;
    const tfloat right_zero_fraction = node_sample_weight[right_index] / w;
    tfloat incoming_zero_fraction = 1;
    tfloat incoming_one_fraction = 1;

    // see if we have already split on this feature,
    // if so we undo that split so we can redo it for this node
    const int path_index = duplicated_node[node_index];
    if (path_index >= 0) {
      incoming_zero_fraction = unique_path[path_index].zero_fraction;
      incoming_one_fraction = unique_path[path_index].one_fraction;
      for (unsigned i = path_index; i < unique_depth; ++i) {
        unique_path[i].feature_index = unique_path[i + 1].feature_index;
        unique_path[i].zero_fraction = unique_path[i + 1].zero_fraction;
        unique_path[i].one_fraction = unique_path[i + 1].one_fraction;
      }
      unique_depth -= 1;
      // update pweights_residual iff the duplicated feature does not satisfy the threshold
      if (incoming_one_fraction != 1.) {
        pweights_residual /= incoming_zero_fraction;
      }
    }

    tree_shap_recursive_v2(
      num_outputs, children_left, children_right, children_default, features, thresholds,
      values, node_sample_weight, max_depth, combination_sum, duplicated_node, x, x_missing,
      phi, left_index, unique_depth + 1, unique_path, pweights_residual,
      left_zero_fraction * incoming_zero_fraction,
      incoming_one_fraction * int(x[split_index] <= thresholds[node_index]),
      split_index, leaf_count
    );

    tree_shap_recursive_v2(
      num_outputs, children_left, children_right, children_default, features, thresholds,
      values, node_sample_weight, max_depth, combination_sum, duplicated_node, x, x_missing,
      phi, right_index, unique_depth + 1, unique_path, pweights_residual,
      right_zero_fraction * incoming_zero_fraction,
      incoming_one_fraction * int(x[split_index] > thresholds[node_index]),
      split_index, leaf_count
    );
  }
}
inline void tree_shap_v2(const TreeEnsemble& tree, const tfloat *combination_sum,
                         const int *duplicated_node, const ExplanationDataset &data,
                         tfloat *out_contribs) {
  // update the reference value with the expected value of the tree's predictions
  for (unsigned j = 0; j < tree.num_outputs; ++j) {
    out_contribs[data.M * tree.num_outputs + j] += tree.values[j];
  }

  // pre-allocate space for the unique path data
  PathElement *unique_path_data =
    new PathElement[(tree.max_depth + 1) * (tree.max_depth + 2) / 2];
  int *leaf_count = new int[1];
  leaf_count[0] = 0;

  tree_shap_recursive_v2(
    tree.num_outputs, tree.children_left, tree.children_right, tree.children_default,
    tree.features, tree.thresholds, tree.values, tree.node_sample_weights, tree.max_depth,
    combination_sum, duplicated_node, data.X, data.X_missing, out_contribs, 0, 0,
    unique_path_data, 1, 1, 1, -1, leaf_count
  );

  delete[] unique_path_data;
  delete[] leaf_count;
}
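// Illustrative sketch (added comment; not part of the original file): the v2 algorithm is
// two-phase. Phase one precomputes per-leaf combination sums once per tree; phase two reuses
// them for every explained sample, which is what makes v2 pay off when num_X is large.
// Buffer sizes follow the allocations in dense_tree_path_dependent() below:
//
//   const unsigned max_leaves = (tree.max_nodes + 1) / 2;
//   const unsigned max_combinations = 1 << tree.max_depth;
//   tfloat *S = new tfloat[max_leaves * max_combinations];
//   int *dup = new int[tree.max_nodes];
//   compute_combination_sum_v2(tree, S, dup);            // once per tree
//   tree_shap_v2(tree, S, dup, instance, out_contribs);  // once per (tree, sample) pair
//   delete[] S; delete[] dup;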
inline unsigned build_merged_tree_recursive(TreeEnsemble &out_tree, const TreeEnsemble &trees,
                                            const tfloat *data, const bool *data_missing,
                                            int *data_inds,
                                            const unsigned num_background_data_inds,
                                            unsigned num_data_inds, unsigned M,
                                            unsigned row = 0, unsigned i = 0, unsigned pos = 0,
                                            tfloat *leaf_value = NULL) {

  //tfloat new_leaf_value[trees.num_outputs];
  tfloat *new_leaf_value = (tfloat *) alloca(sizeof(tfloat) * trees.num_outputs); // allocate on the stack

  unsigned row_offset = row * trees.max_nodes;

  // we have hit a terminal leaf!!!
  if (trees.children_left[row_offset + i] < 0 && row + 1 == trees.tree_limit) {

    // create the leaf node
    const tfloat *vals = trees.values + (row * trees.max_nodes + i) * trees.num_outputs;
    if (leaf_value == NULL) {
      for (unsigned j = 0; j < trees.num_outputs; ++j) {
        out_tree.values[pos * trees.num_outputs + j] = vals[j];
      }
    } else {
      for (unsigned j = 0; j < trees.num_outputs; ++j) {
        out_tree.values[pos * trees.num_outputs + j] = leaf_value[j] + vals[j];
      }
    }
    out_tree.children_left[pos] = -1;
    out_tree.children_right[pos] = -1;
    out_tree.children_default[pos] = -1;
    out_tree.features[pos] = -1;
    out_tree.thresholds[pos] = 0;
    out_tree.node_sample_weights[pos] = num_background_data_inds;

    return pos;
  }

  // we hit an intermediate leaf (so just add the value to our accumulator and move to the next tree)
  if (trees.children_left[row_offset + i] < 0) {

    // accumulate the value of this original leaf so it will land on all eventual terminal leaves
    const tfloat *vals = trees.values + (row * trees.max_nodes + i) * trees.num_outputs;
    if (leaf_value == NULL) {
      for (unsigned j = 0; j < trees.num_outputs; ++j) {
        new_leaf_value[j] = vals[j];
      }
    } else {
      for (unsigned j = 0; j < trees.num_outputs; ++j) {
        new_leaf_value[j] = leaf_value[j] + vals[j];
      }
    }
    leaf_value = new_leaf_value;

    // move forward to the next tree
    row += 1;
    row_offset += trees.max_nodes;
    i = 0;
  }

  // split the data inds by this node's threshold
  const tfloat t = trees.thresholds[row_offset + i];
  const int f = trees.features[row_offset + i];
  const bool right_default =
    trees.children_default[row_offset + i] == trees.children_right[row_offset + i];
  int low_ptr = 0;
  int high_ptr = num_data_inds - 1;
  unsigned num_left_background_data_inds = 0;
  int low_data_ind;
  while (low_ptr <= high_ptr) {
    low_data_ind = data_inds[low_ptr];
    const int data_ind = std::abs(low_data_ind) * M + f;
    const bool is_missing = data_missing[data_ind];
    if ((!is_missing && data[data_ind] > t) || (right_default && is_missing)) {
      data_inds[low_ptr] = data_inds[high_ptr];
      data_inds[high_ptr] = low_data_ind;
      high_ptr -= 1;
    } else {
      if (low_data_ind >= 0) ++num_left_background_data_inds; // negative data_inds are not background samples
      low_ptr += 1;
    }
  }
  int *left_data_inds = data_inds;
  const unsigned num_left_data_inds = low_ptr;
  int *right_data_inds = data_inds + low_ptr;
  const unsigned num_right_data_inds = num_data_inds - num_left_data_inds;
  const unsigned num_right_background_data_inds =
    num_background_data_inds - num_left_background_data_inds;

  // all the data went right, so we skip creating this node and just recurse right
  if (num_left_data_inds == 0) {
    return build_merged_tree_recursive(
      out_tree, trees, data, data_missing, data_inds, num_background_data_inds,
      num_data_inds, M, row, trees.children_right[row_offset + i], pos, leaf_value
    );

  // all the data went left, so we skip creating this node and just recurse left
  } else if (num_right_data_inds == 0) {
    return build_merged_tree_recursive(
      out_tree, trees, data, data_missing, data_inds, num_background_data_inds,
      num_data_inds, M, row, trees.children_left[row_offset + i], pos, leaf_value
    );

  // data went both ways so we create this node and recurse down both paths
  } else {

    // build the left subtree
    const unsigned new_pos = build_merged_tree_recursive(
      out_tree, trees, data, data_missing, left_data_inds, num_left_background_data_inds,
      num_left_data_inds, M, row, trees.children_left[row_offset + i], pos + 1, leaf_value
    );

    // fill in the data for this node
    out_tree.children_left[pos] = pos + 1;
    out_tree.children_right[pos] = new_pos + 1;
    if (trees.children_left[row_offset + i] == trees.children_default[row_offset + i]) {
      out_tree.children_default[pos] = pos + 1;
    } else {
      out_tree.children_default[pos] = new_pos + 1;
    }
    out_tree.features[pos] = trees.features[row_offset + i];
    out_tree.thresholds[pos] = trees.thresholds[row_offset + i];
    out_tree.node_sample_weights[pos] = num_background_data_inds;

    // build the right subtree
    return build_merged_tree_recursive(
      out_tree, trees, data, data_missing, right_data_inds, num_right_background_data_inds,
      num_right_data_inds, M, row, trees.children_right[row_offset + i], new_pos + 1, leaf_value
    );
  }
}

inline void build_merged_tree(TreeEnsemble &out_tree, const ExplanationDataset &data,
                              const TreeEnsemble &trees) {

  // create a joint data matrix from both X and R matrices
  tfloat *joined_data = new tfloat[(data.num_X + data.num_R) * data.M];
  std::copy(data.X, data.X + data.num_X * data.M, joined_data);
  std::copy(data.R, data.R + data.num_R * data.M, joined_data + data.num_X * data.M);
  bool *joined_data_missing = new bool[(data.num_X + data.num_R) * data.M];
  std::copy(data.X_missing, data.X_missing + data.num_X * data.M, joined_data_missing);
  std::copy(data.R_missing, data.R_missing + data.num_R * data.M,
            joined_data_missing + data.num_X * data.M);

  // create a starting array of data indexes we will recursively sort
  int *data_inds = new int[data.num_X + data.num_R];
  for (unsigned i = 0; i < data.num_X; ++i) data_inds[i] = i;
  for (unsigned i = data.num_X; i < data.num_X + data.num_R; ++i) {
    data_inds[i] = -i; // a negative index means it won't be recorded as a background sample
  }

  build_merged_tree_recursive(
    out_tree, trees, joined_data, joined_data_missing, data_inds, data.num_R,
    data.num_X + data.num_R, data.M
  );

  delete[] joined_data;
  delete[] joined_data_missing;
  delete[] data_inds;
}

// Independent Tree SHAP functions below here
// ------------------------------------------
struct Node {
  short cl, cr, cd, pnode, feat, pfeat; // uint_16
  float thres, value;
  char from_flag;
};

#define FROM_NEITHER 0
#define FROM_X_NOT_R 1
#define FROM_R_NOT_X 2

// https://www.geeksforgeeks.org/space-and-time-efficient-binomial-coefficient/
inline int bin_coeff(int n, int k) {
  int res = 1;
  if (k > n - k) k = n - k;
  for (int i = 0; i < k; ++i) {
    res *= (n - i);
    res /= (i + 1);
  }
  return res;
}
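// Worked example (added comment): bin_coeff computes C(n, k) multiplicatively to avoid
// factorial overflow. For C(5, 2) it keeps k = min(k, n-k) = 2 and runs
// res = 1 -> (1*5)/1 = 5 -> (5*4)/2 = 10. Each intermediate division is exact because a
// product of i+1 consecutive integers is always divisible by (i+1)!.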
// note this only handles single output models, so multi-output models get explained using multiple passes
inline void tree_shap_indep(const unsigned max_depth, const unsigned num_feats,
                            const unsigned num_nodes, const tfloat *x,
                            const bool *x_missing, const tfloat *r,
                            const bool *r_missing, tfloat *out_contribs,
                            float *pos_lst, float *neg_lst, signed short *feat_hist,
                            float *memoized_weights, int *node_stack, Node *mytree) {
  // const bool DEBUG = true;
  // ofstream myfile;
  // if (DEBUG) {
  //   myfile.open ("/homes/gws/hughchen/shap/out.txt",fstream::app);
  //   myfile << "Entering tree_shap_indep\n";
  // }
  int ns_ctr = 0;
  std::fill_n(feat_hist, num_feats, 0);
  short node = 0, feat, cl, cr, cd, pnode, pfeat = -1;
  short next_xnode = -1, next_rnode = -1;
  short next_node = -1, from_child = -1;
  float thres, pos_x = 0, neg_x = 0, pos_r = 0, neg_r = 0;
  char from_flag;
  unsigned M = 0, N = 0;
  Node curr_node = mytree[node];
  feat = curr_node.feat;
  thres = curr_node.thres;
  cl = curr_node.cl;
  cr = curr_node.cr;
  cd = curr_node.cd;

  // short circuit when this is a stump tree (with no splits)
  if (cl < 0) {
    out_contribs[num_feats] += curr_node.value;
    return;
  }

  // if (DEBUG) {
  //   myfile << "\nNode: " << node << "\n";
  //   myfile << "x[feat]: " << x[feat] << ", r[feat]: " << r[feat] << "\n";
  //   myfile << "thres: " << thres << "\n";
  // }

  if (x_missing[feat]) {
    next_xnode = cd;
  } else if (x[feat] > thres) {
    next_xnode = cr;
  } else if (x[feat] <= thres) {
    next_xnode = cl;
  }

  if (r_missing[feat]) {
    next_rnode = cd;
  } else if (r[feat] > thres) {
    next_rnode = cr;
  } else if (r[feat] <= thres) {
    next_rnode = cl;
  }

  if (next_xnode != next_rnode) {
    mytree[next_xnode].from_flag = FROM_X_NOT_R;
    mytree[next_rnode].from_flag = FROM_R_NOT_X;
  } else {
    mytree[next_xnode].from_flag = FROM_NEITHER;
  }

  // Check if x and r go the same way
  if (next_xnode == next_rnode) {
    next_node = next_xnode;
  }

  // If not, go left
  if (next_node < 0) {
    next_node = cl;
    if (next_rnode == next_node) { // rpath
      N = N+1;
      feat_hist[feat] -= 1;
    } else if (next_xnode == next_node) { // xpath
      M = M+1;
      N = N+1;
      feat_hist[feat] += 1;
    }
  }
  node_stack[ns_ctr] = node;
  ns_ctr += 1;

  while (true) {
    node = next_node;
    curr_node = mytree[node];
    feat = curr_node.feat;
    thres = curr_node.thres;
    cl = curr_node.cl;
    cr = curr_node.cr;
    cd = curr_node.cd;
    pnode = curr_node.pnode;
    pfeat = curr_node.pfeat;
    from_flag = curr_node.from_flag;

    // if (DEBUG) {
    //   myfile << "\nNode: " << node << "\n";
    //   myfile << "N: " << N << ", M: " << M << "\n";
    //   myfile << "from_flag==FROM_X_NOT_R: " << (from_flag==FROM_X_NOT_R) << "\n";
    //   myfile << "from_flag==FROM_R_NOT_X: " << (from_flag==FROM_R_NOT_X) << "\n";
    //   myfile << "from_flag==FROM_NEITHER: " << (from_flag==FROM_NEITHER) << "\n";
    //   myfile << "feat_hist[feat]: " << feat_hist[feat] << "\n";
    // }

    // At a leaf
    if (cl < 0) {
      // if (DEBUG) {
      //   myfile << "At a leaf\n";
      // }
      if (M == 0) {
        out_contribs[num_feats] += mytree[node].value;
      }

      // Currently assuming a single output
      if (N != 0) {
        if (M != 0) {
          pos_lst[node] = mytree[node].value * memoized_weights[N + max_depth * (M-1)];
        }
        if (M != N) {
          neg_lst[node] = -mytree[node].value * memoized_weights[N + max_depth * M];
        }
      }
      // if (DEBUG) {
      //   myfile << "pos_lst[node]: " << pos_lst[node] << "\n";
      //   myfile << "neg_lst[node]: " << neg_lst[node] << "\n";
      // }

      // Pop from node_stack
      ns_ctr -= 1;
      next_node = node_stack[ns_ctr];
      from_child = node;

      // Unwind
      if (feat_hist[pfeat] > 0) {
        feat_hist[pfeat] -= 1;
      } else if (feat_hist[pfeat] < 0) {
        feat_hist[pfeat] += 1;
      }
      if (feat_hist[pfeat] == 0) {
        if (from_flag == FROM_X_NOT_R) {
          N = N-1;
          M = M-1;
        } else if (from_flag == FROM_R_NOT_X) {
          N = N-1;
        }
      }
      continue;
    }

    const bool x_right = x[feat] > thres;
    const bool r_right = r[feat] > thres;

    if (x_missing[feat]) {
      next_xnode = cd;
    } else if (x_right) {
      next_xnode = cr;
    } else if (!x_right) {
      next_xnode = cl;
    }

    if (r_missing[feat]) {
      next_rnode = cd;
    } else if (r_right) {
      next_rnode = cr;
    } else if (!r_right) {
      next_rnode = cl;
    }

    if (next_xnode >= 0) {
      if (next_xnode != next_rnode) {
        mytree[next_xnode].from_flag = FROM_X_NOT_R;
        mytree[next_rnode].from_flag = FROM_R_NOT_X;
      } else {
        mytree[next_xnode].from_flag = FROM_NEITHER;
      }
    }

    // Arriving at node from parent
    if (from_child == -1) {
      // if (DEBUG) {
      //   myfile << "Arriving at node from parent\n";
      // }
      node_stack[ns_ctr] = node;
      ns_ctr += 1;
      next_node = -1;
      // if (DEBUG) {
      //   myfile << "feat_hist[feat]" << feat_hist[feat] << "\n";
      // }

      // Feature is set upstream
      if (feat_hist[feat] > 0) {
        next_node = next_xnode;
        feat_hist[feat] += 1;
      } else if (feat_hist[feat] < 0) {
        next_node = next_rnode;
        feat_hist[feat] -= 1;
      }

      // x and r go the same way
      if (next_node < 0) {
        if (next_xnode == next_rnode) {
          next_node = next_xnode;
        }
      }

      // Go down one path
      if (next_node >= 0) {
        continue;
      }
      // Go down both paths, but go left first
      next_node = cl;
      if (next_rnode == next_node) {
        N = N+1;
        feat_hist[feat] -= 1;
      } else if (next_xnode == next_node) {
        M = M+1;
        N = N+1;
        feat_hist[feat] += 1;
      }
      from_child = -1;
      continue;
    }

    // Arriving at node from child
    if (from_child != -1) {
      // if (DEBUG) {
      //   myfile << "Arriving at node from child\n";
      // }
      next_node = -1;

      // Check if we should unroll immediately
      if ((next_rnode == next_xnode) || (feat_hist[feat] != 0)) {
        next_node = pnode;
      }

      // Came from a single path, so unroll
      if (next_node >= 0) {
        // if (DEBUG) {
        //   myfile << "Came from a single path, so unroll\n";
        // }
        // At the root node
        if (node == 0) {
          break;
        }

        // Update and unroll
        pos_lst[node] = pos_lst[from_child];
        neg_lst[node] = neg_lst[from_child];
        // if (DEBUG) {
        //   myfile << "pos_lst[node]: " << pos_lst[node] << "\n";
        //   myfile << "neg_lst[node]: " << neg_lst[node] << "\n";
        // }
        from_child = node;
        ns_ctr -= 1;

        // Unwind
        if (feat_hist[pfeat] > 0) {
          feat_hist[pfeat] -= 1;
        } else if (feat_hist[pfeat] < 0) {
          feat_hist[pfeat] += 1;
        }
        if (feat_hist[pfeat] == 0) {
          if (from_flag == FROM_X_NOT_R) {
            N = N-1;
            M = M-1;
          } else if (from_flag == FROM_R_NOT_X) {
            N = N-1;
          }
        }
        continue;

      // Go right - Arriving from the left child
      } else if (from_child == cl) {
        // if (DEBUG) {
        //   myfile << "Go right - Arriving from the left child\n";
        // }
        node_stack[ns_ctr] = node;
        ns_ctr += 1;
        next_node = cr;
        if (next_xnode == next_node) {
          M = M+1;
          N = N+1;
          feat_hist[feat] += 1;
        } else if (next_rnode == next_node) {
          N = N+1;
          feat_hist[feat] -= 1;
        }
        from_child = -1;
        continue;

      // Compute stuff and unroll - Arriving from the right child
      } else if (from_child == cr) {
        // if (DEBUG) {
        //   myfile << "Compute stuff and unroll - Arriving from the right child\n";
        // }
        pos_x = 0;
        neg_x = 0;
        pos_r = 0;
        neg_r = 0;
        if ((next_xnode == cr) && (next_rnode == cl)) {
          pos_x = pos_lst[cr];
          neg_x = neg_lst[cr];
          pos_r = pos_lst[cl];
          neg_r = neg_lst[cl];
        } else if ((next_xnode == cl) && (next_rnode == cr)) {
          pos_x = pos_lst[cl];
          neg_x = neg_lst[cl];
          pos_r = pos_lst[cr];
          neg_r = neg_lst[cr];
        }

        // out_contribs needs to have been initialized as all zeros
        // if (pos_x + neg_r != 0) {
        //   std::cout << "val " << pos_x + neg_r << "\n";
        // }
        out_contribs[feat] += pos_x + neg_r;
        pos_lst[node] = pos_x + pos_r;
        neg_lst[node] = neg_x + neg_r;
        // if (DEBUG) {
        //   myfile << "out_contribs[feat]: " << out_contribs[feat] << "\n";
        //   myfile << "pos_lst[node]: " << pos_lst[node] << "\n";
        //   myfile << "neg_lst[node]: " << neg_lst[node] << "\n";
        // }

        // Check if at root
        if (node == 0) {
          break;
        }

        // Pop
        ns_ctr -= 1;
        next_node = node_stack[ns_ctr];
        from_child = node;

        // Unwind
        if (feat_hist[pfeat] > 0) {
          feat_hist[pfeat] -= 1;
        } else if (feat_hist[pfeat] < 0) {
          feat_hist[pfeat] += 1;
        }
        if (feat_hist[pfeat] == 0) {
          if (from_flag == FROM_X_NOT_R) {
            N = N-1;
            M = M-1;
          } else if (from_flag == FROM_R_NOT_X) {
            N = N-1;
          }
        }
        continue;
      }
    }
  }
  // if (DEBUG) {
  //   myfile.close();
  // }
}

inline void print_progress_bar(tfloat &last_print, tfloat start_time, unsigned i,
                               unsigned total_count) {
  const tfloat elapsed_seconds = difftime(time(NULL), start_time);

  if (elapsed_seconds > 10 && elapsed_seconds - last_print > 0.5) {
    const tfloat fraction = static_cast<tfloat>(i) / total_count;
    const double total_seconds = elapsed_seconds / fraction;
    last_print = elapsed_seconds;

    PySys_WriteStderr(
      "\r%3.0f%%|%.*s%.*s| %d/%d [%02d:%02d<%02d:%02d]",
      fraction * 100, int(0.5 + fraction*20), "====================",
      20-int(0.5 + fraction*20), "                    ", i, total_count,
      int(elapsed_seconds/60), int(elapsed_seconds) % 60,
      int((total_seconds - elapsed_seconds)/60), int(total_seconds - elapsed_seconds) % 60
    );

    // Get handle to python stderr file and flush it
    // (https://mail.python.org/pipermail/python-list/2004-November/294912.html)
    PyObject *pyStderr = PySys_GetObject("stderr");
    if (pyStderr) {
      PyObject *result = PyObject_CallMethod(pyStderr, "flush", NULL);
      Py_XDECREF(result);
    }
  }
}
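// Note (added comment): dense_independent() below precomputes
// memoized_weights[N + max_depth * M] = 1 / (N * C(N-1, M)), the Shapley permutation weight
// tree_shap_indep uses for a path with N unique split features of which M match x. The
// N == 0 entries divide by zero, but tree_shap_indep only reads weights under its N != 0
// guard, so those entries are never consumed.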
/**
 * Runs Tree SHAP with feature independence assumptions on dense data.
 */
inline void dense_independent(const TreeEnsemble& trees, const ExplanationDataset &data,
                              tfloat *out_contribs,
                              tfloat transform(const tfloat, const tfloat)) {

  // reformat the trees for faster access
  Node *node_trees = new Node[trees.tree_limit * trees.max_nodes];
  for (unsigned i = 0; i < trees.tree_limit; ++i) {
    Node *node_tree = node_trees + i * trees.max_nodes;
    for (unsigned j = 0; j < trees.max_nodes; ++j) {
      const unsigned en_ind = i * trees.max_nodes + j;
      node_tree[j].cl = trees.children_left[en_ind];
      node_tree[j].cr = trees.children_right[en_ind];
      node_tree[j].cd = trees.children_default[en_ind];
      if (j == 0) {
        node_tree[j].pnode = 0;
      }
      if (trees.children_left[en_ind] >= 0) { // relies on all unused entries having negative values in them
        node_tree[trees.children_left[en_ind]].pnode = j;
        node_tree[trees.children_left[en_ind]].pfeat = trees.features[en_ind];
      }
      if (trees.children_right[en_ind] >= 0) { // relies on all unused entries having negative values in them
        node_tree[trees.children_right[en_ind]].pnode = j;
        node_tree[trees.children_right[en_ind]].pfeat = trees.features[en_ind];
      }
      node_tree[j].thres = trees.thresholds[en_ind];
      node_tree[j].feat = trees.features[en_ind];
    }
  }

  // preallocate arrays needed by the algorithm
  float *pos_lst = new float[trees.max_nodes];
  float *neg_lst = new float[trees.max_nodes];
  int *node_stack = new int[(unsigned) trees.max_depth];
  signed short *feat_hist = new signed short[data.M];
  tfloat *tmp_out_contribs = new tfloat[(data.M + 1)];

  // precompute all the weight coefficients
  float *memoized_weights = new float[(trees.max_depth+1) * (trees.max_depth+1)];
  for (unsigned n = 0; n <= trees.max_depth; ++n) {
    for (unsigned m = 0; m <= trees.max_depth; ++m) {
      memoized_weights[n + trees.max_depth * m] = 1.0 / (n * bin_coeff(n-1, m));
    }
  }

  // compute the explanations for each sample
  tfloat *instance_out_contribs;
  tfloat rescale_factor = 1.0;
  tfloat margin_x = 0;
  tfloat margin_r = 0;
  time_t start_time = time(NULL);
  tfloat last_print = 0;
  for (unsigned oind = 0; oind < trees.num_outputs; ++oind) {

    // set the values in the reformatted tree to the current output index
    for (unsigned i = 0; i < trees.tree_limit; ++i) {
      Node *node_tree = node_trees + i * trees.max_nodes;
      for (unsigned j = 0; j < trees.max_nodes; ++j) {
        const unsigned en_ind = i * trees.max_nodes + j;
        node_tree[j].value = trees.values[en_ind * trees.num_outputs + oind];
      }
    }

    // loop over all the samples
    for (unsigned i = 0; i < data.num_X; ++i) {
      const tfloat *x = data.X + i * data.M;
      const bool *x_missing = data.X_missing + i * data.M;
      instance_out_contribs = out_contribs + i * (data.M + 1) * trees.num_outputs;
      const tfloat y_i = data.y == NULL ? 0 : data.y[i];
      print_progress_bar(last_print, start_time, oind * data.num_X + i,
                         data.num_X * trees.num_outputs);

      // compute the model's margin output for x
      if (transform != NULL) {
        margin_x = trees.base_offset[oind];
        for (unsigned k = 0; k < trees.tree_limit; ++k) {
          margin_x += tree_predict(k, trees, x, x_missing)[oind];
        }
      }

      for (unsigned j = 0; j < data.num_R; ++j) {
        const tfloat *r = data.R + j * data.M;
        const bool *r_missing = data.R_missing + j * data.M;
        std::fill_n(tmp_out_contribs, (data.M + 1), 0);

        // compute the model's margin output for r
        if (transform != NULL) {
          margin_r = trees.base_offset[oind];
          for (unsigned k = 0; k < trees.tree_limit; ++k) {
            margin_r += tree_predict(k, trees, r, r_missing)[oind];
          }
        }

        for (unsigned k = 0; k < trees.tree_limit; ++k) {
          tree_shap_indep(
            trees.max_depth, data.M, trees.max_nodes, x, x_missing, r, r_missing,
            tmp_out_contribs, pos_lst, neg_lst, feat_hist, memoized_weights,
            node_stack, node_trees + k * trees.max_nodes
          );
        }

        // compute the rescale factor
        if (transform != NULL) {
          if (margin_x == margin_r) {
            rescale_factor = 1.0;
          } else {
            rescale_factor = (*transform)(margin_x, y_i) - (*transform)(margin_r, y_i);
            rescale_factor /= margin_x - margin_r;
          }
        }

        // add the effect of the current reference to our running total
        // this is where we can do per reference scaling for non-linear transformations
        for (unsigned k = 0; k < data.M; ++k) {
          instance_out_contribs[k * trees.num_outputs + oind] += tmp_out_contribs[k] * rescale_factor;
        }

        // Add the base offset
        if (transform != NULL) {
          instance_out_contribs[data.M * trees.num_outputs + oind] +=
            (*transform)(trees.base_offset[oind] + tmp_out_contribs[data.M], 0);
        } else {
          instance_out_contribs[data.M * trees.num_outputs + oind] +=
            trees.base_offset[oind] + tmp_out_contribs[data.M];
        }
      }

      // average the results over all the references
      for (unsigned j = 0; j < (data.M + 1); ++j) {
        instance_out_contribs[j * trees.num_outputs + oind] /= data.num_R;
      }

      // apply the base offset to the bias term
      // for (unsigned j = 0; j < trees.num_outputs; ++j) {
      //   instance_out_contribs[data.M * trees.num_outputs + j] += (*transform)(trees.base_offset[j], 0);
      // }
    }
  }

  delete[] tmp_out_contribs;
  delete[] node_trees;
  delete[] pos_lst;
  delete[] neg_lst;
  delete[] node_stack;
  delete[] feat_hist;
  delete[] memoized_weights;
}

/**
 * This calculates an array for distributing threads evenly across trees (in terms of tree size) for algorithm v2
 */
inline void tree_thread_v2(int *tree_thread, const unsigned int n_jobs,
                           const unsigned int tree_limit) {
  unsigned t = 0;
  for (unsigned i = 0; i < n_jobs; ++i) {
    unsigned j = i;
    while (j < tree_limit) {
      tree_thread[t] = j;
      j += n_jobs;
      t++;
    }
  }
}
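// Worked example (added comment): tree_thread_v2 lays tree indices out round-robin so that a
// static "#pragma omp for" over the resulting array hands each thread an interleaved subset
// of trees. For n_jobs = 2 and tree_limit = 5 it fills {0, 2, 4, 1, 3}: with a static
// schedule, thread 0 then processes trees 0, 2, 4 and thread 1 processes trees 1, 3.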
/**
 * This runs Tree SHAP with a per tree path conditional dependence assumption.
 */
inline void dense_tree_path_dependent(const TreeEnsemble& trees, const ExplanationDataset &data,
                                      tfloat *out_contribs,
                                      tfloat transform(const tfloat, const tfloat),
                                      const int algorithm, const int n_jobs) {
  tfloat *instance_out_contribs;
  TreeEnsemble tree;
  ExplanationDataset instance;

  // pre-define variables for algorithm v2
  const unsigned max_leaves = (trees.max_nodes + 1) / 2;
  const unsigned max_combinations = static_cast<int>(1 << trees.max_depth);
  tfloat *combination_sum;
  int *duplicated_node;
  int *tree_thread;

  // dispatch to the correct algorithm version
  switch (algorithm) {
    case ALGORITHM::v0:
      // build explanation for each sample
      #pragma omp parallel for private(instance_out_contribs, tree, instance) num_threads(n_jobs)
      for (unsigned i = 0; i < data.num_X; ++i) {
        instance_out_contribs = out_contribs + i * (data.M + 1) * trees.num_outputs;
        data.get_x_instance(instance, i);

        // aggregate the effect of explaining each tree
        // (this works because of the linearity property of Shapley values)
        for (unsigned j = 0; j < trees.tree_limit; ++j) {
          trees.get_tree(tree, j);
          tree_shap(tree, instance, instance_out_contribs, 0, 0);
        }
      }
      return;

    case ALGORITHM::v1:
      // build explanation for each sample
      #pragma omp parallel for private(instance_out_contribs, tree, instance) num_threads(n_jobs)
      for (unsigned i = 0; i < data.num_X; ++i) {
        instance_out_contribs = out_contribs + i * (data.M + 1) * trees.num_outputs;
        data.get_x_instance(instance, i);

        // aggregate the effect of explaining each tree
        // (this works because of the linearity property of Shapley values)
        for (unsigned j = 0; j < trees.tree_limit; ++j) {
          trees.get_tree(tree, j);
          tree_shap_v1(tree, instance, instance_out_contribs, 0, 0);
        }
      }
      return;

    case ALGORITHM::v2_1:
      // pre-define variables for parallel computing
      tfloat *out_contribs_local;
      tfloat *instance_out_contribs_local;

      // array for distributing threads evenly across trees
      tree_thread = new int[trees.tree_limit];
      tree_thread_v2(tree_thread, n_jobs, trees.tree_limit);

      // compute combination sum for each tree and aggregate the effect of explaining each tree
      // (this works because of the linearity property of Shapley values)
      #pragma omp parallel private(instance_out_contribs, tree, instance, combination_sum, duplicated_node, \
                                   out_contribs_local, instance_out_contribs_local) num_threads(n_jobs)
      {
        out_contribs_local = new tfloat[data.num_X * (data.M + 1) * trees.num_outputs];
        for (unsigned i = 0; i < data.num_X; ++i) {
          instance_out_contribs_local = out_contribs_local + i * (data.M + 1) * trees.num_outputs;
          for (unsigned k = 0; k < (data.M + 1) * trees.num_outputs; ++k) {
            instance_out_contribs_local[k] = 0;
          }
        }
        combination_sum = new tfloat[max_leaves * max_combinations];
        duplicated_node = new int[trees.max_nodes];

        #pragma omp for
        for (unsigned j = 0; j < trees.tree_limit; ++j) {
          trees.get_tree(tree, tree_thread[j]);
          compute_combination_sum_v2(tree, combination_sum, duplicated_node);

          // build explanation for each sample
          for (unsigned i = 0; i < data.num_X; ++i) {
            instance_out_contribs_local = out_contribs_local + i * (data.M + 1) * trees.num_outputs;
            data.get_x_instance(instance, i);
            tree_shap_v2(tree, combination_sum, duplicated_node, instance,
                         instance_out_contribs_local);
          }
        }
        delete[] combination_sum;
        delete[] duplicated_node;

        #pragma omp critical
        for (unsigned i = 0; i < data.num_X; ++i) {
          instance_out_contribs = out_contribs + i * (data.M + 1) * trees.num_outputs;
          instance_out_contribs_local = out_contribs_local + i * (data.M + 1) * trees.num_outputs;
          for (unsigned k = 0; k < (data.M + 1) * trees.num_outputs; ++k) {
            instance_out_contribs[k] += instance_out_contribs_local[k];
          }
        }
        delete[] out_contribs_local;
      }
      delete[] tree_thread;
      return;

    case ALGORITHM::v2_2:
      // pre-allocate space for combination sum and duplicated node
      combination_sum = new tfloat[max_leaves * max_combinations * trees.tree_limit];
      duplicated_node = new int[trees.max_nodes * trees.tree_limit];

      // pre-define variables for parallel computing
      tfloat *combination_sum_local;
      int *duplicated_node_local;

      // array for distributing threads evenly across trees
      tree_thread = new int[trees.tree_limit];
      tree_thread_v2(tree_thread, n_jobs, trees.tree_limit);

      // compute combination sum for each tree
      #pragma omp parallel private(tree, combination_sum_local, duplicated_node_local) num_threads(n_jobs)
      {
        #pragma omp for
        for (unsigned j = 0; j < trees.tree_limit; ++j) {
          combination_sum_local = combination_sum + tree_thread[j] * max_leaves * max_combinations;
          duplicated_node_local = duplicated_node + tree_thread[j] * trees.max_nodes;
          trees.get_tree(tree, tree_thread[j]);
          compute_combination_sum_v2(tree, combination_sum_local, duplicated_node_local);
        }
      }

      // aggregate the effect of explaining each tree
      // (this works because of the linearity property of Shapley values)
      #pragma omp parallel private(instance_out_contribs, tree, instance, combination_sum_local, duplicated_node_local) num_threads(n_jobs)
      {
        #pragma omp for
        for (unsigned i = 0; i < data.num_X; ++i) {
          instance_out_contribs = out_contribs + i * (data.M + 1) * trees.num_outputs;
          data.get_x_instance(instance, i);
          for (unsigned j = 0; j < trees.tree_limit; ++j) {
            combination_sum_local = combination_sum + j * max_leaves * max_combinations;
            duplicated_node_local = duplicated_node + j * trees.max_nodes;
            trees.get_tree(tree, j);
            tree_shap_v2(tree, combination_sum_local, duplicated_node_local, instance,
                         instance_out_contribs);
          }
        }
      }
      delete[] combination_sum;
      delete[] duplicated_node;
      delete[] tree_thread;
      return;
  }

  // apply the base offset to the bias term
  for (unsigned i = 0; i < data.num_X; ++i) {
    instance_out_contribs = out_contribs + i * (data.M + 1) * trees.num_outputs;
    for (unsigned j = 0; j < trees.num_outputs; ++j) {
      instance_out_contribs[data.M * trees.num_outputs + j] += trees.base_offset[j];
    }
  }
}

// phi = np.zeros((self._current_X.shape[1] + 1, self._current_X.shape[1] + 1, self.n_outputs))
// phi_diag = np.zeros((self._current_X.shape[1] + 1, self.n_outputs))
// for t in range(self.tree_limit):
//     self.tree_shap(self.trees[t], self._current_X[i,:], self._current_x_missing, phi_diag)
//     for j in self.trees[t].unique_features:
//         phi_on = np.zeros((self._current_X.shape[1] + 1, self.n_outputs))
//         phi_off = np.zeros((self._current_X.shape[1] + 1, self.n_outputs))
//         self.tree_shap(self.trees[t], self._current_X[i,:], self._current_x_missing, phi_on, 1, j)
//         self.tree_shap(self.trees[t], self._current_X[i,:], self._current_x_missing, phi_off, -1, j)
//         phi[j] += np.true_divide(np.subtract(phi_on,phi_off),2.0)
//         phi_diag[j] -= np.sum(np.true_divide(np.subtract(phi_on,phi_off),2.0))
// for j in range(self._current_X.shape[1]+1):
//     phi[j][j] = phi_diag[j]
// phi /= self.tree_limit
// return phi
inline void dense_tree_interactions_path_dependent(const TreeEnsemble& trees,
                                                   const ExplanationDataset &data,
                                                   tfloat *out_contribs,
                                                   tfloat transform(const tfloat, const tfloat),
                                                   const int algorithm, const int n_jobs) {
  // build a list of all the unique features in each tree
  int amount_of_unique_features = min(data.M, trees.max_nodes);
  int *unique_features = new int[trees.tree_limit * amount_of_unique_features];
  std::fill(unique_features, unique_features + trees.tree_limit * amount_of_unique_features, -1);
  for (unsigned j = 0; j < trees.tree_limit; ++j) {
    const int *features_row = trees.features + j * trees.max_nodes;
    int *unique_features_row = unique_features + j * amount_of_unique_features;
    for (unsigned k = 0; k < trees.max_nodes; ++k) {
      for (unsigned l = 0; l < amount_of_unique_features; ++l) {
        if (features_row[k] == unique_features_row[l]) break;
        if (unique_features_row[l] < 0) {
          unique_features_row[l] = features_row[k];
          break;
        }
      }
    }
  }

  // build an interaction explanation for each sample
  tfloat *instance_out_contribs;
  TreeEnsemble tree;
  ExplanationDataset instance;
  const unsigned contrib_row_size = (data.M + 1) * trees.num_outputs;
  tfloat *diag_contribs;
  tfloat *on_contribs;
  tfloat *off_contribs;

  // dispatch to the correct algorithm version
  switch (algorithm) {
    case ALGORITHM::v0:
      #pragma omp parallel private(instance_out_contribs, tree, instance, diag_contribs, on_contribs, off_contribs) num_threads(n_jobs)
      {
        diag_contribs = new tfloat[contrib_row_size];
        on_contribs = new tfloat[contrib_row_size];
        off_contribs = new tfloat[contrib_row_size];

        #pragma omp for
        for (unsigned i = 0; i < data.num_X; ++i) {
          instance_out_contribs = out_contribs + i * (data.M + 1) * contrib_row_size;
          data.get_x_instance(instance, i);

          // aggregate the effect of explaining each tree
          // (this works because of the linearity property of Shapley values)
          std::fill(diag_contribs, diag_contribs + contrib_row_size, 0);
          for (unsigned j = 0; j < trees.tree_limit; ++j) {
            trees.get_tree(tree, j);
            tree_shap(tree, instance, diag_contribs, 0, 0);

            const int *unique_features_row = unique_features + j * amount_of_unique_features;
            for (unsigned k = 0; k < amount_of_unique_features; ++k) {
              const int ind = unique_features_row[k];
              if (ind < 0) break; // < 0 means we have seen all the features for this tree

              // compute the shap value with this feature held on and off
              std::fill(on_contribs, on_contribs + contrib_row_size, 0);
              std::fill(off_contribs, off_contribs + contrib_row_size, 0);
              tree_shap(tree, instance, on_contribs, 1, ind);
              tree_shap(tree, instance, off_contribs, -1, ind);

              // save the difference between on and off as the interaction value
              for (unsigned l = 0; l < contrib_row_size; ++l) {
                const tfloat val = (on_contribs[l] - off_contribs[l]) / 2;
                instance_out_contribs[ind * contrib_row_size + l] += val;
                diag_contribs[l] -= val;
              }
            }
          }

          // set the diagonal
          for (unsigned j = 0; j < data.M + 1; ++j) {
            const unsigned offset = j * contrib_row_size + j * trees.num_outputs;
            for (unsigned k = 0; k < trees.num_outputs; ++k) {
              instance_out_contribs[offset + k] = diag_contribs[j * trees.num_outputs + k];
            }
          }

          // apply the base offset to the bias term
          const unsigned last_ind = (data.M * (data.M + 1) + data.M) * trees.num_outputs;
          for (unsigned j = 0; j < trees.num_outputs; ++j) {
            instance_out_contribs[last_ind + j] += trees.base_offset[j];
          }
        }
        delete[] diag_contribs;
        delete[] on_contribs;
        delete[] off_contribs;
      }
      return;

    case ALGORITHM::v1:
      #pragma omp parallel private(instance_out_contribs, tree, instance, diag_contribs, on_contribs, off_contribs) num_threads(n_jobs)
      {
        diag_contribs = new tfloat[contrib_row_size];
        on_contribs = new tfloat[contrib_row_size];
        off_contribs = new tfloat[contrib_row_size];

        #pragma omp for
        for (unsigned i = 0; i < data.num_X; ++i) {
          instance_out_contribs = out_contribs + i * (data.M + 1) * contrib_row_size;
          data.get_x_instance(instance, i);
          // aggregate the effect of explaining each tree
          // (this works because of the linearity property of Shapley values)
          std::fill(diag_contribs, diag_contribs + contrib_row_size, 0);
          for (unsigned j = 0; j < trees.tree_limit; ++j) {
            trees.get_tree(tree, j);
            tree_shap_v1(tree, instance, diag_contribs, 0, 0);

            const int *unique_features_row = unique_features + j * amount_of_unique_features;
            for (unsigned k = 0; k < amount_of_unique_features; ++k) {
              const int ind = unique_features_row[k];
              if (ind < 0) break; // < 0 means we have seen all the features for this tree

              // compute the shap value with this feature held on and off
              std::fill(on_contribs, on_contribs + contrib_row_size, 0);
              std::fill(off_contribs, off_contribs + contrib_row_size, 0);
              tree_shap_v1(tree, instance, on_contribs, 1, ind);
              tree_shap_v1(tree, instance, off_contribs, -1, ind);

              // save the difference between on and off as the interaction value
              for (unsigned l = 0; l < contrib_row_size; ++l) {
                const tfloat val = (on_contribs[l] - off_contribs[l]) / 2;
                instance_out_contribs[ind * contrib_row_size + l] += val;
                diag_contribs[l] -= val;
              }
            }
          }

          // set the diagonal
          for (unsigned j = 0; j < data.M + 1; ++j) {
            const unsigned offset = j * contrib_row_size + j * trees.num_outputs;
            for (unsigned k = 0; k < trees.num_outputs; ++k) {
              instance_out_contribs[offset + k] = diag_contribs[j * trees.num_outputs + k];
            }
          }

          // apply the base offset to the bias term
          const unsigned last_ind = (data.M * (data.M + 1) + data.M) * trees.num_outputs;
          for (unsigned j = 0; j < trees.num_outputs; ++j) {
            instance_out_contribs[last_ind + j] += trees.base_offset[j];
          }
        }
        delete[] diag_contribs;
        delete[] on_contribs;
        delete[] off_contribs;
      }
      return;

    case ALGORITHM::v2_1:
      std::cerr << "ALGORITHM::v2 does not support interactions!\n";
      return;

    case ALGORITHM::v2_2:
      std::cerr << "ALGORITHM::v2 does not support interactions!\n";
      return;
  }
  delete[] unique_features;
}

/**
 * This runs Tree SHAP with a global path conditional dependence assumption.
 *
 * By first merging all the trees in a tree ensemble into an equivalent single tree
 * this method allows arbitrary marginal transformations and also ensures that all the
 * evaluations of the model are consistent with some training data point.
 */
inline void dense_global_path_dependent(const TreeEnsemble& trees, const ExplanationDataset &data,
                                        tfloat *out_contribs,
                                        tfloat transform(const tfloat, const tfloat)) {
  // allocate space for our new merged tree (we save enough room to totally split all samples if need be)
  TreeEnsemble merged_tree;
  merged_tree.allocate(1, (data.num_X + data.num_R) * 2, trees.num_outputs);

  // collapse the ensemble of trees into a single tree that has the same behavior
  // for all the X and R samples in the dataset
  build_merged_tree(merged_tree, data, trees);

  // compute the expected value and depth of the new merged tree
  compute_expectations(merged_tree);

  // explain each sample using our new merged tree
  ExplanationDataset instance;
  tfloat *instance_out_contribs;
  for (unsigned i = 0; i < data.num_X; ++i) {
    instance_out_contribs = out_contribs + i * (data.M + 1) * trees.num_outputs;
    data.get_x_instance(instance, i);

    // since we now just have a single merged tree we can just use the tree_path_dependent algorithm
    tree_shap(merged_tree, instance, instance_out_contribs, 0, 0);

    // apply the base offset to the bias term
    for (unsigned j = 0; j < trees.num_outputs; ++j) {
      instance_out_contribs[data.M * trees.num_outputs + j] += trees.base_offset[j];
    }
  }

  merged_tree.free();
}
/**
 * The main method for computing Tree SHAP on models using dense data.
 */
inline void dense_tree_shap(const TreeEnsemble& trees, const ExplanationDataset &data,
                            tfloat *out_contribs, const int feature_dependence,
                            unsigned model_transform, const int algorithm, const int n_jobs,
                            bool interactions) {
  // see what transform (if any) we have
  transform_f transform = get_transform(model_transform);

  // dispatch to the correct algorithm handler
  switch (feature_dependence) {
    case FEATURE_DEPENDENCE::independent:
      if (interactions) {
        std::cerr << "FEATURE_DEPENDENCE::independent does not support interactions!\n";
      } else dense_independent(trees, data, out_contribs, transform);
      return;

    case FEATURE_DEPENDENCE::tree_path_dependent:
      if (interactions) dense_tree_interactions_path_dependent(trees, data, out_contribs,
                                                               transform, algorithm, n_jobs);
      else dense_tree_path_dependent(trees, data, out_contribs, transform, algorithm, n_jobs);
      return;

    case FEATURE_DEPENDENCE::global_path_dependent:
      if (interactions) {
        std::cerr << "FEATURE_DEPENDENCE::global_path_dependent does not support interactions!\n";
      } else dense_global_path_dependent(trees, data, out_contribs, transform);
      return;
  }
}
task_types_serialized.c
// RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>

__attribute__ ((noinline)) // workaround for bug in icc
void print_task_type(int id) {
  #pragma omp critical
  {
    int task_type;
    char buffer[2048];
    ompt_get_task_info(0, &task_type, NULL, NULL, NULL, NULL);
    format_task_type(task_type, buffer);
    printf("%" PRIu64 ": id=%d task_type=%s=%d\n", ompt_get_thread_data()->value, id,
           buffer, task_type);
  }
}

int main() {
  // initial task
  print_task_type(0);

  int x;
  // implicit task
  #pragma omp parallel num_threads(1)
  {
    print_task_type(1);
    x++;
  }

  #pragma omp parallel num_threads(1)
  #pragma omp master
  {
    // explicit task
    #pragma omp task
    {
      print_task_type(2);
      x++;
    }

    // explicit task with undeferred
    #pragma omp task if(0)
    {
      print_task_type(3);
      x++;
    }

    // explicit task with untied
    #pragma omp task untied
    {
      print_task_type(4);
      x++;
    }

    // explicit task with final
    #pragma omp task final(1)
    {
      print_task_type(5);
      x++;
      // nested explicit task with final and undeferred
      #pragma omp task
      {
        print_task_type(6);
        x++;
      }
    }

    /*
    // TODO: not working
    // explicit task with mergeable
    #pragma omp task mergeable
    {
      print_task_type(7);
      x++;
    }
    */
    // TODO: merged task
  }
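  // Note (added comment): the task_type integers matched below are bitwise ORs of OMPT flag
  // bits: ompt_task_initial=0x1, ompt_task_implicit=0x2, ompt_task_explicit=0x4,
  // ompt_task_undeferred=0x08000000, ompt_task_untied=0x10000000, ompt_task_final=0x20000000.
  // For example 134217730 = 0x8000002 = implicit|undeferred, and
  // 671088644 = 0x28000004 = explicit|undeferred|final.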
  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'

  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_initial_task_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, actual_parallelism=1, index=1, flags=1
  // CHECK: {{^}}[[MASTER_ID]]: id=0 task_type=ompt_task_initial=1
  // CHECK: {{^}}[[MASTER_ID]]: id=1 task_type=ompt_task_implicit|ompt_task_undeferred=134217730

  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred=134217732, has_dependences=no
  // CHECK: {{^[0-9]+}}: id=2 task_type=ompt_task_explicit|ompt_task_undeferred=134217732

  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred=134217732, has_dependences=no
  // CHECK: {{^[0-9]+}}: id=3 task_type=ompt_task_explicit|ompt_task_undeferred=134217732

  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_untied=402653188, has_dependences=no
  // CHECK: {{^[0-9]+}}: id=4 task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_untied=402653188

  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644, has_dependences=no
  // CHECK: {{^[0-9]+}}: id=5 task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644

  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644, has_dependences=no
  // CHECK: {{^[0-9]+}}: id=6 task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644

  // ___CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred=134217732, has_dependences=no
  // ___CHECK: {{^[0-9]+}}: id=7 task_type=ompt_task_explicit|ompt_task_undeferred=134217732

  return 0;
}
channel.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%               CCCC  H   H   AAA   N   N  N   N  EEEEE  L                    %
%              C      H   H  A   A  NN  N  NN  N  E      L                    %
%              C      HHHHH  AAAAA  N N N  N N N  EEE    L                    %
%              C      H   H  A   A  N  NN  N  NN  E      L                    %
%               CCCC  H   H  A   A  N   N  N   N  EEEEE  LLLLL                %
%                                                                             %
%                                                                             %
%                      MagickCore Image Channel Methods                       %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                               December 2003                                 %
%                                                                             %
%                                                                             %
%  Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C h a n n e l F x I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ChannelFxImage() applies a channel expression to the specified image.  The
%  expression consists of one or more channels, either mnemonic or numeric (e.g.
%  red, 1), separated by actions as follows:
%
%    <=>     exchange two channels (e.g. red<=>blue)
%    =>      copy one channel to another channel (e.g. red=>green)
%    =       assign a constant value to a channel (e.g. red=50%)
%    ,       write new image channels in the specified order (e.g. red, green)
%    |       add a new output image for the next set of channel operations
%    ;       move to the next input image for the source of channel data
%
%  For example, to create 3 grayscale images from the red, green, and blue
%  channels of an image, use:
%
%    -channel-fx "red; green; blue"
%
%  A channel without an operation symbol implies separate (i.e., semicolon).
%
%  The format of the ChannelFxImage method is:
%
%      Image *ChannelFxImage(const Image *image,const char *expression,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o expression: A channel expression.
%
%    o exception: return any errors or warnings in this structure.
%
*/
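/*
  Illustrative sketch (added; not part of the original source): a minimal MagickCore-style
  call into ChannelFxImage(), here swapping the red and blue channels of an image obtained
  elsewhere.  The variable names are hypothetical.

    ExceptionInfo *exception = AcquireExceptionInfo();
    Image *swapped = ChannelFxImage(image, "red<=>blue", exception);
    if (swapped == (Image *) NULL)
      CatchException(exception);
*/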
typedef enum
{
  ExtractChannelOp,
  AssignChannelOp,
  ExchangeChannelOp,
  TransferChannelOp
} ChannelFx;

static MagickBooleanType ChannelImage(Image *destination_image,
  const PixelChannel destination_channel,const ChannelFx channel_op,
  const Image *source_image,const PixelChannel source_channel,
  const Quantum pixel,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  size_t
    height,
    width;

  ssize_t
    y;

  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source_image,exception);
  destination_view=AcquireAuthenticCacheView(destination_image,exception);
  height=MagickMin(source_image->rows,destination_image->rows);
  width=MagickMin(source_image->columns,destination_image->columns);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelTrait
      destination_traits,
      source_traits;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(destination_view,0,y,
      destination_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    destination_traits=GetPixelChannelTraits(destination_image,
      destination_channel);
    source_traits=GetPixelChannelTraits(source_image,source_channel);
    if ((destination_traits == UndefinedPixelTrait) ||
        (source_traits == UndefinedPixelTrait))
      continue;
    for (x=0; x < (ssize_t) width; x++)
    {
      if (channel_op == AssignChannelOp)
        SetPixelChannel(destination_image,destination_channel,pixel,q);
      else
        SetPixelChannel(destination_image,destination_channel,
          GetPixelChannel(source_image,source_channel,p),q);
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(destination_image);
    }
    if (SyncCacheViewAuthenticPixels(destination_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

MagickExport Image *ChannelFxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define ChannelFxImageTag  "ChannelFx/Image"

  ChannelFx
    channel_op;

  ChannelType
    channel_mask;

  char
    token[MagickPathExtent];

  const char
    *p;

  const Image
    *source_image;

  double
    pixel;

  Image
    *destination_image;

  MagickBooleanType
    status;

  PixelChannel
    source_channel,
    destination_channel;

  ssize_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  source_image=image;
  destination_image=CloneImage(source_image,0,0,MagickTrue,exception);
  if (destination_image == (Image *) NULL)
    return((Image *) NULL);
  if (expression == (const char *) NULL)
    return(destination_image);
  status=SetImageStorageClass(destination_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      destination_image=GetLastImageInList(destination_image);
      return((Image *) NULL);
    }
  destination_channel=RedPixelChannel;
  channel_mask=UndefinedChannel;
  pixel=0.0;
  p=(char *) expression;
  (void) GetNextToken(p,&p,MagickPathExtent,token);
  channel_op=ExtractChannelOp;
  for (channels=0; *token != '\0'; )
  {
    ssize_t
      i;

    /*
      Interpret channel expression.
    */
*/ switch (*token) { case ',': { (void) GetNextToken(p,&p,MagickPathExtent,token); break; } case '|': { if (GetNextImageInList(source_image) != (Image *) NULL) source_image=GetNextImageInList(source_image); else source_image=GetFirstImageInList(source_image); (void) GetNextToken(p,&p,MagickPathExtent,token); break; } case ';': { Image *canvas; (void) SetPixelChannelMask(destination_image,channel_mask); if ((channel_op == ExtractChannelOp) && (channels == 1)) { (void) SetPixelMetaChannels(destination_image,0,exception); (void) SetImageColorspace(destination_image,GRAYColorspace, exception); } canvas=CloneImage(source_image,0,0,MagickTrue,exception); if (canvas == (Image *) NULL) { destination_image=DestroyImageList(destination_image); return(destination_image); } AppendImageToList(&destination_image,canvas); destination_image=GetLastImageInList(destination_image); status=SetImageStorageClass(destination_image,DirectClass,exception); if (status == MagickFalse) { destination_image=GetLastImageInList(destination_image); return((Image *) NULL); } (void) GetNextToken(p,&p,MagickPathExtent,token); channels=0; destination_channel=RedPixelChannel; channel_mask=UndefinedChannel; break; } default: break; } i=ParsePixelChannelOption(token); if (i < 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnrecognizedChannelType","`%s'",token); destination_image=DestroyImageList(destination_image); return(destination_image); } source_channel=(PixelChannel) i; channel_op=ExtractChannelOp; (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == '<') { channel_op=ExchangeChannelOp; (void) GetNextToken(p,&p,MagickPathExtent,token); } if (*token == '=') { if (channel_op != ExchangeChannelOp) channel_op=AssignChannelOp; (void) GetNextToken(p,&p,MagickPathExtent,token); } if (*token == '>') { if (channel_op != ExchangeChannelOp) channel_op=TransferChannelOp; (void) GetNextToken(p,&p,MagickPathExtent,token); } switch (channel_op) { case AssignChannelOp: case ExchangeChannelOp: case TransferChannelOp: { if (channel_op == AssignChannelOp) pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0); else { i=ParsePixelChannelOption(token); if (i < 0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnrecognizedChannelType","`%s'",token); destination_image=DestroyImageList(destination_image); return(destination_image); } } destination_channel=(PixelChannel) i; if (i >= (ssize_t) GetPixelChannels(destination_image)) (void) SetPixelMetaChannels(destination_image,(size_t) ( destination_channel-GetPixelChannels(destination_image)+1), exception); if (image->colorspace != UndefinedColorspace) switch (destination_channel) { case RedPixelChannel: case GreenPixelChannel: case BluePixelChannel: case BlackPixelChannel: case IndexPixelChannel: break; case AlphaPixelChannel: { destination_image->alpha_trait=BlendPixelTrait; break; } case CompositeMaskPixelChannel: { destination_image->channels=(ChannelType) (destination_image->channels | CompositeMaskChannel); break; } case ReadMaskPixelChannel: { destination_image->channels=(ChannelType) (destination_image->channels | ReadMaskChannel); break; } case WriteMaskPixelChannel: { destination_image->channels=(ChannelType) (destination_image->channels | WriteMaskChannel); break; } case MetaPixelChannel: default: { (void) SetPixelMetaChannels(destination_image,(size_t) ( destination_channel-GetPixelChannels(destination_image)+1), exception); break; } } channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token)); if 
(((channels >= 1) || (destination_channel >= 1)) &&
            (IsGrayColorspace(destination_image->colorspace) != MagickFalse))
          (void) SetImageColorspace(destination_image,sRGBColorspace,exception);
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      default:
        break;
    }
    status=ChannelImage(destination_image,destination_channel,channel_op,
      source_image,source_channel,ClampToQuantum(pixel),exception);
    if (status == MagickFalse)
      {
        destination_image=DestroyImageList(destination_image);
        break;
      }
    channels++;
    if (channel_op == ExchangeChannelOp)
      {
        status=ChannelImage(destination_image,source_channel,channel_op,
          source_image,destination_channel,ClampToQuantum(pixel),exception);
        if (status == MagickFalse)
          {
            destination_image=DestroyImageList(destination_image);
            break;
          }
        channels++;
      }
    switch (channel_op)
    {
      case ExtractChannelOp:
      {
        channel_mask=(ChannelType) (channel_mask |
          (1UL << destination_channel));
        destination_channel=(PixelChannel) (destination_channel+1);
        break;
      }
      default:
        break;
    }
    status=SetImageProgress(source_image,ChannelFxImageTag,p-expression,
      strlen(expression));
    if (status == MagickFalse)
      break;
  }
  (void) SetPixelChannelMask(destination_image,channel_mask);
  if ((channel_op == ExtractChannelOp) && (channels == 1))
    {
      (void) SetPixelMetaChannels(destination_image,0,exception);
      (void) SetImageColorspace(destination_image,GRAYColorspace,exception);
    }
  return(GetFirstImageInList(destination_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o m b i n e I m a g e s                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CombineImages() combines one or more images into a single image.  The
%  grayscale value of the pixels of each image in the sequence is assigned in
%  order to the specified channels of the combined image.  The typical
%  ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
%  The format of the CombineImages method is:
%
%      Image *CombineImages(const Image *images,const ColorspaceType colorspace,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o colorspace: the image colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CombineImages(const Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define CombineImageTag  "Combine/Image"

  CacheView
    *combine_view;

  Image
    *combine_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Ensure the images are the same size.
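    Sources smaller than the combined canvas simply contribute fewer pixels:
    column reads are clamped to each source image's width below, and rows
    outside a source fall back to virtual pixels.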
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); combine_image=CloneImage(image,0,0,MagickTrue,exception); if (combine_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse) { combine_image=DestroyImage(combine_image); return((Image *) NULL); } if (colorspace != UndefinedColorspace) (void) SetImageColorspace(combine_image,colorspace,exception); else if (fabs(image->gamma-1.0) <= MagickEpsilon) (void) SetImageColorspace(combine_image,RGBColorspace,exception); else (void) SetImageColorspace(combine_image,sRGBColorspace,exception); switch (combine_image->colorspace) { case UndefinedColorspace: case sRGBColorspace: { if (GetImageListLength(image) > 3) combine_image->alpha_trait=BlendPixelTrait; break; } case LinearGRAYColorspace: case GRAYColorspace: { if (GetImageListLength(image) > 1) combine_image->alpha_trait=BlendPixelTrait; break; } case CMYKColorspace: { if (GetImageListLength(image) > 4) combine_image->alpha_trait=BlendPixelTrait; break; } default: break; } /* Combine images. */ status=MagickTrue; progress=0; combine_view=AcquireAuthenticCacheView(combine_image,exception); for (y=0; y < (ssize_t) combine_image->rows; y++) { CacheView *image_view; const Image *next; Quantum *pixels; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t i; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns, 1,exception); if (pixels == (Quantum *) NULL) { status=MagickFalse; continue; } next=image; for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++) { ssize_t x; PixelChannel channel = GetPixelChannelChannel(combine_image,i); PixelTrait traits = GetPixelChannelTraits(combine_image,channel); if (traits == UndefinedPixelTrait) continue; if (next == (Image *) NULL) continue; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const Quantum *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { if (x < (ssize_t) next->columns) { q[i]=GetPixelIntensity(next,p); p+=GetPixelChannels(next); } q+=GetPixelChannels(combine_image); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CombineImageTag,progress, combine_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } combine_view=DestroyCacheView(combine_view); if (status == MagickFalse) combine_image=DestroyImage(combine_image); return(combine_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageAlphaChannel() returns MagickFalse if the image alpha channel is % not activated. That is, the image is RGB rather than RGBA or CMYK rather % than CMYKA. 
% % The format of the GetImageAlphaChannel method is: % % MagickBooleanType GetImageAlphaChannel(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); return(image->alpha_trait != UndefinedPixelTrait ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImage() separates a channel from the image and returns it as a % grayscale image. % % The format of the SeparateImage method is: % % Image *SeparateImage(const Image *image,const ChannelType channel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the image channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SeparateImage(const Image *image, const ChannelType channel_type,ExceptionInfo *exception) { #define GetChannelBit(mask,bit) (((size_t) (mask) >> (size_t) (bit)) & 0x01) #define SeparateImageTag "Separate/Image" CacheView *image_view, *separate_view; Image *separate_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Initialize separate image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); separate_image=CloneImage(image,0,0,MagickTrue,exception); if (separate_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse) { separate_image=DestroyImage(separate_image); return((Image *) NULL); } separate_image->alpha_trait=UndefinedPixelTrait; (void) SetImageColorspace(separate_image,GRAYColorspace,exception); separate_image->gamma=image->gamma; /* Separate image. 
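      Each destination pixel's gray channel starts at zero; every source
      channel selected by channel_type then overwrites it, so when several
      channels are selected the last one wins.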
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); separate_view=AcquireAuthenticCacheView(separate_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; SetPixelChannel(separate_image,GrayPixelChannel,(Quantum) 0,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (GetChannelBit(channel_type,channel) == 0)) continue; SetPixelChannel(separate_image,GrayPixelChannel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(separate_image); } if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } separate_view=DestroyCacheView(separate_view); image_view=DestroyCacheView(image_view); (void) SetImageChannelMask(separate_image,DefaultChannels); if (status == MagickFalse) separate_image=DestroyImage(separate_image); return(separate_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImages() returns a separate grayscale image for each channel % specified. % % The format of the SeparateImages method is: % % Image *SeparateImages(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
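%
%  A hedged usage sketch (error handling elided): produce one grayscale image
%  per updatable channel, returned as an image list:
%
%    Image
%      *channel_images;
%
%    channel_images=SeparateImages(image,exception);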
% */ MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception) { Image *images, *separate_image; ssize_t i; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); images=NewImageList(); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0)) continue; separate_image=SeparateImage(image,(ChannelType) (1UL << channel), exception); if (separate_image != (Image *) NULL) AppendImageToList(&images,separate_image); } if (images == (Image *) NULL) images=SeparateImage(image,UndefinedChannel,exception); return(images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha % channel. % % The format of the SetImageAlphaChannel method is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % const AlphaChannelOption alpha_type,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o alpha_type: The alpha channel type: ActivateAlphaChannel, % AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel, % DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel, % OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel, % and TransparentAlphaChannel. % % o exception: return any errors or warnings in this structure. % */ static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p, const double alpha,const Quantum *q,const double beta, Quantum *composite) { double Da, gamma, Sa; ssize_t i; /* Compose pixel p over pixel q with the given alpha. */ Sa=QuantumScale*alpha; Da=QuantumScale*beta, gamma=Sa*(-Da)+Sa+Da; gamma=PerceptibleReciprocal(gamma); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; switch (channel) { case RedPixelChannel: { composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta, (double) p->red,alpha)); break; } case GreenPixelChannel: { composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta, (double) p->green,alpha)); break; } case BluePixelChannel: { composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta, (double) p->blue,alpha)); break; } case BlackPixelChannel: { composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta, (double) p->black,alpha)); break; } case AlphaPixelChannel: { composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da)); break; } default: break; } } } MagickExport MagickBooleanType SetImageAlphaChannel(Image *image, const AlphaChannelOption alpha_type,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); status=MagickTrue; switch (alpha_type) { case ActivateAlphaChannel: { image->alpha_trait=BlendPixelTrait; break; } case AssociateAlphaChannel: { /* Associate alpha. 
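        Multiply each updatable color channel by the normalized alpha value
        so pixel values become premultiplied (associated) alpha.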
*/ status=SetImageStorageClass(image,DirectClass,exception); if (status == MagickFalse) break; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; ssize_t i; gamma=QuantumScale*GetPixelAlpha(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (channel == AlphaPixelChannel) continue; if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(gamma*q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->alpha_trait=CopyPixelTrait; return(status); } case BackgroundAlphaChannel: { /* Set transparent pixels to background color. */ if (image->alpha_trait == UndefinedPixelTrait) break; status=SetImageStorageClass(image,DirectClass,exception); if (status == MagickFalse) break; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelAlpha(image,q) == TransparentAlpha) { SetPixelViaPixelInfo(image,&image->background_color,q); SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case CopyAlphaChannel: { image->alpha_trait=UpdatePixelTrait; status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0, exception); break; } case DeactivateAlphaChannel: { if (image->alpha_trait == UndefinedPixelTrait) status=SetImageAlpha(image,OpaqueAlpha,exception); image->alpha_trait=CopyPixelTrait; break; } case DisassociateAlphaChannel: { /* Disassociate alpha. 
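        Divide each updatable color channel by the normalized alpha value to
        undo premultiplication; PerceptibleReciprocal() guards against an
        alpha that is (near) zero.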
*/ status=SetImageStorageClass(image,DirectClass,exception); if (status == MagickFalse) break; image->alpha_trait=BlendPixelTrait; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma, Sa; ssize_t i; Sa=QuantumScale*GetPixelAlpha(image,q); gamma=PerceptibleReciprocal(Sa); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (channel == AlphaPixelChannel) continue; if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(gamma*q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->alpha_trait=UndefinedPixelTrait; return(status); } case DiscreteAlphaChannel: { if (image->alpha_trait == UndefinedPixelTrait) status=SetImageAlpha(image,OpaqueAlpha,exception); image->alpha_trait=UpdatePixelTrait; break; } case ExtractAlphaChannel: { status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0, exception); image->alpha_trait=UndefinedPixelTrait; break; } case OffAlphaChannel: { image->alpha_trait=UndefinedPixelTrait; break; } case OnAlphaChannel: { if (image->alpha_trait == UndefinedPixelTrait) status=SetImageAlpha(image,OpaqueAlpha,exception); image->alpha_trait=BlendPixelTrait; break; } case OpaqueAlphaChannel: { status=SetImageAlpha(image,OpaqueAlpha,exception); break; } case RemoveAlphaChannel: { /* Remove transparency. */ if (image->alpha_trait == UndefinedPixelTrait) break; status=SetImageStorageClass(image,DirectClass,exception); if (status == MagickFalse) break; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { FlattenPixelInfo(image,&image->background_color, image->background_color.alpha,q,(double) GetPixelAlpha(image,q),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->alpha_trait=image->background_color.alpha_trait; break; } case SetAlphaChannel: { if (image->alpha_trait == UndefinedPixelTrait) status=SetImageAlpha(image,OpaqueAlpha,exception); break; } case ShapeAlphaChannel: { PixelInfo background; /* Remove transparency. 
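        For shape alpha, each pixel is replaced by the background color and
        its intensity becomes the new alpha, so the image acts as its own
        grayscale mask.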
*/ ConformPixelInfo(image,&image->background_color,&background,exception); background.alpha_trait=BlendPixelTrait; image->alpha_trait=BlendPixelTrait; status=SetImageStorageClass(image,DirectClass,exception); if (status == MagickFalse) break; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=background; for (x=0; x < (ssize_t) image->columns; x++) { pixel.alpha=GetPixelIntensity(image,q); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); break; } case TransparentAlphaChannel: { status=SetImageAlpha(image,TransparentAlpha,exception); break; } case UndefinedAlphaChannel: break; } if (status == MagickFalse) return(status); (void) SetPixelChannelMask(image,image->channel_mask); return(SyncImagePixelCache(image,exception)); }
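/*
  A hedged usage sketch (error handling elided): premultiply the color
  channels by alpha, operate in associated-alpha space, then undo it:

    (void) SetImageAlphaChannel(image,AssociateAlphaChannel,exception);
    ...
    (void) SetImageAlphaChannel(image,DisassociateAlphaChannel,exception);
*/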
convolution_sgemm_pack1to4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

static void im2col_sgemm_pack1to4_bf16s_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 2u, 1, opt.workspace_allocator);

    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    const float* bias = _bias;

    // permute
    Mat tmp;
    if (size >= 8)
        tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 2u, 1, opt.workspace_allocator);
    else if (size >= 4)
        tmp.create(4 * maxk, inch, size / 4 + size % 4, 2u, 1, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 2u, 1, opt.workspace_allocator);
    {
        int nn_size = size >> 3;
        int remain_size_start = 0;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 8;

            unsigned short* tmpptr = tmp.channel(i / 8);

            for (int q = 0; q < inch; q++)
            {
                const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
#if __ARM_NEON
                    vst1q_u16(tmpptr, vld1q_u16(img0));
#else
                    tmpptr[0] = img0[0];
                    tmpptr[1] = img0[1];
                    tmpptr[2] = img0[2];
                    tmpptr[3] = img0[3];
                    tmpptr[4] = img0[4];
                    tmpptr[5] = img0[5];
                    tmpptr[6] = img0[6];
                    tmpptr[7] = img0[7];
#endif // __ARM_NEON
                    img0 += size;
                    tmpptr += 8;
                }
            }
        }

        remain_size_start += nn_size << 3;
        nn_size = (size - remain_size_start) >> 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;

            unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);

            for (int q = 0; q < inch; q++)
            {
                const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
#if __ARM_NEON
                    vst1_u16(tmpptr, vld1_u16(img0));
#else
                    tmpptr[0] = img0[0];
                    tmpptr[1] = img0[1];
                    tmpptr[2] = img0[2];
                    tmpptr[3] = img0[3];
#endif // __ARM_NEON
                    img0 += size;
                    tmpptr += 4;
                }
            }
        }

        remain_size_start += nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);

            for (int q = 0; q < inch; q++)
            {
                const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    img0 += size;
                    tmpptr += 1;
                }
            }
        }
    }

    int remain_outch_start = 0;

#if __ARM_NEON && __aarch64__
    int nn_outch = outch >> 1;
    remain_outch_start = nn_outch << 1;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 2;

        unsigned short* outptr0 = top_blob.channel(p);
        unsigned short* outptr1 = top_blob.channel(p + 1);

        const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
        const float* biasptr = bias ?
bias + p * 4 : zeros; int i = 0; for (; i + 7 < size; i += 8) { const unsigned short* tmpptr = tmp.channel(i / 8); const unsigned short* kptr0 = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 float32x4_t _sum00 = vld1q_f32(biasptr); float32x4_t _sum01 = vld1q_f32(biasptr); float32x4_t _sum02 = vld1q_f32(biasptr); float32x4_t _sum03 = vld1q_f32(biasptr); float32x4_t _sum04 = vld1q_f32(biasptr); float32x4_t _sum05 = vld1q_f32(biasptr); float32x4_t _sum06 = vld1q_f32(biasptr); float32x4_t _sum07 = vld1q_f32(biasptr); float32x4_t _sum10 = vld1q_f32(biasptr + 4); float32x4_t _sum11 = vld1q_f32(biasptr + 4); float32x4_t _sum12 = vld1q_f32(biasptr + 4); float32x4_t _sum13 = vld1q_f32(biasptr + 4); float32x4_t _sum14 = vld1q_f32(biasptr + 4); float32x4_t _sum15 = vld1q_f32(biasptr + 4); float32x4_t _sum16 = vld1q_f32(biasptr + 4); float32x4_t _sum17 = vld1q_f32(biasptr + 4); for (int j = 0; j < nn; j++) { float32x4_t _val0 = vcvt_f32_bf16(vld1_u16(tmpptr)); float32x4_t _val1 = vcvt_f32_bf16(vld1_u16(tmpptr + 4)); float32x4_t _w0 = vcvt_f32_bf16(vld1_u16(kptr0)); float32x4_t _w1 = vcvt_f32_bf16(vld1_u16(kptr0 + 4)); _sum00 = vmlaq_laneq_f32(_sum00, _w0, _val0, 0); _sum01 = vmlaq_laneq_f32(_sum01, _w0, _val0, 1); _sum02 = vmlaq_laneq_f32(_sum02, _w0, _val0, 2); _sum03 = vmlaq_laneq_f32(_sum03, _w0, _val0, 3); _sum04 = vmlaq_laneq_f32(_sum04, _w0, _val1, 0); _sum05 = vmlaq_laneq_f32(_sum05, _w0, _val1, 1); _sum06 = vmlaq_laneq_f32(_sum06, _w0, _val1, 2); _sum07 = vmlaq_laneq_f32(_sum07, _w0, _val1, 3); _sum10 = vmlaq_laneq_f32(_sum10, _w1, _val0, 0); _sum11 = vmlaq_laneq_f32(_sum11, _w1, _val0, 1); _sum12 = vmlaq_laneq_f32(_sum12, _w1, _val0, 2); _sum13 = vmlaq_laneq_f32(_sum13, _w1, _val0, 3); _sum14 = vmlaq_laneq_f32(_sum14, _w1, _val1, 0); _sum15 = vmlaq_laneq_f32(_sum15, _w1, _val1, 1); _sum16 = vmlaq_laneq_f32(_sum16, _w1, _val1, 2); _sum17 = vmlaq_laneq_f32(_sum17, _w1, _val1, 3); tmpptr += 8; kptr0 += 8; } vst1_u16(outptr0, vcvt_bf16_f32(_sum00)); vst1_u16(outptr0 + 4, vcvt_bf16_f32(_sum01)); vst1_u16(outptr0 + 8, vcvt_bf16_f32(_sum02)); vst1_u16(outptr0 + 12, vcvt_bf16_f32(_sum03)); vst1_u16(outptr0 + 16, vcvt_bf16_f32(_sum04)); vst1_u16(outptr0 + 20, vcvt_bf16_f32(_sum05)); vst1_u16(outptr0 + 24, vcvt_bf16_f32(_sum06)); vst1_u16(outptr0 + 28, vcvt_bf16_f32(_sum07)); vst1_u16(outptr1, vcvt_bf16_f32(_sum10)); vst1_u16(outptr1 + 4, vcvt_bf16_f32(_sum11)); vst1_u16(outptr1 + 8, vcvt_bf16_f32(_sum12)); vst1_u16(outptr1 + 12, vcvt_bf16_f32(_sum13)); vst1_u16(outptr1 + 16, vcvt_bf16_f32(_sum14)); vst1_u16(outptr1 + 20, vcvt_bf16_f32(_sum15)); vst1_u16(outptr1 + 24, vcvt_bf16_f32(_sum16)); vst1_u16(outptr1 + 28, vcvt_bf16_f32(_sum17)); outptr0 += 32; outptr1 += 32; } for (; i + 3 < size; i += 4) { const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const unsigned short* kptr0 = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 float32x4_t _sum00 = vld1q_f32(biasptr); float32x4_t _sum01 = vld1q_f32(biasptr); float32x4_t _sum02 = vld1q_f32(biasptr); float32x4_t _sum03 = vld1q_f32(biasptr); float32x4_t _sum10 = vld1q_f32(biasptr + 4); float32x4_t _sum11 = vld1q_f32(biasptr + 4); float32x4_t _sum12 = vld1q_f32(biasptr + 4); float32x4_t _sum13 = vld1q_f32(biasptr + 4); for (int j = 0; j < nn; j++) { float32x4_t _val = vcvt_f32_bf16(vld1_u16(tmpptr)); float32x4_t _w0 = vcvt_f32_bf16(vld1_u16(kptr0)); float32x4_t _w1 = vcvt_f32_bf16(vld1_u16(kptr0 + 4)); _sum00 = vmlaq_laneq_f32(_sum00, _w0, _val, 0); _sum01 = vmlaq_laneq_f32(_sum01, _w0, _val, 1); _sum02 = 
vmlaq_laneq_f32(_sum02, _w0, _val, 2); _sum03 = vmlaq_laneq_f32(_sum03, _w0, _val, 3); _sum10 = vmlaq_laneq_f32(_sum10, _w1, _val, 0); _sum11 = vmlaq_laneq_f32(_sum11, _w1, _val, 1); _sum12 = vmlaq_laneq_f32(_sum12, _w1, _val, 2); _sum13 = vmlaq_laneq_f32(_sum13, _w1, _val, 3); tmpptr += 4; kptr0 += 8; } vst1_u16(outptr0, vcvt_bf16_f32(_sum00)); vst1_u16(outptr0 + 4, vcvt_bf16_f32(_sum01)); vst1_u16(outptr0 + 8, vcvt_bf16_f32(_sum02)); vst1_u16(outptr0 + 12, vcvt_bf16_f32(_sum03)); vst1_u16(outptr1, vcvt_bf16_f32(_sum10)); vst1_u16(outptr1 + 4, vcvt_bf16_f32(_sum11)); vst1_u16(outptr1 + 8, vcvt_bf16_f32(_sum12)); vst1_u16(outptr1 + 12, vcvt_bf16_f32(_sum13)); outptr0 += 16; outptr1 += 16; } for (; i < size; i++) { const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const unsigned short* kptr0 = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 float32x4_t _sum0 = vld1q_f32(biasptr); float32x4_t _sum1 = vld1q_f32(biasptr + 4); for (int j = 0; j < nn; j++) { float32x4_t _val = vdupq_n_f32(bfloat16_to_float32(tmpptr[0])); float32x4_t _w0 = vcvt_f32_bf16(vld1_u16(kptr0)); float32x4_t _w1 = vcvt_f32_bf16(vld1_u16(kptr0 + 4)); _sum0 = vmlaq_f32(_sum0, _val, _w0); _sum1 = vmlaq_f32(_sum1, _val, _w1); tmpptr += 1; kptr0 += 8; } vst1_u16(outptr0, vcvt_bf16_f32(_sum0)); vst1_u16(outptr1, vcvt_bf16_f32(_sum1)); outptr0 += 4; outptr1 += 4; } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { unsigned short* outptr0 = top_blob.channel(p); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + p * 4 : zeros; int i = 0; for (; i + 7 < size; i += 8) { const unsigned short* tmpptr = tmp.channel(i / 8); #if __ARM_NEON && __aarch64__ const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2); #else const unsigned short* kptr0 = kernel.channel(p); #endif int nn = inch * maxk; // inch always > 0 #if __ARM_NEON float32x4_t _sum0 = vld1q_f32(biasptr); float32x4_t _sum1 = vld1q_f32(biasptr); float32x4_t _sum2 = vld1q_f32(biasptr); float32x4_t _sum3 = vld1q_f32(biasptr); float32x4_t _sum4 = vld1q_f32(biasptr); float32x4_t _sum5 = vld1q_f32(biasptr); float32x4_t _sum6 = vld1q_f32(biasptr); float32x4_t _sum7 = vld1q_f32(biasptr); for (int j = 0; j < nn; j++) { float32x4_t _val0 = vcvt_f32_bf16(vld1_u16(tmpptr)); float32x4_t _val1 = vcvt_f32_bf16(vld1_u16(tmpptr + 4)); float32x4_t _w0 = vcvt_f32_bf16(vld1_u16(kptr0)); #if __aarch64__ _sum0 = vmlaq_laneq_f32(_sum0, _w0, _val0, 0); _sum1 = vmlaq_laneq_f32(_sum1, _w0, _val0, 1); _sum2 = vmlaq_laneq_f32(_sum2, _w0, _val0, 2); _sum3 = vmlaq_laneq_f32(_sum3, _w0, _val0, 3); _sum4 = vmlaq_laneq_f32(_sum4, _w0, _val1, 0); _sum5 = vmlaq_laneq_f32(_sum5, _w0, _val1, 1); _sum6 = vmlaq_laneq_f32(_sum6, _w0, _val1, 2); _sum7 = vmlaq_laneq_f32(_sum7, _w0, _val1, 3); #else _sum0 = vmlaq_lane_f32(_sum0, _w0, vget_low_f32(_val0), 0); _sum1 = vmlaq_lane_f32(_sum1, _w0, vget_low_f32(_val0), 1); _sum2 = vmlaq_lane_f32(_sum2, _w0, vget_high_f32(_val0), 0); _sum3 = vmlaq_lane_f32(_sum3, _w0, vget_high_f32(_val0), 1); _sum4 = vmlaq_lane_f32(_sum4, _w0, vget_low_f32(_val1), 0); _sum5 = vmlaq_lane_f32(_sum5, _w0, vget_low_f32(_val1), 1); _sum6 = vmlaq_lane_f32(_sum6, _w0, vget_high_f32(_val1), 0); _sum7 = vmlaq_lane_f32(_sum7, _w0, vget_high_f32(_val1), 1); #endif tmpptr += 8; kptr0 += 4; } vst1_u16(outptr0, vcvt_bf16_f32(_sum0)); vst1_u16(outptr0 + 4, vcvt_bf16_f32(_sum1)); vst1_u16(outptr0 + 8, vcvt_bf16_f32(_sum2)); vst1_u16(outptr0 + 
12, vcvt_bf16_f32(_sum3));
            vst1_u16(outptr0 + 16, vcvt_bf16_f32(_sum4));
            vst1_u16(outptr0 + 20, vcvt_bf16_f32(_sum5));
            vst1_u16(outptr0 + 24, vcvt_bf16_f32(_sum6));
            vst1_u16(outptr0 + 28, vcvt_bf16_f32(_sum7));

            outptr0 += 32;
#else
            float sum0_0 = biasptr[0];
            float sum0_1 = biasptr[0];
            float sum0_2 = biasptr[0];
            float sum0_3 = biasptr[0];
            float sum0_4 = biasptr[0];
            float sum0_5 = biasptr[0];
            float sum0_6 = biasptr[0];
            float sum0_7 = biasptr[0];
            float sum1_0 = biasptr[1];
            float sum1_1 = biasptr[1];
            float sum1_2 = biasptr[1];
            float sum1_3 = biasptr[1];
            float sum1_4 = biasptr[1];
            float sum1_5 = biasptr[1];
            float sum1_6 = biasptr[1];
            float sum1_7 = biasptr[1];
            float sum2_0 = biasptr[2];
            float sum2_1 = biasptr[2];
            float sum2_2 = biasptr[2];
            float sum2_3 = biasptr[2];
            float sum2_4 = biasptr[2];
            float sum2_5 = biasptr[2];
            float sum2_6 = biasptr[2];
            float sum2_7 = biasptr[2];
            float sum3_0 = biasptr[3];
            float sum3_1 = biasptr[3];
            float sum3_2 = biasptr[3];
            float sum3_3 = biasptr[3];
            float sum3_4 = biasptr[3];
            float sum3_5 = biasptr[3];
            float sum3_6 = biasptr[3];
            float sum3_7 = biasptr[3];

            for (int q = 0; q < nn; q++)
            {
                sum0_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr0[0]);
                sum0_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr0[0]);
                sum0_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr0[0]);
                sum0_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr0[0]);
                sum0_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr0[0]);
                sum0_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr0[0]);
                sum0_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr0[0]);
                sum0_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr0[0]);
                sum1_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr0[1]);
                sum1_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr0[1]);
                sum1_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr0[1]);
                sum1_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr0[1]);
                sum1_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr0[1]);
                sum1_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr0[1]);
                sum1_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr0[1]);
                sum1_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr0[1]);
                sum2_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr0[2]);
                sum2_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr0[2]);
                sum2_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr0[2]);
                sum2_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr0[2]);
                sum2_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr0[2]);
                sum2_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr0[2]);
                sum2_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr0[2]);
                sum2_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr0[2]);
                sum3_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr0[3]);
                sum3_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr0[3]);
                sum3_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr0[3]);
                sum3_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr0[3]);
                sum3_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr0[3]);
                sum3_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr0[3]);
                sum3_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr0[3]);
                sum3_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr0[3]);

                tmpptr += 8;
                kptr0 += 4;
            }

            outptr0[0] = float32_to_bfloat16(sum0_0);
            outptr0[1] = float32_to_bfloat16(sum1_0);
            outptr0[2] = float32_to_bfloat16(sum2_0);
            outptr0[3] = float32_to_bfloat16(sum3_0);
            outptr0[4] = float32_to_bfloat16(sum0_1);
            outptr0[5] = float32_to_bfloat16(sum1_1);
            outptr0[6] = float32_to_bfloat16(sum2_1);
            outptr0[7] = float32_to_bfloat16(sum3_1);
            outptr0[8] = float32_to_bfloat16(sum0_2);
            outptr0[9] = float32_to_bfloat16(sum1_2);
            outptr0[10] = float32_to_bfloat16(sum2_2);
            outptr0[11] = float32_to_bfloat16(sum3_2);
            outptr0[12] = float32_to_bfloat16(sum0_3);
            outptr0[13] = float32_to_bfloat16(sum1_3);
            outptr0[14] = float32_to_bfloat16(sum2_3);
            outptr0[15] = float32_to_bfloat16(sum3_3);
            outptr0[16] = float32_to_bfloat16(sum0_4);
            outptr0[17] = float32_to_bfloat16(sum1_4);
            outptr0[18] = float32_to_bfloat16(sum2_4);
            outptr0[19] = float32_to_bfloat16(sum3_4);
            outptr0[20] = float32_to_bfloat16(sum0_5);
            outptr0[21] = float32_to_bfloat16(sum1_5);
            outptr0[22] = float32_to_bfloat16(sum2_5);
            outptr0[23] = float32_to_bfloat16(sum3_5);
            outptr0[24] = float32_to_bfloat16(sum0_6);
            outptr0[25] = float32_to_bfloat16(sum1_6);
            outptr0[26] = float32_to_bfloat16(sum2_6);
            outptr0[27] = float32_to_bfloat16(sum3_6);
            outptr0[28] = float32_to_bfloat16(sum0_7);
            outptr0[29] = float32_to_bfloat16(sum1_7);
            outptr0[30] = float32_to_bfloat16(sum2_7);
            outptr0[31] = float32_to_bfloat16(sum3_7);

            outptr0 += 32;
#endif // __ARM_NEON
        }
        for (; i + 3 < size; i += 4)
        {
            const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#if __ARM_NEON && __aarch64__
            const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
#else
            const unsigned short* kptr0 = kernel.channel(p);
#endif

            int nn = inch * maxk; // inch always > 0

#if __ARM_NEON
            float32x4_t _sum0 = vld1q_f32(biasptr);
            float32x4_t _sum1 = vld1q_f32(biasptr);
            float32x4_t _sum2 = vld1q_f32(biasptr);
            float32x4_t _sum3 = vld1q_f32(biasptr);

            for (int j = 0; j < nn; j++)
            {
                float32x4_t _val = vcvt_f32_bf16(vld1_u16(tmpptr));
                float32x4_t _w0 = vcvt_f32_bf16(vld1_u16(kptr0));

#if __aarch64__
                _sum0 = vmlaq_laneq_f32(_sum0, _w0, _val, 0);
                _sum1 = vmlaq_laneq_f32(_sum1, _w0, _val, 1);
                _sum2 = vmlaq_laneq_f32(_sum2, _w0, _val, 2);
                _sum3 = vmlaq_laneq_f32(_sum3, _w0, _val, 3);
#else
                _sum0 = vmlaq_lane_f32(_sum0, _w0, vget_low_f32(_val), 0);
                _sum1 = vmlaq_lane_f32(_sum1, _w0, vget_low_f32(_val), 1);
                _sum2 = vmlaq_lane_f32(_sum2, _w0, vget_high_f32(_val), 0);
                _sum3 = vmlaq_lane_f32(_sum3, _w0, vget_high_f32(_val), 1);
#endif

                tmpptr += 4;
                kptr0 += 4;
            }

            vst1_u16(outptr0, vcvt_bf16_f32(_sum0));
            vst1_u16(outptr0 + 4, vcvt_bf16_f32(_sum1));
            vst1_u16(outptr0 + 8, vcvt_bf16_f32(_sum2));
            vst1_u16(outptr0 + 12, vcvt_bf16_f32(_sum3));

            outptr0 += 16;
#else
            float sum0_0 = biasptr[0];
            float sum0_1 = biasptr[0];
            float sum0_2 = biasptr[0];
            float sum0_3 = biasptr[0];
            float sum1_0 = biasptr[1];
            float sum1_1 = biasptr[1];
            float sum1_2 = biasptr[1];
            float sum1_3 = biasptr[1];
            float sum2_0 = biasptr[2];
            float sum2_1 = biasptr[2];
            float sum2_2 = biasptr[2];
            float sum2_3 = biasptr[2];
            float sum3_0 = biasptr[3];
            float sum3_1 = biasptr[3];
            float sum3_2 = biasptr[3];
            float sum3_3 = biasptr[3];

            for (int q = 0; q < nn; q++)
            {
                sum0_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr0[0]);
                sum0_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr0[0]);
                sum0_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr0[0]);
                sum0_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr0[0]);
                sum1_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr0[1]);
                sum1_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr0[1]);
                sum1_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr0[1]);
                sum1_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr0[1]);
                sum2_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr0[2]);
                sum2_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr0[2]);
                sum2_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr0[2]);
                sum2_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr0[2]);
                sum3_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr0[3]);
                sum3_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr0[3]);
                sum3_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr0[3]);
                sum3_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr0[3]);

                tmpptr += 4;
                kptr0 += 4;
            }

            outptr0[0] = float32_to_bfloat16(sum0_0);
            outptr0[1] = float32_to_bfloat16(sum1_0);
            outptr0[2] = float32_to_bfloat16(sum2_0);
            outptr0[3] = float32_to_bfloat16(sum3_0);
            outptr0[4] = float32_to_bfloat16(sum0_1);
            outptr0[5] = float32_to_bfloat16(sum1_1);
            outptr0[6] = float32_to_bfloat16(sum2_1);
            outptr0[7] = float32_to_bfloat16(sum3_1);
            outptr0[8] = float32_to_bfloat16(sum0_2);
            outptr0[9] = float32_to_bfloat16(sum1_2);
            outptr0[10] = float32_to_bfloat16(sum2_2);
            outptr0[11] = float32_to_bfloat16(sum3_2);
            outptr0[12] = float32_to_bfloat16(sum0_3);
            outptr0[13] = float32_to_bfloat16(sum1_3);
            outptr0[14] = float32_to_bfloat16(sum2_3);
            outptr0[15] = float32_to_bfloat16(sum3_3);

            outptr0 += 16;
#endif // __ARM_NEON
        }
        for (; i < size; i++)
        {
            const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#if __ARM_NEON && __aarch64__
            const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
#else
            const unsigned short* kptr0 = kernel.channel(p);
#endif

            int nn = inch * maxk; // inch always > 0

#if __ARM_NEON
            float32x4_t _sum = vld1q_f32(biasptr);

            for (int j = 0; j < nn; j++)
            {
                float32x4_t _val = vdupq_n_f32(bfloat16_to_float32(tmpptr[0]));
                float32x4_t _w0 = vcvt_f32_bf16(vld1_u16(kptr0));
                _sum = vmlaq_f32(_sum, _val, _w0);

                tmpptr += 1;
                kptr0 += 4;
            }

            vst1_u16(outptr0, vcvt_bf16_f32(_sum));

            outptr0 += 4;
#else
            float sum0 = biasptr[0];
            float sum1 = biasptr[1];
            float sum2 = biasptr[2];
            float sum3 = biasptr[3];

            for (int q = 0; q < nn; q++)
            {
                sum0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr0[0]);
                sum1 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr0[1]);
                sum2 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr0[2]);
                sum3 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr0[3]);

                tmpptr++;
                kptr0 += 4;
            }

            outptr0[0] = float32_to_bfloat16(sum0);
            outptr0[1] = float32_to_bfloat16(sum1);
            outptr0[2] = float32_to_bfloat16(sum2);
            outptr0[3] = float32_to_bfloat16(sum3);

            outptr0 += 4;
#endif // __ARM_NEON
        }
    }
}

static void convolution_im2col_sgemm_transform_kernel_pack1to4_bf16s_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 4b-4a-maxk-inch/4a-outch/4b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
#if __ARM_NEON && __aarch64__
    kernel_tm.create(8 * maxk, inch, outch / 8 + (outch % 8) / 4, (size_t)2u);
#else
    kernel_tm.create(4 * maxk, inch, outch / 4, (size_t)2u);
#endif

    int q = 0;
#if __ARM_NEON && __aarch64__
    for (; q + 7 < outch; q += 8)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);
        const Mat k4 = kernel.channel(q + 4);
        const Mat k5 = kernel.channel(q + 5);
        const Mat k6 = kernel.channel(q + 6);
        const
Mat k7 = kernel.channel(q + 7); unsigned short* g00 = kernel_tm.channel(q / 8); for (int p = 0; p < inch; p++) { const float* k00 = k0.row(p); const float* k10 = k1.row(p); const float* k20 = k2.row(p); const float* k30 = k3.row(p); const float* k40 = k4.row(p); const float* k50 = k5.row(p); const float* k60 = k6.row(p); const float* k70 = k7.row(p); for (int k = 0; k < maxk; k++) { g00[0] = float32_to_bfloat16(k00[k]); g00[1] = float32_to_bfloat16(k10[k]); g00[2] = float32_to_bfloat16(k20[k]); g00[3] = float32_to_bfloat16(k30[k]); g00[4] = float32_to_bfloat16(k40[k]); g00[5] = float32_to_bfloat16(k50[k]); g00[6] = float32_to_bfloat16(k60[k]); g00[7] = float32_to_bfloat16(k70[k]); g00 += 8; } } } #endif // __ARM_NEON && __aarch64__ for (; q + 3 < outch; q += 4) { const Mat k0 = kernel.channel(q); const Mat k1 = kernel.channel(q + 1); const Mat k2 = kernel.channel(q + 2); const Mat k3 = kernel.channel(q + 3); #if __ARM_NEON && __aarch64__ unsigned short* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4); #else unsigned short* g00 = kernel_tm.channel(q / 4); #endif for (int p = 0; p < inch; p++) { const float* k00 = k0.row(p); const float* k10 = k1.row(p); const float* k20 = k2.row(p); const float* k30 = k3.row(p); for (int k = 0; k < maxk; k++) { g00[0] = float32_to_bfloat16(k00[k]); g00[1] = float32_to_bfloat16(k10[k]); g00[2] = float32_to_bfloat16(k20[k]); g00[3] = float32_to_bfloat16(k30[k]); g00 += 4; } } } } static void convolution_im2col_sgemm_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 2u, 1, opt.workspace_allocator); { const int gap = w * stride_h - outw * stride_w; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); unsigned short* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const unsigned short* sptr = img.row<const unsigned short>(dilation_h * u) + dilation_w * v; for (int i = 0; i < outh; i++) { int j = 0; for (; j < outw; j++) { ptr[0] = sptr[0]; sptr += stride_w; ptr += 1; } sptr += gap; } } } } } im2col_sgemm_pack1to4_bf16s_neon(bottom_im2col, top_blob, kernel, _bias, opt); }
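// A hedged reference for the bfloat16 storage these kernels assume: a
// bfloat16 value is simply the high 16 bits of an IEEE-754 float32.  ncnn's
// real bfloat16_to_float32()/float32_to_bfloat16() helpers are declared
// elsewhere; this illustrative sketch uses plain truncation (no rounding)
// and is not called by the kernels above.
static inline float bfloat16_to_float32_sketch(unsigned short v)
{
    union { unsigned int u; float f; } tmp;
    tmp.u = (unsigned int)v << 16; // low mantissa bits are restored as zero
    return tmp.f;
}

static inline unsigned short float32_to_bfloat16_sketch(float f)
{
    union { unsigned int u; float f; } tmp;
    tmp.f = f;
    return (unsigned short)(tmp.u >> 16); // truncate toward zero mantissa
}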
omp_flush.c
// RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"

int test_omp_flush()
{
  int result1;
  int result2;
  int dummy;

  result1 = 0;
  result2 = 0;

  #pragma omp parallel
  {
    int rank;
    rank = omp_get_thread_num();
    #pragma omp barrier
    if (rank == 1) {
      /* Thread 1 publishes result2 and flushes it to memory. */
      result2 = 3;
      #pragma omp flush (result2)
      dummy = result2;
    }
    if (rank == 0) {
      /* Thread 0 sleeps long enough for thread 1 to have flushed,
         then flushes its own view and reads the published value. */
      my_sleep(SLEEPTIME);
      #pragma omp flush (result2)
      result1 = result2;
    }
  } /* end of parallel */
  return ((result1 == result2) && (result2 == dummy) && (result2 == 3));
}

int main()
{
  int i;
  int num_failed = 0;

  for (i = 0; i < REPETITIONS; i++) {
    if (!test_omp_flush()) {
      num_failed++;
    }
  }
  return num_failed;
}
arraybench.c
/**************************************************************************** * * * OpenMP MicroBenchmark Suite - Version 3.1 * * * * produced by * * * * Mark Bull, Fiona Reid and Nix Mc Donnell * * * * at * * * * Edinburgh Parallel Computing Centre * * * * email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk * * * * * * This version copyright (c) The University of Edinburgh, 2015. * * * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * ****************************************************************************/ #include "common.h" #include "arraybench.h" double btest[IDA]; double atest[IDA]; #pragma omp threadprivate (btest) int arraybench_main(int argc, char **argv) { ompbench_init(argc, argv); /* GENERATE REFERENCE TIME */ reference("reference time 1", &refer); char testName[32]; /* TEST PRIVATE */ sprintf(testName, "PRIVATE %d", IDA); benchmark(testName, &testprivnew); /* TEST FIRSTPRIVATE */ sprintf(testName, "FIRSTPRIVATE %d", IDA); benchmark(testName, &testfirstprivnew); #ifdef OMPVER2 /* TEST COPYPRIVATE */ sprintf(testName, "COPYPRIVATE %d", IDA); benchmark(testName, &testcopyprivnew); #endif #if 0 /* TEST THREADPRIVATE - COPYIN */ sprintf(testName, "COPYIN %d", IDA); benchmark(testName, &testthrprivnew); #endif finalise(); return EXIT_SUCCESS; } static void refer() { int j; double a[1]; for (j = 0; j < innerreps; j++) { array_delay(delaylength, a); } } void testfirstprivnew() { int j; for (j = 0; j < innerreps; j++) { #pragma omp parallel firstprivate(atest) { array_delay(delaylength, atest); } } } void testprivnew() { int j; for (j = 0; j < innerreps; j++) { #pragma omp parallel private(atest) { array_delay(delaylength, atest); } } } #ifdef OMPVER2 void testcopyprivnew() { int j; for (j=0; j<innerreps; j++) { #pragma omp parallel private(atest) { #pragma omp single copyprivate(atest) { array_delay(delaylength, atest); } } } } #endif void testthrprivnew() { int j; for (j = 0; j < innerreps; j++) { #pragma omp parallel copyin(btest) { array_delay(delaylength, btest); } } }
mask_rasterize.c
/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2012 Blender Foundation.
 * All rights reserved.
 *
 * Contributor(s): Blender Foundation,
 *                 Campbell Barton
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file blender/blenkernel/intern/mask_rasterize.c
 *  \ingroup bke
 *
 * This module exposes a rasterizer that works as a black box - implementation details are confined to this file.
 *
 * The basic method to access is:
 * - create & initialize a handle from a #Mask datablock.
 * - execute pixel lookups.
 * - free the handle.
 *
 * This file is admittedly a bit convoluted: in quite a few areas speed was chosen over readability,
 * though it is commented, so it shouldn't be too hard to see what's going on.
 *
 *
 * Implementation:
 *
 * To rasterize the mask it is converted into geometry that uses a ray-cast for each pixel lookup.
 *
 * Initially 'kdopbvh' was used but this ended up being too slow.
 *
 * To gain some extra speed we take advantage of a few shortcuts that can be made when rasterizing masks specifically.
 * - all triangles are known to be completely white, so no depth check is done on triangle intersection.
 * - all quads are known to be feather outlines; the 1 and 0 depths are known by the vertex order in the quad.
 * - there is no color, just a value for each mask pixel.
 * - the mask spatial structure always maps to the 0-1 range on the X and Y axis.
 * - bucketing is used to speed up lookups for geometry.
 *
 * Other Details:
 * - unsigned values are used throughout for some extra speed on some architectures.
 * - anti-aliasing is faked by ensuring at least one pixel of feather, which avoids oversampling.
 * - initializing the spatial structure doesn't need to be as optimized as the pixel lookups are.
 * - mask lookups need not be pixel aligned, so any sub-pixel values from x/y (0 - 1) can be found.
 *   (perhaps masks can be used as a vector texture in 3D later on)
 *
 *
 * Currently, to build the spatial structure we have to calculate the total number of faces ahead of time.
 *
 * This is getting a bit complicated with the addition of unfilled splines and end capping -
 * if large changes are needed here we would be better off using an iterable
 * BLI_mempool for triangles and converting to a contiguous array afterwards.
 *
 * - Campbell
 */

#include "MEM_guardedalloc.h"

#include "DNA_vec_types.h"
#include "DNA_mask_types.h"
#include "DNA_scene_types.h"

#include "BLI_utildefines.h"
#include "BLI_scanfill.h"
#include "BLI_memarena.h"

#include "BLI_math.h"
#include "BLI_rect.h"
#include "BLI_listbase.h"
#include "BLI_linklist.h"

#include "BKE_mask.h"

#include "BLI_strict_flags.h"

/* this is rather an annoying hack, use a define to isolate it.
 * the problem is caused by scanfill removing edges on us.
 */
#define USE_SCANFILL_EDGE_WORKAROUND

#define SPLINE_RESOL_CAP_PER_PIXEL 2
#define SPLINE_RESOL_CAP_MIN 8
#define SPLINE_RESOL_CAP_MAX 64

/* found to give the best performance for high detail masks - values between 2 and 8 work best */
#define BUCKET_PIXELS_PER_CELL 4

#define SF_EDGE_IS_BOUNDARY 0xff
#define SF_KEYINDEX_TEMP_ID ((unsigned int) -1)

#define TRI_TERMINATOR_ID   ((unsigned int) -1)
#define TRI_VERT            ((unsigned int) -1)

/* for debugging add... */
#ifndef NDEBUG
/* printf("%u %u %u %u\n", _t[0], _t[1], _t[2], _t[3]); \ */
#  define FACE_ASSERT(face, vert_max)                      \
	{                                                      \
		unsigned int *_t = face;                           \
		BLI_assert(_t[0] < vert_max);                      \
		BLI_assert(_t[1] < vert_max);                      \
		BLI_assert(_t[2] < vert_max);                      \
		BLI_assert(_t[3] < vert_max || _t[3] == TRI_VERT); \
	} (void)0
#else
/* do nothing */
#  define FACE_ASSERT(face, vert_max)
#endif

static void rotate_point_v2(float r_p[2], const float p[2], const float cent[2],
                            const float angle, const float asp[2])
{
	const float s = sinf(angle);
	const float c = cosf(angle);
	float p_new[2];

	/* translate the point to the origin (correcting for aspect) */
	r_p[0] = (p[0] - cent[0]) / asp[0];
	r_p[1] = (p[1] - cent[1]) / asp[1];

	/* rotate point */
	p_new[0] = ((r_p[0] * c) - (r_p[1] * s)) * asp[0];
	p_new[1] = ((r_p[0] * s) + (r_p[1] * c)) * asp[1];

	/* translate point back */
	r_p[0] = p_new[0] + cent[0];
	r_p[1] = p_new[1] + cent[1];
}

BLI_INLINE unsigned int clampis_uint(const unsigned int v, const unsigned int min, const unsigned int max)
{
	return v < min ? min : (v > max ? max : v);
}

/* --------------------------------------------------------------------- */
/* local structs for mask rasterizing                                     */
/* --------------------------------------------------------------------- */

/**
 * A single #MaskRasterHandle contains multiple #MaskRasterLayer's.
 * Each #MaskRasterLayer performs its own lookup, which contributes to
 * the final pixel with its own blending mode; the layer results are
 * blended together into the final pixel.
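 *
 * A hedged usage sketch (signatures as declared in BKE_mask.h; buffer
 * iteration and error handling elided):
 *
 *   MaskRasterHandle *handle = BKE_maskrasterize_handle_new();
 *   BKE_maskrasterize_handle_init(handle, mask, width, height, true, true, true);
 *   value = BKE_maskrasterize_handle_sample(handle, xy);  (xy in 0-1 space)
 *   BKE_maskrasterize_handle_free(handle);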
*/ /* internal use only */ typedef struct MaskRasterLayer { /* geometry */ unsigned int face_tot; unsigned int (*face_array)[4]; /* access coords tri/quad */ float (*face_coords)[3]; /* xy, z 0-1 (1.0 == filled) */ /* 2d bounds (to quickly skip bucket lookup) */ rctf bounds; /* buckets */ unsigned int **buckets_face; /* cache divide and subtract */ float buckets_xy_scalar[2]; /* (1.0 / (buckets_width + FLT_EPSILON)) * buckets_x */ unsigned int buckets_x; unsigned int buckets_y; /* copied direct from #MaskLayer.--- */ /* blending options */ float alpha; char blend; char blend_flag; char falloff; } MaskRasterLayer; typedef struct MaskRasterSplineInfo { /* body of the spline */ unsigned int vertex_offset; unsigned int vertex_total; /* capping for non-filled, non cyclic splines */ unsigned int vertex_total_cap_head; unsigned int vertex_total_cap_tail; bool is_cyclic; } MaskRasterSplineInfo; /** * opaque local struct for mask pixel lookup, each MaskLayer needs one of these */ struct MaskRasterHandle { MaskRasterLayer *layers; unsigned int layers_tot; /* 2d bounds (to quickly skip bucket lookup) */ rctf bounds; }; /* --------------------------------------------------------------------- */ /* alloc / free functions */ /* --------------------------------------------------------------------- */ MaskRasterHandle *BKE_maskrasterize_handle_new(void) { MaskRasterHandle *mr_handle; mr_handle = MEM_callocN(sizeof(MaskRasterHandle), "MaskRasterHandle"); return mr_handle; } void BKE_maskrasterize_handle_free(MaskRasterHandle *mr_handle) { const unsigned int layers_tot = mr_handle->layers_tot; unsigned int i; MaskRasterLayer *layer = mr_handle->layers; for (i = 0; i < layers_tot; i++, layer++) { if (layer->face_array) { MEM_freeN(layer->face_array); } if (layer->face_coords) { MEM_freeN(layer->face_coords); } if (layer->buckets_face) { const unsigned int bucket_tot = layer->buckets_x * layer->buckets_y; unsigned int bucket_index; for (bucket_index = 0; bucket_index < bucket_tot; bucket_index++) { unsigned int *face_index = layer->buckets_face[bucket_index]; if (face_index) { MEM_freeN(face_index); } } MEM_freeN(layer->buckets_face); } } MEM_freeN(mr_handle->layers); MEM_freeN(mr_handle); } static void maskrasterize_spline_differentiate_point_outset(float (*diff_feather_points)[2], float (*diff_points)[2], const unsigned int tot_diff_point, const float ofs, const bool do_test) { unsigned int k_prev = tot_diff_point - 2; unsigned int k_curr = tot_diff_point - 1; unsigned int k_next = 0; unsigned int k; float d_prev[2]; float d_next[2]; float d[2]; const float *co_prev; const float *co_curr; const float *co_next; const float ofs_squared = ofs * ofs; co_prev = diff_points[k_prev]; co_curr = diff_points[k_curr]; co_next = diff_points[k_next]; /* precalc */ sub_v2_v2v2(d_prev, co_prev, co_curr); normalize_v2(d_prev); for (k = 0; k < tot_diff_point; k++) { /* co_prev = diff_points[k_prev]; */ /* precalc */ co_curr = diff_points[k_curr]; co_next = diff_points[k_next]; /* sub_v2_v2v2(d_prev, co_prev, co_curr); */ /* precalc */ sub_v2_v2v2(d_next, co_curr, co_next); /* normalize_v2(d_prev); */ /* precalc */ normalize_v2(d_next); if ((do_test == false) || (len_squared_v2v2(diff_feather_points[k], diff_points[k]) < ofs_squared)) { add_v2_v2v2(d, d_prev, d_next); normalize_v2(d); diff_feather_points[k][0] = diff_points[k][0] + ( d[1] * ofs); diff_feather_points[k][1] = diff_points[k][1] + (-d[0] * ofs); } /* use next iter */ copy_v2_v2(d_prev, d_next); /* k_prev = k_curr; */ /* precalc */ k_curr = k_next; k_next++; } } 
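The outset rule used by maskrasterize_spline_differentiate_point_outset() above is easier to follow in isolation: each point is pushed along the perpendicular of the averaged (normalized) directions of its two adjacent edges, with the same (+d[1], -d[0]) sign convention. Below is a minimal standalone sketch of that rule - it is not Blender code, the helper name and the square test polygon are invented for illustration, and the offset direction flips with polygon winding.

#include <math.h>
#include <stdio.h>

static void outset_point(const float prev[2], const float curr[2],
                         const float next[2], const float ofs, float r_out[2])
{
    float d_prev[2] = {prev[0] - curr[0], prev[1] - curr[1]};
    float d_next[2] = {curr[0] - next[0], curr[1] - next[1]};
    float d[2], len;

    /* normalize both adjacent edge directions */
    len = sqrtf(d_prev[0] * d_prev[0] + d_prev[1] * d_prev[1]);
    d_prev[0] /= len; d_prev[1] /= len;
    len = sqrtf(d_next[0] * d_next[0] + d_next[1] * d_next[1]);
    d_next[0] /= len; d_next[1] /= len;

    /* average them and take the perpendicular, as the function above does */
    d[0] = d_prev[0] + d_next[0];
    d[1] = d_prev[1] + d_next[1];
    len = sqrtf(d[0] * d[0] + d[1] * d[1]);
    d[0] /= len; d[1] /= len;

    r_out[0] = curr[0] + ( d[1] * ofs);
    r_out[1] = curr[1] + (-d[0] * ofs);
}

int main(void)
{
    const float quad[4][2] = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
    float out[2];
    int i;
    for (i = 0; i < 4; i++) {
        outset_point(quad[(i + 3) % 4], quad[i], quad[(i + 1) % 4], 0.1f, out);
        printf("(%g, %g) -> (%g, %g)\n", quad[i][0], quad[i][1], out[0], out[1]);
    }
    return 0;
}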
/* this function is not exact, sometimes it returns false positives, * the main point of it is to clear out _almost_ all bucket/face non-intersections, * returning true in corner cases is ok but missing an intersection is NOT. * * method used * - check if the center of the buckets bounding box is intersecting the face * - if not get the max radius to a corner of the bucket and see how close we * are to any of the triangle edges. */ static bool layer_bucket_isect_test( const MaskRasterLayer *layer, unsigned int face_index, const unsigned int bucket_x, const unsigned int bucket_y, const float bucket_size_x, const float bucket_size_y, const float bucket_max_rad_squared) { unsigned int *face = layer->face_array[face_index]; float (*cos)[3] = layer->face_coords; const float xmin = layer->bounds.xmin + (bucket_size_x * (float)bucket_x); const float ymin = layer->bounds.ymin + (bucket_size_y * (float)bucket_y); const float xmax = xmin + bucket_size_x; const float ymax = ymin + bucket_size_y; const float cent[2] = {(xmin + xmax) * 0.5f, (ymin + ymax) * 0.5f}; if (face[3] == TRI_VERT) { const float *v1 = cos[face[0]]; const float *v2 = cos[face[1]]; const float *v3 = cos[face[2]]; if (isect_point_tri_v2(cent, v1, v2, v3)) { return true; } else { if ((dist_squared_to_line_segment_v2(cent, v1, v2) < bucket_max_rad_squared) || (dist_squared_to_line_segment_v2(cent, v2, v3) < bucket_max_rad_squared) || (dist_squared_to_line_segment_v2(cent, v3, v1) < bucket_max_rad_squared)) { return true; } else { // printf("skip tri\n"); return false; } } } else { const float *v1 = cos[face[0]]; const float *v2 = cos[face[1]]; const float *v3 = cos[face[2]]; const float *v4 = cos[face[3]]; if (isect_point_tri_v2(cent, v1, v2, v3)) { return true; } else if (isect_point_tri_v2(cent, v1, v3, v4)) { return true; } else { if ((dist_squared_to_line_segment_v2(cent, v1, v2) < bucket_max_rad_squared) || (dist_squared_to_line_segment_v2(cent, v2, v3) < bucket_max_rad_squared) || (dist_squared_to_line_segment_v2(cent, v3, v4) < bucket_max_rad_squared) || (dist_squared_to_line_segment_v2(cent, v4, v1) < bucket_max_rad_squared)) { return true; } else { // printf("skip quad\n"); return false; } } } } static void layer_bucket_init_dummy(MaskRasterLayer *layer) { layer->face_tot = 0; layer->face_coords = NULL; layer->face_array = NULL; layer->buckets_x = 0; layer->buckets_y = 0; layer->buckets_xy_scalar[0] = 0.0f; layer->buckets_xy_scalar[1] = 0.0f; layer->buckets_face = NULL; BLI_rctf_init(&layer->bounds, -1.0f, -1.0f, -1.0f, -1.0f); } static void layer_bucket_init(MaskRasterLayer *layer, const float pixel_size) { MemArena *arena = BLI_memarena_new(MEM_SIZE_OPTIMAL(1 << 16), __func__); const float bucket_dim_x = BLI_rctf_size_x(&layer->bounds); const float bucket_dim_y = BLI_rctf_size_y(&layer->bounds); layer->buckets_x = (unsigned int)((bucket_dim_x / pixel_size) / (float)BUCKET_PIXELS_PER_CELL); layer->buckets_y = (unsigned int)((bucket_dim_y / pixel_size) / (float)BUCKET_PIXELS_PER_CELL); // printf("bucket size %ux%u\n", layer->buckets_x, layer->buckets_y); CLAMP(layer->buckets_x, 8, 512); CLAMP(layer->buckets_y, 8, 512); layer->buckets_xy_scalar[0] = (1.0f / (bucket_dim_x + FLT_EPSILON)) * (float)layer->buckets_x; layer->buckets_xy_scalar[1] = (1.0f / (bucket_dim_y + FLT_EPSILON)) * (float)layer->buckets_y; { /* width and height of each bucket */ const float bucket_size_x = (bucket_dim_x + FLT_EPSILON) / (float)layer->buckets_x; const float bucket_size_y = (bucket_dim_y + FLT_EPSILON) / (float)layer->buckets_y; const float 
bucket_max_rad = (max_ff(bucket_size_x, bucket_size_y) * (float)M_SQRT2) + FLT_EPSILON; const float bucket_max_rad_squared = bucket_max_rad * bucket_max_rad; unsigned int *face = &layer->face_array[0][0]; float (*cos)[3] = layer->face_coords; const unsigned int bucket_tot = layer->buckets_x * layer->buckets_y; LinkNode **bucketstore = MEM_callocN(bucket_tot * sizeof(LinkNode *), __func__); unsigned int *bucketstore_tot = MEM_callocN(bucket_tot * sizeof(unsigned int), __func__); unsigned int face_index; for (face_index = 0; face_index < layer->face_tot; face_index++, face += 4) { float xmin; float xmax; float ymin; float ymax; if (face[3] == TRI_VERT) { const float *v1 = cos[face[0]]; const float *v2 = cos[face[1]]; const float *v3 = cos[face[2]]; xmin = min_ff(v1[0], min_ff(v2[0], v3[0])); xmax = max_ff(v1[0], max_ff(v2[0], v3[0])); ymin = min_ff(v1[1], min_ff(v2[1], v3[1])); ymax = max_ff(v1[1], max_ff(v2[1], v3[1])); } else { const float *v1 = cos[face[0]]; const float *v2 = cos[face[1]]; const float *v3 = cos[face[2]]; const float *v4 = cos[face[3]]; xmin = min_ff(v1[0], min_ff(v2[0], min_ff(v3[0], v4[0]))); xmax = max_ff(v1[0], max_ff(v2[0], max_ff(v3[0], v4[0]))); ymin = min_ff(v1[1], min_ff(v2[1], min_ff(v3[1], v4[1]))); ymax = max_ff(v1[1], max_ff(v2[1], max_ff(v3[1], v4[1]))); } /* not essential but may as will skip any faces outside the view */ if (!((xmax < 0.0f) || (ymax < 0.0f) || (xmin > 1.0f) || (ymin > 1.0f))) { CLAMP(xmin, 0.0f, 1.0f); CLAMP(ymin, 0.0f, 1.0f); CLAMP(xmax, 0.0f, 1.0f); CLAMP(ymax, 0.0f, 1.0f); { unsigned int xi_min = (unsigned int) ((xmin - layer->bounds.xmin) * layer->buckets_xy_scalar[0]); unsigned int xi_max = (unsigned int) ((xmax - layer->bounds.xmin) * layer->buckets_xy_scalar[0]); unsigned int yi_min = (unsigned int) ((ymin - layer->bounds.ymin) * layer->buckets_xy_scalar[1]); unsigned int yi_max = (unsigned int) ((ymax - layer->bounds.ymin) * layer->buckets_xy_scalar[1]); void *face_index_void = SET_UINT_IN_POINTER(face_index); unsigned int xi, yi; /* this should _almost_ never happen but since it can in extreme cases, * we have to clamp the values or we overrun the buffer and crash */ if (xi_min >= layer->buckets_x) xi_min = layer->buckets_x - 1; if (xi_max >= layer->buckets_x) xi_max = layer->buckets_x - 1; if (yi_min >= layer->buckets_y) yi_min = layer->buckets_y - 1; if (yi_max >= layer->buckets_y) yi_max = layer->buckets_y - 1; for (yi = yi_min; yi <= yi_max; yi++) { unsigned int bucket_index = (layer->buckets_x * yi) + xi_min; for (xi = xi_min; xi <= xi_max; xi++, bucket_index++) { // unsigned int bucket_index = (layer->buckets_x * yi) + xi; /* correct but do in outer loop */ BLI_assert(xi < layer->buckets_x); BLI_assert(yi < layer->buckets_y); BLI_assert(bucket_index < bucket_tot); /* check if the bucket intersects with the face */ /* note: there is a trade off here since checking box/tri intersections isn't * as optimal as it could be, but checking pixels against faces they will never intersect * with is likely the greater slowdown here - so check if the cell intersects the face */ if (layer_bucket_isect_test(layer, face_index, xi, yi, bucket_size_x, bucket_size_y, bucket_max_rad_squared)) { BLI_linklist_prepend_arena(&bucketstore[bucket_index], face_index_void, arena); bucketstore_tot[bucket_index]++; } } } } } } if (1) { /* now convert linknodes into arrays for faster per pixel access */ unsigned int **buckets_face = MEM_mallocN(bucket_tot * sizeof(*buckets_face), __func__); unsigned int bucket_index; for (bucket_index = 0; bucket_index < 
bucket_tot; bucket_index++) { if (bucketstore_tot[bucket_index]) { unsigned int *bucket = MEM_mallocN((bucketstore_tot[bucket_index] + 1) * sizeof(unsigned int), __func__); LinkNode *bucket_node; buckets_face[bucket_index] = bucket; for (bucket_node = bucketstore[bucket_index]; bucket_node; bucket_node = bucket_node->next) { *bucket = GET_UINT_FROM_POINTER(bucket_node->link); bucket++; } *bucket = TRI_TERMINATOR_ID; } else { buckets_face[bucket_index] = NULL; } } layer->buckets_face = buckets_face; } MEM_freeN(bucketstore); MEM_freeN(bucketstore_tot); } BLI_memarena_free(arena); } void BKE_maskrasterize_handle_init(MaskRasterHandle *mr_handle, struct Mask *mask, const int width, const int height, const bool do_aspect_correct, const bool do_mask_aa, const bool do_feather) { const rctf default_bounds = {0.0f, 1.0f, 0.0f, 1.0f}; const float pixel_size = 1.0f / (float)min_ii(width, height); const float asp_xy[2] = {(do_aspect_correct && width > height) ? (float)height / (float)width : 1.0f, (do_aspect_correct && width < height) ? (float)width / (float)height : 1.0f}; const float zvec[3] = {0.0f, 0.0f, 1.0f}; MaskLayer *masklay; unsigned int masklay_index; MemArena *sf_arena; mr_handle->layers_tot = (unsigned int)BLI_listbase_count(&mask->masklayers); mr_handle->layers = MEM_mallocN(sizeof(MaskRasterLayer) * mr_handle->layers_tot, "MaskRasterLayer"); BLI_rctf_init_minmax(&mr_handle->bounds); sf_arena = BLI_memarena_new(BLI_SCANFILL_ARENA_SIZE, __func__); for (masklay = mask->masklayers.first, masklay_index = 0; masklay; masklay = masklay->next, masklay_index++) { /* we need to store vertex ranges for open splines for filling */ unsigned int tot_splines; MaskRasterSplineInfo *open_spline_ranges; unsigned int open_spline_index = 0; MaskSpline *spline; /* scanfill */ ScanFillContext sf_ctx; ScanFillVert *sf_vert = NULL; ScanFillVert *sf_vert_next = NULL; ScanFillFace *sf_tri; unsigned int sf_vert_tot = 0; unsigned int tot_feather_quads = 0; #ifdef USE_SCANFILL_EDGE_WORKAROUND unsigned int tot_boundary_used = 0; unsigned int tot_boundary_found = 0; #endif if (masklay->restrictflag & MASK_RESTRICT_RENDER) { /* skip the layer */ mr_handle->layers_tot--; masklay_index--; continue; } tot_splines = (unsigned int)BLI_listbase_count(&masklay->splines); open_spline_ranges = MEM_callocN(sizeof(*open_spline_ranges) * tot_splines, __func__); BLI_scanfill_begin_arena(&sf_ctx, sf_arena); for (spline = masklay->splines.first; spline; spline = spline->next) { const bool is_cyclic = (spline->flag & MASK_SPLINE_CYCLIC) != 0; const bool is_fill = (spline->flag & MASK_SPLINE_NOFILL) == 0; float (*diff_points)[2]; unsigned int tot_diff_point; float (*diff_feather_points)[2]; float (*diff_feather_points_flip)[2]; unsigned int tot_diff_feather_points; const unsigned int resol_a = BKE_mask_spline_resolution(spline, width, height) / 4; const unsigned int resol_b = BKE_mask_spline_feather_resolution(spline, width, height) / 4; const unsigned int resol = CLAMPIS(MAX2(resol_a, resol_b), 4, 512); diff_points = BKE_mask_spline_differentiate_with_resolution( spline, &tot_diff_point, resol); if (do_feather) { diff_feather_points = BKE_mask_spline_feather_differentiated_points_with_resolution( spline, &tot_diff_feather_points, resol, false); BLI_assert(diff_feather_points); } else { tot_diff_feather_points = 0; diff_feather_points = NULL; } if (tot_diff_point > 3) { ScanFillVert *sf_vert_prev; unsigned int j; float co[3]; co[2] = 0.0f; sf_ctx.poly_nr++; if (do_aspect_correct) { if (width != height) { float *fp; float *ffp; 
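/* aspect correction: the loop below remaps each coordinate on one axis
 * around the center, v = ((v - 0.5) / asp) + 0.5, to compensate for the
 * non-square pixel aspect before the splines are scan-filled in the
 * 0-1 square */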
unsigned int i; float asp; if (width < height) { fp = &diff_points[0][0]; ffp = tot_diff_feather_points ? &diff_feather_points[0][0] : NULL; asp = (float)width / (float)height; } else { fp = &diff_points[0][1]; ffp = tot_diff_feather_points ? &diff_feather_points[0][1] : NULL; asp = (float)height / (float)width; } for (i = 0; i < tot_diff_point; i++, fp += 2) { (*fp) = (((*fp) - 0.5f) / asp) + 0.5f; } if (tot_diff_feather_points) { for (i = 0; i < tot_diff_feather_points; i++, ffp += 2) { (*ffp) = (((*ffp) - 0.5f) / asp) + 0.5f; } } } } /* fake aa, using small feather */ if (do_mask_aa == true) { if (do_feather == false) { tot_diff_feather_points = tot_diff_point; diff_feather_points = MEM_mallocN(sizeof(*diff_feather_points) * (size_t)tot_diff_feather_points, __func__); /* add single pixel feather */ maskrasterize_spline_differentiate_point_outset(diff_feather_points, diff_points, tot_diff_point, pixel_size, false); } else { /* ensure single pixel feather, on any zero feather areas */ maskrasterize_spline_differentiate_point_outset(diff_feather_points, diff_points, tot_diff_point, pixel_size, true); } } if (is_fill) { /* applt intersections depending on fill settings */ if (spline->flag & MASK_SPLINE_NOINTERSECT) { BKE_mask_spline_feather_collapse_inner_loops(spline, diff_feather_points, tot_diff_feather_points); } copy_v2_v2(co, diff_points[0]); sf_vert_prev = BLI_scanfill_vert_add(&sf_ctx, co); sf_vert_prev->tmp.u = sf_vert_tot; sf_vert_prev->keyindex = sf_vert_tot + tot_diff_point; /* absolute index of feather vert */ sf_vert_tot++; /* TODO, an alternate functions so we can avoid double vector copy! */ for (j = 1; j < tot_diff_point; j++) { copy_v2_v2(co, diff_points[j]); sf_vert = BLI_scanfill_vert_add(&sf_ctx, co); sf_vert->tmp.u = sf_vert_tot; sf_vert->keyindex = sf_vert_tot + tot_diff_point; /* absolute index of feather vert */ sf_vert_tot++; } sf_vert = sf_vert_prev; sf_vert_prev = sf_ctx.fillvertbase.last; for (j = 0; j < tot_diff_point; j++) { ScanFillEdge *sf_edge = BLI_scanfill_edge_add(&sf_ctx, sf_vert_prev, sf_vert); #ifdef USE_SCANFILL_EDGE_WORKAROUND if (diff_feather_points) { sf_edge->tmp.c = SF_EDGE_IS_BOUNDARY; tot_boundary_used++; } #else (void)sf_edge; #endif sf_vert_prev = sf_vert; sf_vert = sf_vert->next; } if (diff_feather_points) { float co_feather[3]; co_feather[2] = 1.0f; BLI_assert(tot_diff_feather_points == tot_diff_point); /* note: only added for convenience, we don't infact use these to scanfill, * only to create feather faces after scanfill */ for (j = 0; j < tot_diff_feather_points; j++) { copy_v2_v2(co_feather, diff_feather_points[j]); sf_vert = BLI_scanfill_vert_add(&sf_ctx, co_feather); /* no need for these attrs */ #if 0 sf_vert->tmp.u = sf_vert_tot; sf_vert->keyindex = sf_vert_tot + tot_diff_point; /* absolute index of feather vert */ #endif sf_vert->keyindex = SF_KEYINDEX_TEMP_ID; sf_vert_tot++; } tot_feather_quads += tot_diff_point; } } else { /* unfilled spline */ if (diff_feather_points) { float co_diff[2]; float co_feather[3]; co_feather[2] = 1.0f; if (spline->flag & MASK_SPLINE_NOINTERSECT) { diff_feather_points_flip = MEM_mallocN(sizeof(float) * 2 * tot_diff_feather_points, "diff_feather_points_flip"); for (j = 0; j < tot_diff_point; j++) { sub_v2_v2v2(co_diff, diff_points[j], diff_feather_points[j]); add_v2_v2v2(diff_feather_points_flip[j], diff_points[j], co_diff); } BKE_mask_spline_feather_collapse_inner_loops(spline, diff_feather_points, tot_diff_feather_points); BKE_mask_spline_feather_collapse_inner_loops(spline, diff_feather_points_flip, 
tot_diff_feather_points); } else { diff_feather_points_flip = NULL; } open_spline_ranges[open_spline_index].vertex_offset = sf_vert_tot; open_spline_ranges[open_spline_index].vertex_total = tot_diff_point; /* TODO, an alternate functions so we can avoid double vector copy! */ for (j = 0; j < tot_diff_point; j++) { /* center vert */ copy_v2_v2(co, diff_points[j]); sf_vert = BLI_scanfill_vert_add(&sf_ctx, co); sf_vert->tmp.u = sf_vert_tot; sf_vert->keyindex = SF_KEYINDEX_TEMP_ID; sf_vert_tot++; /* feather vert A */ copy_v2_v2(co_feather, diff_feather_points[j]); sf_vert = BLI_scanfill_vert_add(&sf_ctx, co_feather); sf_vert->tmp.u = sf_vert_tot; sf_vert->keyindex = SF_KEYINDEX_TEMP_ID; sf_vert_tot++; /* feather vert B */ if (diff_feather_points_flip) { copy_v2_v2(co_feather, diff_feather_points_flip[j]); } else { sub_v2_v2v2(co_diff, co, co_feather); add_v2_v2v2(co_feather, co, co_diff); } sf_vert = BLI_scanfill_vert_add(&sf_ctx, co_feather); sf_vert->tmp.u = sf_vert_tot; sf_vert->keyindex = SF_KEYINDEX_TEMP_ID; sf_vert_tot++; tot_feather_quads += 2; } if (!is_cyclic) { tot_feather_quads -= 2; } if (diff_feather_points_flip) { MEM_freeN(diff_feather_points_flip); diff_feather_points_flip = NULL; } /* cap ends */ /* dummy init value */ open_spline_ranges[open_spline_index].vertex_total_cap_head = 0; open_spline_ranges[open_spline_index].vertex_total_cap_tail = 0; if (!is_cyclic) { const float *fp_cent; const float *fp_turn; unsigned int k; fp_cent = diff_points[0]; fp_turn = diff_feather_points[0]; #define CALC_CAP_RESOL \ clampis_uint((unsigned int )(len_v2v2(fp_cent, fp_turn) / \ (pixel_size * SPLINE_RESOL_CAP_PER_PIXEL)), \ SPLINE_RESOL_CAP_MIN, SPLINE_RESOL_CAP_MAX) { const unsigned int vertex_total_cap = CALC_CAP_RESOL; for (k = 1; k < vertex_total_cap; k++) { const float angle = (float)k * (1.0f / (float)vertex_total_cap) * (float)M_PI; rotate_point_v2(co_feather, fp_turn, fp_cent, angle, asp_xy); sf_vert = BLI_scanfill_vert_add(&sf_ctx, co_feather); sf_vert->tmp.u = sf_vert_tot; sf_vert->keyindex = SF_KEYINDEX_TEMP_ID; sf_vert_tot++; } tot_feather_quads += vertex_total_cap; open_spline_ranges[open_spline_index].vertex_total_cap_head = vertex_total_cap; } fp_cent = diff_points[tot_diff_point - 1]; fp_turn = diff_feather_points[tot_diff_point - 1]; { const unsigned int vertex_total_cap = CALC_CAP_RESOL; for (k = 1; k < vertex_total_cap; k++) { const float angle = (float)k * (1.0f / (float)vertex_total_cap) * (float)M_PI; rotate_point_v2(co_feather, fp_turn, fp_cent, -angle, asp_xy); sf_vert = BLI_scanfill_vert_add(&sf_ctx, co_feather); sf_vert->tmp.u = sf_vert_tot; sf_vert->keyindex = SF_KEYINDEX_TEMP_ID; sf_vert_tot++; } tot_feather_quads += vertex_total_cap; open_spline_ranges[open_spline_index].vertex_total_cap_tail = vertex_total_cap; } } open_spline_ranges[open_spline_index].is_cyclic = is_cyclic; open_spline_index++; #undef CALC_CAP_RESOL /* end capping */ } } } if (diff_points) { MEM_freeN(diff_points); } if (diff_feather_points) { MEM_freeN(diff_feather_points); } } { unsigned int (*face_array)[4], *face; /* access coords */ float (*face_coords)[3], *cos; /* xy, z 0-1 (1.0 == filled) */ unsigned int sf_tri_tot; rctf bounds; unsigned int face_index; int scanfill_flag = 0; bool is_isect = false; ListBase isect_remvertbase = {NULL, NULL}; ListBase isect_remedgebase = {NULL, NULL}; /* now we have all the splines */ face_coords = MEM_mallocN((sizeof(float) * 3) * sf_vert_tot, "maskrast_face_coords"); /* init bounds */ BLI_rctf_init_minmax(&bounds); /* coords */ cos = (float 
*)face_coords; for (sf_vert = sf_ctx.fillvertbase.first; sf_vert; sf_vert = sf_vert_next) { sf_vert_next = sf_vert->next; copy_v3_v3(cos, sf_vert->co); /* remove so as not to interfere with fill (called after) */ if (sf_vert->keyindex == SF_KEYINDEX_TEMP_ID) { BLI_remlink(&sf_ctx.fillvertbase, sf_vert); } /* bounds */ BLI_rctf_do_minmax_v(&bounds, cos); cos += 3; } /* --- inefficient self-intersect case --- */ /* if self intersections are found, its too trickty to attempt to map vertices * so just realloc and add entirely new vertices - the result of the self-intersect check */ if ((masklay->flag & MASK_LAYERFLAG_FILL_OVERLAP) && (is_isect = BLI_scanfill_calc_self_isect(&sf_ctx, &isect_remvertbase, &isect_remedgebase))) { unsigned int sf_vert_tot_isect = (unsigned int)BLI_listbase_count(&sf_ctx.fillvertbase); unsigned int i = sf_vert_tot; face_coords = MEM_reallocN(face_coords, sizeof(float[3]) * (sf_vert_tot + sf_vert_tot_isect)); cos = (float *)&face_coords[sf_vert_tot][0]; for (sf_vert = sf_ctx.fillvertbase.first; sf_vert; sf_vert = sf_vert->next) { copy_v3_v3(cos, sf_vert->co); sf_vert->tmp.u = i++; cos += 3; } sf_vert_tot += sf_vert_tot_isect; /* we need to calc polys after self intersect */ scanfill_flag |= BLI_SCANFILL_CALC_POLYS; } /* --- end inefficient code --- */ /* main scan-fill */ if ((masklay->flag & MASK_LAYERFLAG_FILL_DISCRETE) == 0) scanfill_flag |= BLI_SCANFILL_CALC_HOLES; sf_tri_tot = (unsigned int)BLI_scanfill_calc_ex(&sf_ctx, scanfill_flag, zvec); if (is_isect) { /* add removed data back, we only need edges for feather, * but add verts back so they get freed along with others */ BLI_movelisttolist(&sf_ctx.fillvertbase, &isect_remvertbase); BLI_movelisttolist(&sf_ctx.filledgebase, &isect_remedgebase); } face_array = MEM_mallocN(sizeof(*face_array) * ((size_t)sf_tri_tot + (size_t)tot_feather_quads), "maskrast_face_index"); face_index = 0; /* faces */ face = (unsigned int *)face_array; for (sf_tri = sf_ctx.fillfacebase.first; sf_tri; sf_tri = sf_tri->next) { *(face++) = sf_tri->v3->tmp.u; *(face++) = sf_tri->v2->tmp.u; *(face++) = sf_tri->v1->tmp.u; *(face++) = TRI_VERT; face_index++; FACE_ASSERT(face - 4, sf_vert_tot); } /* start of feather faces... 
if we have this set, * 'face_index' is kept from loop above */ BLI_assert(face_index == sf_tri_tot); if (tot_feather_quads) { ScanFillEdge *sf_edge; for (sf_edge = sf_ctx.filledgebase.first; sf_edge; sf_edge = sf_edge->next) { if (sf_edge->tmp.c == SF_EDGE_IS_BOUNDARY) { *(face++) = sf_edge->v1->tmp.u; *(face++) = sf_edge->v2->tmp.u; *(face++) = sf_edge->v2->keyindex; *(face++) = sf_edge->v1->keyindex; face_index++; FACE_ASSERT(face - 4, sf_vert_tot); #ifdef USE_SCANFILL_EDGE_WORKAROUND tot_boundary_found++; #endif } } } #ifdef USE_SCANFILL_EDGE_WORKAROUND if (tot_boundary_found != tot_boundary_used) { BLI_assert(tot_boundary_found < tot_boundary_used); } #endif /* feather only splines */ while (open_spline_index > 0) { const unsigned int vertex_offset = open_spline_ranges[--open_spline_index].vertex_offset; unsigned int vertex_total = open_spline_ranges[ open_spline_index].vertex_total; unsigned int vertex_total_cap_head = open_spline_ranges[ open_spline_index].vertex_total_cap_head; unsigned int vertex_total_cap_tail = open_spline_ranges[ open_spline_index].vertex_total_cap_tail; unsigned int k, j; j = vertex_offset; /* subtract one since we reference next vertex triple */ for (k = 0; k < vertex_total - 1; k++, j += 3) { BLI_assert(j == vertex_offset + (k * 3)); *(face++) = j + 3; /* next span */ /* z 1 */ *(face++) = j + 0; /* z 1 */ *(face++) = j + 1; /* z 0 */ *(face++) = j + 4; /* next span */ /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); *(face++) = j + 0; /* z 1 */ *(face++) = j + 3; /* next span */ /* z 1 */ *(face++) = j + 5; /* next span */ /* z 0 */ *(face++) = j + 2; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); } if (open_spline_ranges[open_spline_index].is_cyclic) { *(face++) = vertex_offset + 0; /* next span */ /* z 1 */ *(face++) = j + 0; /* z 1 */ *(face++) = j + 1; /* z 0 */ *(face++) = vertex_offset + 1; /* next span */ /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); *(face++) = j + 0; /* z 1 */ *(face++) = vertex_offset + 0; /* next span */ /* z 1 */ *(face++) = vertex_offset + 2; /* next span */ /* z 0 */ *(face++) = j + 2; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); } else { unsigned int midvidx = vertex_offset; /*************** * cap end 'a' */ j = midvidx + (vertex_total * 3); for (k = 0; k < vertex_total_cap_head - 2; k++, j++) { *(face++) = midvidx + 0; /* z 1 */ *(face++) = midvidx + 0; /* z 1 */ *(face++) = j + 0; /* z 0 */ *(face++) = j + 1; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); } j = vertex_offset + (vertex_total * 3); /* 2 tris that join the original */ *(face++) = midvidx + 0; /* z 1 */ *(face++) = midvidx + 0; /* z 1 */ *(face++) = midvidx + 1; /* z 0 */ *(face++) = j + 0; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); *(face++) = midvidx + 0; /* z 1 */ *(face++) = midvidx + 0; /* z 1 */ *(face++) = j + vertex_total_cap_head - 2; /* z 0 */ *(face++) = midvidx + 2; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); /*************** * cap end 'b' */ /* ... 
same as previous but v 2-3 flipped, and different initial offsets */ j = vertex_offset + (vertex_total * 3) + (vertex_total_cap_head - 1); midvidx = vertex_offset + (vertex_total * 3) - 3; for (k = 0; k < vertex_total_cap_tail - 2; k++, j++) { *(face++) = midvidx; /* z 1 */ *(face++) = midvidx; /* z 1 */ *(face++) = j + 1; /* z 0 */ *(face++) = j + 0; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); } j = vertex_offset + (vertex_total * 3) + (vertex_total_cap_head - 1); /* 2 tris that join the original */ *(face++) = midvidx + 0; /* z 1 */ *(face++) = midvidx + 0; /* z 1 */ *(face++) = j + 0; /* z 0 */ *(face++) = midvidx + 1; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); *(face++) = midvidx + 0; /* z 1 */ *(face++) = midvidx + 0; /* z 1 */ *(face++) = midvidx + 2; /* z 0 */ *(face++) = j + vertex_total_cap_tail - 2; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); } } MEM_freeN(open_spline_ranges); // fprintf(stderr, "%u %u (%u %u), %u\n", face_index, sf_tri_tot + tot_feather_quads, sf_tri_tot, tot_feather_quads, tot_boundary_used - tot_boundary_found); #ifdef USE_SCANFILL_EDGE_WORKAROUND BLI_assert(face_index + (tot_boundary_used - tot_boundary_found) == sf_tri_tot + tot_feather_quads); #else BLI_assert(face_index == sf_tri_tot + tot_feather_quads); #endif { MaskRasterLayer *layer = &mr_handle->layers[masklay_index]; if (BLI_rctf_isect(&default_bounds, &bounds, &bounds)) { #ifdef USE_SCANFILL_EDGE_WORKAROUND layer->face_tot = (sf_tri_tot + tot_feather_quads) - (tot_boundary_used - tot_boundary_found); #else layer->face_tot = (sf_tri_tot + tot_feather_quads); #endif layer->face_coords = face_coords; layer->face_array = face_array; layer->bounds = bounds; layer_bucket_init(layer, pixel_size); BLI_rctf_union(&mr_handle->bounds, &bounds); } else { MEM_freeN(face_coords); MEM_freeN(face_array); layer_bucket_init_dummy(layer); } /* copy as-is */ layer->alpha = masklay->alpha; layer->blend = masklay->blend; layer->blend_flag = masklay->blend_flag; layer->falloff = masklay->falloff; } /* printf("tris %d, feather tris %d\n", sf_tri_tot, tot_feather_quads); */ } /* add trianges */ BLI_scanfill_end_arena(&sf_ctx, sf_arena); } BLI_memarena_free(sf_arena); } /* --------------------------------------------------------------------- */ /* functions that run inside the sampling thread (keep fast!) 
*/ /* --------------------------------------------------------------------- */ /* 2D ray test */ #if 0 static float maskrasterize_layer_z_depth_tri(const float pt[2], const float v1[3], const float v2[3], const float v3[3]) { float w[3]; barycentric_weights_v2(v1, v2, v3, pt, w); return (v1[2] * w[0]) + (v2[2] * w[1]) + (v3[2] * w[2]); } #endif #if 1 static float maskrasterize_layer_z_depth_quad(const float pt[2], const float v1[3], const float v2[3], const float v3[3], const float v4[3]) { float w[4]; barycentric_weights_v2_quad(v1, v2, v3, v4, pt, w); //return (v1[2] * w[0]) + (v2[2] * w[1]) + (v3[2] * w[2]) + (v4[2] * w[3]); return w[2] + w[3]; /* we can make this assumption for small speedup */ } #endif static float maskrasterize_layer_isect(unsigned int *face, float (*cos)[3], const float dist_orig, const float xy[2]) { /* we always cast from same place only need xy */ if (face[3] == TRI_VERT) { /* --- tri --- */ #if 0 /* not essential but avoids unneeded extra lookups */ if ((cos[0][2] < dist_orig) || (cos[1][2] < dist_orig) || (cos[2][2] < dist_orig)) { if (isect_point_tri_v2_cw(xy, cos[face[0]], cos[face[1]], cos[face[2]])) { /* we know all tris are close for now */ return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[1]], cos[face[2]]); } } #else /* we know all tris are close for now */ if (1) { if (isect_point_tri_v2_cw(xy, cos[face[0]], cos[face[1]], cos[face[2]])) { return 0.0f; } } #endif } else { /* --- quad --- */ /* not essential but avoids unneeded extra lookups */ if ((cos[0][2] < dist_orig) || (cos[1][2] < dist_orig) || (cos[2][2] < dist_orig) || (cos[3][2] < dist_orig)) { /* needs work */ #if 1 /* quad check fails for bow-tie, so keep using 2 tri checks */ //if (isect_point_quad_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]], cos[face[3]])) if (isect_point_tri_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]]) || isect_point_tri_v2(xy, cos[face[0]], cos[face[2]], cos[face[3]])) { return maskrasterize_layer_z_depth_quad(xy, cos[face[0]], cos[face[1]], cos[face[2]], cos[face[3]]); } #elif 1 /* don't use isect_point_tri_v2_cw because we could have bow-tie quads */ if (isect_point_tri_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]])) { return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[1]], cos[face[2]]); } else if (isect_point_tri_v2(xy, cos[face[0]], cos[face[2]], cos[face[3]])) { return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[2]], cos[face[3]]); } #else /* cheat - we know first 2 verts are z0.0f and second 2 are z 1.0f */ /* ... 
worth looking into */ #endif } } return 1.0f; } BLI_INLINE unsigned int layer_bucket_index_from_xy(MaskRasterLayer *layer, const float xy[2]) { BLI_assert(BLI_rctf_isect_pt_v(&layer->bounds, xy)); return ( (unsigned int)((xy[0] - layer->bounds.xmin) * layer->buckets_xy_scalar[0])) + (((unsigned int)((xy[1] - layer->bounds.ymin) * layer->buckets_xy_scalar[1])) * layer->buckets_x); } static float layer_bucket_depth_from_xy(MaskRasterLayer *layer, const float xy[2]) { unsigned int index = layer_bucket_index_from_xy(layer, xy); unsigned int *face_index = layer->buckets_face[index]; if (face_index) { unsigned int (*face_array)[4] = layer->face_array; float (*cos)[3] = layer->face_coords; float best_dist = 1.0f; while (*face_index != TRI_TERMINATOR_ID) { const float test_dist = maskrasterize_layer_isect(face_array[*face_index], cos, best_dist, xy); if (test_dist < best_dist) { best_dist = test_dist; /* comparing with 0.0f is OK here because triangles are always zero depth */ if (best_dist == 0.0f) { /* bail early, we're as close as possible */ return 0.0f; } } face_index++; } return best_dist; } else { return 1.0f; } } float BKE_maskrasterize_handle_sample(MaskRasterHandle *mr_handle, const float xy[2]) { /* can't do this because some layers may invert */ /* if (BLI_rctf_isect_pt_v(&mr_handle->bounds, xy)) */ const unsigned int layers_tot = mr_handle->layers_tot; unsigned int i; MaskRasterLayer *layer = mr_handle->layers; /* return value */ float value = 0.0f; for (i = 0; i < layers_tot; i++, layer++) { float value_layer; /* also used as signal for unused layer (when render is disabled) */ if (layer->alpha != 0.0f && BLI_rctf_isect_pt_v(&layer->bounds, xy)) { value_layer = 1.0f - layer_bucket_depth_from_xy(layer, xy); switch (layer->falloff) { case PROP_SMOOTH: /* ease - gives less hard lines for dilate/erode feather */ value_layer = (3.0f * value_layer * value_layer - 2.0f * value_layer * value_layer * value_layer); break; case PROP_SPHERE: value_layer = sqrtf(2.0f * value_layer - value_layer * value_layer); break; case PROP_ROOT: value_layer = sqrtf(value_layer); break; case PROP_SHARP: value_layer = value_layer * value_layer; break; case PROP_INVSQUARE: value_layer = value_layer * (2.0f - value_layer); break; case PROP_LIN: default: /* nothing */ break; } if (layer->blend != MASK_BLEND_REPLACE) { value_layer *= layer->alpha; } } else { value_layer = 0.0f; } if (layer->blend_flag & MASK_BLENDFLAG_INVERT) { value_layer = 1.0f - value_layer; } switch (layer->blend) { case MASK_BLEND_MERGE_ADD: value += value_layer * (1.0f - value); break; case MASK_BLEND_MERGE_SUBTRACT: value -= value_layer * value; break; case MASK_BLEND_ADD: value += value_layer; break; case MASK_BLEND_SUBTRACT: value -= value_layer; break; case MASK_BLEND_LIGHTEN: value = max_ff(value, value_layer); break; case MASK_BLEND_DARKEN: value = min_ff(value, value_layer); break; case MASK_BLEND_MUL: value *= value_layer; break; case MASK_BLEND_REPLACE: value = (value * (1.0f - layer->alpha)) + (value_layer * layer->alpha); break; case MASK_BLEND_DIFFERENCE: value = fabsf(value - value_layer); break; default: /* same as add */ BLI_assert(0); value += value_layer; break; } /* clamp after applying each layer so we don't get * issues subtracting after accumulating over 1.0f */ CLAMP(value, 0.0f, 1.0f); } return value; } /** * \brief Rasterize a buffer from a single mask * * We could get some speedup by inlining #BKE_maskrasterize_handle_sample * and calculating each layer then blending buffers, but this function is only * used by the 
sequencer - so it is better to have the caller thread it.
 *
 * Since #BKE_maskrasterize_handle_sample is used threaded elsewhere,
 * we can simply use OpenMP here for some speedup.
 */
void BKE_maskrasterize_buffer(MaskRasterHandle *mr_handle,
                              const unsigned int width, const unsigned int height,
                              float *buffer)
{
    const float x_inv = 1.0f / (float)width;
    const float y_inv = 1.0f / (float)height;
    const float x_px_ofs = x_inv * 0.5f;
    const float y_px_ofs = y_inv * 0.5f;
#ifdef _MSC_VER
    int y;  /* msvc's OpenMP requires signed loop counters */
    /* ignore sign mismatch */
# pragma warning(push)
# pragma warning(disable:4018)
#else
    unsigned int y;
#endif

#pragma omp parallel for private(y)
    for (y = 0; y < height; y++) {
        unsigned int i = y * width;
        unsigned int x;
        float xy[2];
        xy[1] = ((float)y * y_inv) + y_px_ofs;
        for (x = 0; x < width; x++, i++) {
            xy[0] = ((float)x * x_inv) + x_px_ofs;
            buffer[i] = BKE_maskrasterize_handle_sample(mr_handle, xy);
        }
    }
#ifdef _MSC_VER
# pragma warning(pop)
#endif
}
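Taken together, the public entry points above follow the create/init/sample/free pattern described at the top of the file. A minimal usage sketch - the wrapper function and its arguments are invented for illustration, the Mask is assumed to come from elsewhere, and error handling is omitted:

void example_rasterize_mask(struct Mask *mask, float *buffer,
                            const unsigned int width, const unsigned int height)
{
    MaskRasterHandle *handle = BKE_maskrasterize_handle_new();

    /* build the spatial structure once... */
    BKE_maskrasterize_handle_init(handle, mask, (int)width, (int)height,
                                  true,   /* do_aspect_correct */
                                  true,   /* do_mask_aa */
                                  true);  /* do_feather */

    /* ...then either fill a whole buffer, */
    BKE_maskrasterize_buffer(handle, width, height, buffer);

    /* or look up individual (sub)pixels in the 0-1 space */
    {
        const float xy[2] = {0.5f, 0.5f};
        float center_value = BKE_maskrasterize_handle_sample(handle, xy);
        (void)center_value;
    }

    BKE_maskrasterize_handle_free(handle);
}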
nr_numint.c
/* Copyright 2014-2020 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <assert.h> #include "config.h" #include "gto/grid_ao_drv.h" #include "np_helper/np_helper.h" #include "vhf/fblas.h" #define BOXSIZE 56 int VXCao_empty_blocks(char *empty, unsigned char *non0table, int *shls_slice, int *ao_loc) { if (non0table == NULL || shls_slice == NULL || ao_loc == NULL) { return 0; } const int sh0 = shls_slice[0]; const int sh1 = shls_slice[1]; int bas_id; int box_id = 0; int bound = BOXSIZE; int has0 = 0; empty[box_id] = 1; for (bas_id = sh0; bas_id < sh1; bas_id++) { empty[box_id] &= !non0table[bas_id]; if (ao_loc[bas_id] == bound) { has0 |= empty[box_id]; box_id++; bound += BOXSIZE; empty[box_id] = 1; } else if (ao_loc[bas_id] > bound) { has0 |= empty[box_id]; box_id++; bound += BOXSIZE; empty[box_id] = !non0table[bas_id]; } } return has0; } static void dot_ao_dm(double *vm, double *ao, double *dm, int nao, int nocc, int ngrids, int bgrids, unsigned char *non0table, int *shls_slice, int *ao_loc) { int nbox = (nao+BOXSIZE-1) / BOXSIZE; char empty[nbox]; int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc); const char TRANS_T = 'T'; const char TRANS_N = 'N'; const double D1 = 1; double beta = 0; if (has0) { int box_id, blen, i, j; size_t b0; for (box_id = 0; box_id < nbox; box_id++) { if (!empty[box_id]) { b0 = box_id * BOXSIZE; blen = MIN(nao-b0, BOXSIZE); dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &blen, &D1, ao+b0*ngrids, &ngrids, dm+b0*nocc, &nocc, &beta, vm, &ngrids); beta = 1.0; } } if (beta == 0) { // all empty for (i = 0; i < nocc; i++) { for (j = 0; j < bgrids; j++) { vm[i*ngrids+j] = 0; } } } } else { dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &nao, &D1, ao, &ngrids, dm, &nocc, &beta, vm, &ngrids); } } /* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc] */ void VXCdot_ao_dm(double *vm, double *ao, double *dm, int nao, int nocc, int ngrids, int nbas, unsigned char *non0table, int *shls_slice, int *ao_loc) { const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE; #pragma omp parallel { int ip, ib; #pragma omp for nowait schedule(static) for (ib = 0; ib < nblk; ib++) { ip = ib * BLKSIZE; dot_ao_dm(vm+ip, ao+ip, dm, nao, nocc, ngrids, MIN(ngrids-ip, BLKSIZE), non0table+ib*nbas, shls_slice, ao_loc); } } } /* vv[n,m] = ao1[n,ngrids] * ao2[m,ngrids] */ static void dot_ao_ao(double *vv, double *ao1, double *ao2, int nao, int ngrids, int bgrids, int hermi, unsigned char *non0table, int *shls_slice, int *ao_loc) { int nbox = (nao+BOXSIZE-1) / BOXSIZE; char empty[nbox]; int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc); const char TRANS_T = 'T'; const char TRANS_N = 'N'; const double D1 = 1; if (has0) { int ib, jb, leni, lenj; int j1 = nbox; size_t b0i, b0j; for (ib = 0; ib < nbox; ib++) { if (!empty[ib]) { b0i = ib * BOXSIZE; leni = MIN(nao-b0i, BOXSIZE); if (hermi) { j1 = ib + 1; } for (jb = 0; jb < j1; jb++) { if (!empty[jb]) { b0j = jb * BOXSIZE; lenj = MIN(nao-b0j, BOXSIZE); dgemm_(&TRANS_T, 
&TRANS_N, &lenj, &leni, &bgrids, &D1, ao2+b0j*ngrids, &ngrids, ao1+b0i*ngrids, &ngrids, &D1, vv+b0i*nao+b0j, &nao); } } } } } else { dgemm_(&TRANS_T, &TRANS_N, &nao, &nao, &bgrids, &D1, ao2, &ngrids, ao1, &ngrids, &D1, vv, &nao); } } /* vv[nao,nao] = ao1[i,nao] * ao2[i,nao] */ void VXCdot_ao_ao(double *vv, double *ao1, double *ao2, int nao, int ngrids, int nbas, int hermi, unsigned char *non0table, int *shls_slice, int *ao_loc) { const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE; size_t Nao = nao; NPdset0(vv, Nao * Nao); #pragma omp parallel { int ip, ib; double *v_priv = calloc(Nao*Nao+2, sizeof(double)); #pragma omp for nowait schedule(static) for (ib = 0; ib < nblk; ib++) { ip = ib * BLKSIZE; dot_ao_ao(v_priv, ao1+ip, ao2+ip, nao, ngrids, MIN(ngrids-ip, BLKSIZE), hermi, non0table+ib*nbas, shls_slice, ao_loc); } #pragma omp critical { for (ip = 0; ip < Nao*Nao; ip++) { vv[ip] += v_priv[ip]; } } free(v_priv); } if (hermi != 0) { NPdsymm_triu(nao, vv, hermi); } } // 'nip,np->ip' void VXC_dscale_ao(double *aow, double *ao, double *wv, int comp, int nao, int ngrids) { #pragma omp parallel { size_t Ngrids = ngrids; size_t ao_size = nao * Ngrids; int i, j, ic; double *pao = ao; #pragma omp for schedule(static) for (i = 0; i < nao; i++) { pao = ao + i * Ngrids; for (j = 0; j < Ngrids; j++) { aow[i*Ngrids+j] = pao[j] * wv[j]; } for (ic = 1; ic < comp; ic++) { for (j = 0; j < Ngrids; j++) { aow[i*Ngrids+j] += pao[ic*ao_size+j] * wv[ic*Ngrids+j]; } } } } } // 'ip,ip->p' void VXC_dcontract_rho(double *rho, double *bra, double *ket, int nao, int ngrids) { #pragma omp parallel { size_t Ngrids = ngrids; int nthread = omp_get_num_threads(); int blksize = MAX((Ngrids+nthread-1) / nthread, 1); int ib, b0, b1, i, j; #pragma omp for for (ib = 0; ib < nthread; ib++) { b0 = ib * blksize; b1 = MIN(b0 + blksize, ngrids); for (j = b0; j < b1; j++) { rho[j] = bra[j] * ket[j]; } for (i = 1; i < nao; i++) { for (j = b0; j < b1; j++) { rho[j] += bra[i*Ngrids+j] * ket[i*Ngrids+j]; } } } } } void VXC_vv10nlc(double *Fvec, double *Uvec, double *Wvec, double *vvcoords, double *coords, double *W0p, double *W0, double *K, double *Kp, double *RpW, int vvngrids, int ngrids) { #pragma omp parallel { double DX, DY, DZ, R2; double gp, g, gt, T, F, U, W; int i, j; #pragma omp for schedule(static) for (i = 0; i < ngrids; i++) { F = 0; U = 0; W = 0; for (j = 0; j < vvngrids; j++) { DX = vvcoords[j*3+0] - coords[i*3+0]; DY = vvcoords[j*3+1] - coords[i*3+1]; DZ = vvcoords[j*3+2] - coords[i*3+2]; R2 = DX*DX + DY*DY + DZ*DZ; gp = R2*W0p[j] + Kp[j]; g = R2*W0[i] + K[i]; gt = g + gp; T = RpW[j] / (g*gp*gt); F += T; T *= 1./g + 1./gt; U += T; W += T * R2; } Fvec[i] = F * -1.5; Uvec[i] = U; Wvec[i] = W; } } }
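For reference, the contraction VXC_dcontract_rho performs ('ip,ip->p') can be written as a plain double loop. This standalone sketch (invented names, no OpenMP) shows the expected layout - bra and ket are [nao][ngrids] with the grid index fastest:

#include <stdio.h>

static void contract_rho_ref(double *rho, const double *bra, const double *ket,
                             int nao, int ngrids)
{
    int i, p;
    for (p = 0; p < ngrids; p++) {
        rho[p] = 0;
    }
    /* accumulate the dot product over the AO index at each grid point */
    for (i = 0; i < nao; i++) {
        for (p = 0; p < ngrids; p++) {
            rho[p] += bra[i*ngrids+p] * ket[i*ngrids+p];
        }
    }
}

int main(void)
{
    double bra[2*3] = {1, 2, 3, 4, 5, 6};
    double ket[2*3] = {1, 1, 1, 2, 2, 2};
    double rho[3];
    contract_rho_ref(rho, bra, ket, 2, 3);
    printf("%g %g %g\n", rho[0], rho[1], rho[2]);  /* expect 9 12 15 */
    return 0;
}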
fill_r_4c.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <stdio.h> #include <complex.h> #include "config.h" #include "cint.h" int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env); /* * out[naoi,naoj,naok,comp] in F-order */ void GTOr4c_fill_s1(int (*intor)(), double complex *out, double *buf, int comp, int ish, int jsh, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const int lsh0 = shls_slice[6]; const int lsh1 = shls_slice[7]; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t naol = ao_loc[lsh1] - ao_loc[lsh0]; const size_t nij = naoi * naoj; const int dims[] = {naoi, naoj, naok, naol}; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; out += jp * naoi + ip; int ksh, lsh, k0, l0; int shls[4]; shls[0] = ish; shls[1] = jsh; for (ksh = ksh0; ksh < ksh1; ksh++) { for (lsh = lsh0; lsh < lsh1; lsh++) { shls[2] = ksh; shls[3] = lsh; k0 = ao_loc[ksh] - ao_loc[ksh0]; l0 = ao_loc[lsh] - ao_loc[lsh0]; (*intor)(out+(l0*naok+k0)*nij, dims, shls, atm, natm, bas, nbas, env, cintopt, buf); } } } void GTOr4c_drv(int (*intor)(), void (*fill)(), int (*prescreen)(), double complex *eri, int comp, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const int cache_size = GTOmax_cache_size(intor, shls_slice, 4, atm, natm, bas, nbas, env); #pragma omp parallel { int ish, jsh, ij; double *buf = malloc(sizeof(double) * cache_size); #pragma omp for schedule(dynamic) for (ij = 0; ij < nish*njsh; ij++) { ish = ij / njsh; jsh = ij % njsh; (*fill)(intor, eri, buf, comp, ish, jsh, shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env); } free(buf); } }
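GTOr4c_fill_s1 above addresses out[naoi,naoj,naok,naol] in F-order: element (i,j,k,l) lives at offset i + naoi*(j + naoj*(k + naok*l)), which is how the ip/jp shifts and the (l0*naok + k0)*nij term combine. A small standalone sketch of that offset arithmetic, with a toy ao_loc (the shell sizes are hypothetical, chosen only for illustration):

#include <stdio.h>

static size_t f_order_offset(size_t i, size_t j, size_t k, size_t l,
                             size_t naoi, size_t naoj, size_t naok)
{
    return i + naoi * (j + naoj * (k + naok * l));
}

int main(void)
{
    /* 3 shells with 1, 3 and 5 functions -> ao_loc holds cumulative AO offsets */
    int ao_loc[4] = {0, 1, 4, 9};
    int shls_slice[8] = {0, 3, 0, 3, 0, 3, 0, 3};
    size_t naoi = (size_t)(ao_loc[shls_slice[1]] - ao_loc[shls_slice[0]]);
    size_t naoj = (size_t)(ao_loc[shls_slice[3]] - ao_loc[shls_slice[2]]);
    size_t naok = (size_t)(ao_loc[shls_slice[5]] - ao_loc[shls_slice[4]]);

    /* first AO of shell 1 in every dimension: 1 + 9*(1 + 9*(1 + 9*1)) = 820 */
    printf("naoi=%zu offset=%zu\n", naoi,
           f_order_offset(1, 1, 1, 1, naoi, naoj, naok));
    return 0;
}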
cvAdvDiff_bnd_omp.c
/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds and Ting Yan @ SMU * Based on cvAdvDiff_bnd.c and parallelized with OpenMP * ----------------------------------------------------------------- * Example problem: * * The following is a simple example problem with a banded Jacobian, * solved using CVODE. * The problem is the semi-discrete form of the advection-diffusion * equation in 2-D: * du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2 * on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time * interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions * are posed, and the initial condition is * u(x,y,t=0) = x(2-x)y(1-y)exp(5xy). * The PDE is discretized on a uniform MX+2 by MY+2 grid with * central differencing, and with boundary values eliminated, * leaving an ODE system of size NEQ = MX*MY. * This program solves the problem with the BDF method, Newton * iteration with the SUNBAND linear solver, and a user-supplied * Jacobian routine. * It uses scalar relative and absolute tolerances. * Output is printed at t = .1, .2, ..., 1. * Run statistics (optional outputs) are printed at the end. * * Optionally, we can set the number of threads from environment * variable or command line. To check the current value for number * of threads from environment: * % echo $OMP_NUM_THREADS * * Execution: * * To use the default value or the number of threads from the * environment value, run without arguments: * % ./cvAdvDiff_bnd_omp * The environment variable can be over-ridden with a command line * argument specifying the number of threads to use, e.g: * % ./cvAdvDiff_bnd_omp 5 * ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> /* Header files with a description of contents */ #include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */ #include <nvector/nvector_openmp.h> /* serial N_Vector types, fcts., macros */ #include <sunmatrix/sunmatrix_band.h> /* access to band SUNMatrix */ #include <sunlinsol/sunlinsol_band.h> /* access to band SUNLinearSolver */ #include <cvode/cvode_direct.h> /* access to CVDls interface */ #include <sundials/sundials_types.h> /* definition of type realtype */ #include <sundials/sundials_math.h> /* definition of ABS and EXP */ #ifdef _OPENMP #include <omp.h> #endif /* Problem Constants */ #define XMAX RCONST(2.0) /* domain boundaries */ #define YMAX RCONST(1.0) #define MX 10 /* mesh dimensions */ #define MY 5 #define NEQ MX*MY /* number of equations */ #define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */ #define T0 RCONST(0.0) /* initial time */ #define T1 RCONST(0.1) /* first output time */ #define DTOUT RCONST(0.1) /* output time increment */ #define NOUT 10 /* number of output times */ #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define TWO RCONST(2.0) #define FIVE RCONST(5.0) /* User-defined vector access macro IJth */ /* IJth is defined in order to isolate the translation from the mathematical 2-dimensional structure of the dependent variable vector to the underlying 1-dimensional storage. IJth(vdata,i,j) references the element in the vdata array for u at mesh point (i,j), where 1 <= i <= MX, 1 <= j <= MY. The vdata array is obtained via the macro call vdata = NV_DATA_S(v), where v is an N_Vector. The variables are ordered by the y index j, then by the x index i. 
*/ #define IJth(vdata,i,j) (vdata[(j-1) + (i-1)*MY]) /* Type : UserData (contains grid constants) */ typedef struct { realtype dx, dy, hdcoef, hacoef, vdcoef; int nthreads; } *UserData; /* Private Helper Functions */ static void SetIC(N_Vector u, UserData data); static void PrintHeader(realtype reltol, realtype abstol, realtype umax); static void PrintOutput(realtype t, realtype umax, long int nst); static void PrintFinalStats(void *cvode_mem); /* Private function to check function return values */ static int check_flag(void *flagvalue, const char *funcname, int opt); /* Functions Called by the Solver */ static int f(realtype t, N_Vector u, N_Vector udot, void *user_data); static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J, void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3); /* *------------------------------- * Main Program *------------------------------- */ int main(int argc, char *argv[]) { realtype dx, dy, reltol, abstol, t, tout, umax; N_Vector u; UserData data; SUNMatrix A; SUNLinearSolver LS; void *cvode_mem; int iout, flag; long int nst; int num_threads; u = NULL; data = NULL; A = NULL; LS = NULL; cvode_mem = NULL; /* Set the number of threads to use */ num_threads = 1; /* default value */ #ifdef _OPENMP num_threads = omp_get_max_threads(); /* Overwrite with OMP_NUM_THREADS environment variable */ #endif if (argc > 1) /* overwrite with command line value, if supplied */ num_threads = strtol(argv[1], NULL, 0); /* Create an OpenMP vector */ u = N_VNew_OpenMP(NEQ, num_threads); /* Allocate u vector */ if(check_flag((void*)u, "N_VNew_OpenMP", 0)) return(1); reltol = ZERO; /* Set the tolerances */ abstol = ATOL; data = (UserData) malloc(sizeof *data); /* Allocate data memory */ if(check_flag((void *)data, "malloc", 2)) return(1); dx = data->dx = XMAX/(MX+1); /* Set grid coefficients in data */ dy = data->dy = YMAX/(MY+1); data->hdcoef = ONE/(dx*dx); data->hacoef = HALF/(TWO*dx); data->vdcoef = ONE/(dy*dy); data->nthreads = num_threads; SetIC(u, data); /* Initialize u vector */ /* Call CVodeCreate to create the solver memory and specify the * Backward Differentiation Formula and the use of a Newton iteration */ cvode_mem = CVodeCreate(CV_BDF, CV_NEWTON); if(check_flag((void *)cvode_mem, "CVodeCreate", 0)) return(1); /* Call CVodeInit to initialize the integrator memory and specify the * user's right hand side function in u'=f(t,u), the inital time T0, and * the initial dependent variable vector u. 
*/ flag = CVodeInit(cvode_mem, f, T0, u); if(check_flag(&flag, "CVodeInit", 1)) return(1); /* Call CVodeSStolerances to specify the scalar relative tolerance * and scalar absolute tolerance */ flag = CVodeSStolerances(cvode_mem, reltol, abstol); if (check_flag(&flag, "CVodeSStolerances", 1)) return(1); /* Set the pointer to user-defined data */ flag = CVodeSetUserData(cvode_mem, data); if(check_flag(&flag, "CVodeSetUserData", 1)) return(1); /* Create banded SUNMatrix for use in linear solves -- since this will be factored, set the storage bandwidth to be the sum of upper and lower bandwidths */ A = SUNBandMatrix(NEQ, MY, MY, 2*MY); if(check_flag((void *)A, "SUNBandMatrix", 0)) return(1); /* Create banded SUNLinearSolver object for use by CVode */ LS = SUNBandLinearSolver(u, A); if(check_flag((void *)LS, "SUNBandLinearSolver", 0)) return(1); /* Call CVDlsSetLinearSolver to attach the matrix and linear solver to CVode */ flag = CVDlsSetLinearSolver(cvode_mem, LS, A); if(check_flag(&flag, "CVDlsSetLinearSolver", 1)) return(1); /* Set the user-supplied Jacobian routine Jac */ flag = CVDlsSetJacFn(cvode_mem, Jac); if(check_flag(&flag, "CVDlsSetJacFn", 1)) return(1); /* In loop over output points: call CVode, print results, test for errors */ umax = N_VMaxNorm(u); PrintHeader(reltol, abstol, umax); for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) { flag = CVode(cvode_mem, tout, u, &t, CV_NORMAL); if(check_flag(&flag, "CVode", 1)) break; umax = N_VMaxNorm(u); flag = CVodeGetNumSteps(cvode_mem, &nst); check_flag(&flag, "CVodeGetNumSteps", 1); PrintOutput(t, umax, nst); } PrintFinalStats(cvode_mem); /* Print some final statistics */ printf("num_threads = %i\n\n", num_threads); N_VDestroy_OpenMP(u); /* Free the u vector */ CVodeFree(&cvode_mem); /* Free the integrator memory */ SUNLinSolFree(LS); /* Free the linear solver memory */ SUNMatDestroy(A); /* Free the matrix memory */ free(data); /* Free the user data */ return(0); } /* *------------------------------- * Functions called by the solver *------------------------------- */ /* f routine. Compute f(t,u). */ static int f(realtype t, N_Vector u,N_Vector udot, void *user_data) { realtype uij, udn, uup, ult, urt, hordc, horac, verdc, hdiff, hadv, vdiff; realtype *udata, *dudata; int i, j; UserData data; udata = NV_DATA_OMP(u); dudata = NV_DATA_OMP(udot); /* Extract needed constants from data */ data = (UserData) user_data; hordc = data->hdcoef; horac = data->hacoef; verdc = data->vdcoef; /* Loop over all grid points. */ #pragma omp parallel for default(shared) private(j, i, uij, udn, uup, ult, urt, hdiff, hadv, vdiff) num_threads(data->nthreads) for (j=1; j <= MY; j++) { for (i=1; i <= MX; i++) { /* Extract u at x_i, y_j and four neighboring points */ uij = IJth(udata, i, j); udn = (j == 1) ? ZERO : IJth(udata, i, j-1); uup = (j == MY) ? ZERO : IJth(udata, i, j+1); ult = (i == 1) ? ZERO : IJth(udata, i-1, j); urt = (i == MX) ? ZERO : IJth(udata, i+1, j); /* Set diffusion and advection terms and load into udot */ hdiff = hordc*(ult - TWO*uij + urt); hadv = horac*(urt - ult); vdiff = verdc*(uup - TWO*uij + udn); IJth(dudata, i, j) = hdiff + hadv + vdiff; } } return(0); } /* Jacobian routine. Compute J(t,u). 
*/ static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J, void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3) { sunindextype i, j, k; realtype *kthCol, hordc, horac, verdc; UserData data; /* The components of f = udot that depend on u(i,j) are f(i,j), f(i-1,j), f(i+1,j), f(i,j-1), f(i,j+1), with df(i,j)/du(i,j) = -2 (1/dx^2 + 1/dy^2) df(i-1,j)/du(i,j) = 1/dx^2 + .25/dx (if i > 1) df(i+1,j)/du(i,j) = 1/dx^2 - .25/dx (if i < MX) df(i,j-1)/du(i,j) = 1/dy^2 (if j > 1) df(i,j+1)/du(i,j) = 1/dy^2 (if j < MY) */ data = (UserData) user_data; hordc = data->hdcoef; horac = data->hacoef; verdc = data->vdcoef; #pragma omp parallel for collapse(2) default(shared) private(i, j, k, kthCol) num_threads(data->nthreads) for (j=1; j <= MY; j++) { for (i=1; i <= MX; i++) { k = j-1 + (i-1)*MY; kthCol = SUNBandMatrix_Column(J,k); /* set the kth column of J */ SM_COLUMN_ELEMENT_B(kthCol,k,k) = -TWO*(verdc+hordc); if (i != 1) SM_COLUMN_ELEMENT_B(kthCol,k-MY,k) = hordc + horac; if (i != MX) SM_COLUMN_ELEMENT_B(kthCol,k+MY,k) = hordc - horac; if (j != 1) SM_COLUMN_ELEMENT_B(kthCol,k-1,k) = verdc; if (j != MY) SM_COLUMN_ELEMENT_B(kthCol,k+1,k) = verdc; } } return(0); } /* *------------------------------- * Private helper functions *------------------------------- */ /* Set initial conditions in u vector */ static void SetIC(N_Vector u, UserData data) { int i, j; realtype x, y, dx, dy; realtype *udata; /* Extract needed constants from data */ dx = data->dx; dy = data->dy; /* Set pointer to data array in vector u. */ udata = NV_DATA_OMP(u); /* Load initial profile into u vector */ #pragma omp parallel for default(shared) private(j, i, y, x) for (j=1; j <= MY; j++) { y = j*dy; for (i=1; i <= MX; i++) { x = i*dx; IJth(udata,i,j) = x*(XMAX - x)*y*(YMAX - y)*SUNRexp(FIVE*x*y); } } } /* Print first lines of output (problem description) */ static void PrintHeader(realtype reltol, realtype abstol, realtype umax) { printf("\n2-D Advection-Diffusion Equation\n"); printf("Mesh dimensions = %d X %d\n", MX, MY); printf("Total system size = %d\n", NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n", reltol, abstol); printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol); printf("At t = %g max.norm(u) =%14.6e \n", T0, umax); #else printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol); printf("At t = %g max.norm(u) =%14.6e \n", T0, umax); #endif return; } /* Print current value */ static void PrintOutput(realtype t, realtype umax, long int nst) { #if defined(SUNDIALS_EXTENDED_PRECISION) printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst); #else printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst); #endif return; } /* Get and print some final statistics */ static void PrintFinalStats(void *cvode_mem) { int flag; long int nst, nfe, nsetups, netf, nni, ncfn, nje, nfeLS; flag = CVodeGetNumSteps(cvode_mem, &nst); check_flag(&flag, "CVodeGetNumSteps", 1); flag = CVodeGetNumRhsEvals(cvode_mem, &nfe); check_flag(&flag, "CVodeGetNumRhsEvals", 1); flag = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups); check_flag(&flag, "CVodeGetNumLinSolvSetups", 1); flag = CVodeGetNumErrTestFails(cvode_mem, &netf); check_flag(&flag, "CVodeGetNumErrTestFails", 1); flag = 
CVodeGetNumNonlinSolvIters(cvode_mem, &nni); check_flag(&flag, "CVodeGetNumNonlinSolvIters", 1); flag = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn); check_flag(&flag, "CVodeGetNumNonlinSolvConvFails", 1); flag = CVDlsGetNumJacEvals(cvode_mem, &nje); check_flag(&flag, "CVDlsGetNumJacEvals", 1); flag = CVDlsGetNumRhsEvals(cvode_mem, &nfeLS); check_flag(&flag, "CVDlsGetNumRhsEvals", 1); printf("\nFinal Statistics:\n"); printf("nst = %-6ld nfe = %-6ld nsetups = %-6ld nfeLS = %-6ld nje = %ld\n", nst, nfe, nsetups, nfeLS, nje); printf("nni = %-6ld ncfn = %-6ld netf = %ld\n", nni, ncfn, netf); return; } /* Check function return value... opt == 0 means SUNDIALS function allocates memory so check if returned NULL pointer opt == 1 means SUNDIALS function returns a flag so check if flag >= 0 opt == 2 means function allocates memory so check if returned NULL pointer */ static int check_flag(void *flagvalue, const char *funcname, int opt) { int *errflag; /* Check if SUNDIALS function returned NULL pointer - no memory allocated */ if (opt == 0 && flagvalue == NULL) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } /* Check if flag < 0 */ else if (opt == 1) { errflag = (int *) flagvalue; if (*errflag < 0) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n", funcname, *errflag); return(1); }} /* Check if function returned NULL pointer - no memory allocated */ else if (opt == 2 && flagvalue == NULL) { fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } return(0); }
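/* A minimal standalone sketch (not part of the example above) of the
   grid-to-vector indexing the banded Jacobian relies on: u(i,j) sits at
   linear index (j-1) + (i-1)*MY, so the x-neighbours (i +/- 1) are MY
   entries apart and the y-neighbours (j +/- 1) are adjacent -- which is
   why the band matrix is built with upper and lower half-bandwidth MY.
   The MX/MY values and helper name below are illustrative. */
#include <stdio.h>

#define MX 10
#define MY 5

static int lin_index(int i, int j) { return (j - 1) + (i - 1) * MY; }

int main(void) {
  int i = 4, j = 3, k = lin_index(i, j);
  printf("u(%d,%d) -> k = %d\n", i, j, k);
  printf("x-neighbours: %d %d (offset MY=%d)\n",
         lin_index(i - 1, j), lin_index(i + 1, j), MY);
  printf("y-neighbours: %d %d (offset 1)\n",
         lin_index(i, j - 1), lin_index(i, j + 1));
  return 0;
}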
yoloDection.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #ifndef YOLODECTION_H #define YOLODECTION_H #include <math.h> #include <vector> #include <algorithm> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include "net.h" struct BBoxRect { float xmin; float ymin; float xmax; float ymax; int label; }; class yoloDection { public: yoloDection(); yoloDection(int classN, int boxN, float conf_thre, float nms_thre, ncnn::Mat biases, ncnn::Mat mask, ncnn::Mat anchors_scale, int mask_group): num_class(classN),num_box(boxN), confidence_threshold(conf_thre), nms_threshold(nms_thre), biases(biases), mask(mask), anchors_scale(anchors_scale), mask_group_num(mask_group){} float sigmoid(float x); float intersection_area(const BBoxRect& a, const BBoxRect& b); template <typename T> void qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores, int left, int right); template <typename T> void qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores); void nms_sorted_bboxes(const std::vector<BBoxRect>& bboxes, std::vector<int>& picked, float nms_threshold); int detection(const std::vector<ncnn::Mat>& bottom_blobs, ncnn::Mat& top_blob); public: int num_class; int num_box; float confidence_threshold; float nms_threshold; ncnn::Mat biases; ncnn::Mat mask; ncnn::Mat anchors_scale; int mask_group_num; }; inline float yoloDection::intersection_area(const BBoxRect& a, const BBoxRect& b) { if (a.xmin > b.xmax || a.xmax < b.xmin || a.ymin > b.ymax || a.ymax < b.ymin) { // no intersection return 0.f; } float inter_width = std::min(a.xmax, b.xmax) - std::max(a.xmin, b.xmin); float inter_height = std::min(a.ymax, b.ymax) - std::max(a.ymin, b.ymin); return inter_width * inter_height; } template <typename T> inline void yoloDection::qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores, int left, int right) { int i = left; int j = right; float p = scores[(left + right) / 2]; while (i <= j) { while (scores[i] > p) i++; while (scores[j] < p) j--; if (i <= j) { // swap std::swap(datas[i], datas[j]); std::swap(scores[i], scores[j]); i++; j--; } } if (left < j) qsort_descent_inplace(datas, scores, left, j); if (i < right) qsort_descent_inplace(datas, scores, i, right); } template <typename T> inline void yoloDection::qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores) { if (datas.empty() || scores.empty()) return; qsort_descent_inplace(datas, scores, 0, scores.size() - 1); } inline void yoloDection::nms_sorted_bboxes(const std::vector<BBoxRect>& bboxes, std::vector<int>& picked, float nms_threshold) { picked.clear(); const int n = bboxes.size(); std::vector<float> areas(n); for (int i = 0; i < n; i++) { const BBoxRect& r = bboxes[i]; float width = r.xmax - r.xmin; float height = r.ymax - r.ymin; areas[i] = width * height; } for (int i = 0; i < n; i++) { 
const BBoxRect& a = bboxes[i]; int keep = 1; for (int j = 0; j < (int)picked.size(); j++) { const BBoxRect& b = bboxes[picked[j]]; // intersection over union float inter_area = intersection_area(a, b); float union_area = areas[i] + areas[picked[j]] - inter_area; // float IoU = inter_area / union_area if (inter_area / union_area > nms_threshold) keep = 0; } if (keep) picked.push_back(i); } } inline float yoloDection::sigmoid(float x) { return 1.f / (1.f + exp(-x)); } inline int yoloDection::detection(const std::vector<ncnn::Mat>& bottom_blobs, ncnn::Mat& top_blob) { // gather all box std::vector<BBoxRect> all_bbox_rects; std::vector<float> all_bbox_scores; for (size_t b = 0; b < bottom_blobs.size(); b++) { fprintf(stderr, "biases: %f\n", biases[b]); std::vector< std::vector<BBoxRect> > all_box_bbox_rects; std::vector< std::vector<float> > all_box_bbox_scores; all_box_bbox_rects.resize(num_box); all_box_bbox_scores.resize(num_box); const ncnn::Mat& bottom_top_blobs = bottom_blobs[b]; int w = bottom_top_blobs.w; int h = bottom_top_blobs.h; int channels = bottom_top_blobs.c; //printf("%d %d %d\n", w, h, channels); const int channels_per_box = channels / num_box; // anchor coord + box score + num_class if (channels_per_box != 4 + 1 + num_class) return -1; int mask_offset = b * num_box; int net_w = (int)(anchors_scale[b] * w); int net_h = (int)(anchors_scale[b] * h); //printf("%d %d\n", net_w, net_h); //printf("%d %d %d\n", w, h, channels); //#pragma omp parallel for num_threads(4) for (int pp = 0; pp < num_box; pp++) { int p = pp * channels_per_box; int biases_index = mask[pp + mask_offset]; //printf("%d\n", biases_index); const float bias_w = biases[biases_index * 2]; const float bias_h = biases[biases_index * 2 + 1]; //printf("%f %f\n", bias_w, bias_h); const float* xptr = bottom_top_blobs.channel(p); const float* yptr = bottom_top_blobs.channel(p + 1); const float* wptr = bottom_top_blobs.channel(p + 2); const float* hptr = bottom_top_blobs.channel(p + 3); const float* box_score_ptr = bottom_top_blobs.channel(p + 4); // softmax class scores ncnn::Mat scores = bottom_top_blobs.channel_range(p + 5, num_class); //softmax->forward_inplace(scores, opt); for (int i = 0; i < h; i++) { for (int j = 0; j < w; j++) { // box score float box_score = sigmoid(box_score_ptr[0]); // find class index with max class score int class_index = 0; float class_score = 0.f; for (int q = 0; q < num_class; q++) { float score = sigmoid(scores.channel(q).row(i)[j]); if (score > class_score) { class_index = q; class_score = score; } } //printf( "%d %f %f\n", class_index, box_score, class_score); float confidence = box_score * class_score; if (confidence >= confidence_threshold) { // region box float bbox_cx = (j + sigmoid(xptr[0])) / w; float bbox_cy = (i + sigmoid(yptr[0])) / h; //float bbox_w = exp(wptr[0]) / net_w; //float bbox_h = exp(hptr[0]) / net_h; float bbox_w = exp(wptr[0]) * bias_w / net_w; float bbox_h = exp(hptr[0]) * bias_h / net_h; float bbox_xmin = bbox_cx - bbox_w * 0.5f; float bbox_ymin = bbox_cy - bbox_h * 0.5f; float bbox_xmax = bbox_cx + bbox_w * 0.5f; float bbox_ymax = bbox_cy + bbox_h * 0.5f; BBoxRect c = { bbox_xmin, bbox_ymin, bbox_xmax, bbox_ymax, class_index }; all_box_bbox_rects[pp].push_back(c); all_box_bbox_scores[pp].push_back(confidence); } xptr++; yptr++; wptr++; hptr++; box_score_ptr++; } } } for (int i = 0; i < num_box; i++) { const std::vector<BBoxRect>& box_bbox_rects = all_box_bbox_rects[i]; const std::vector<float>& box_bbox_scores = all_box_bbox_scores[i]; 
all_bbox_rects.insert(all_bbox_rects.end(), box_bbox_rects.begin(), box_bbox_rects.end()); all_bbox_scores.insert(all_bbox_scores.end(), box_bbox_scores.begin(), box_bbox_scores.end()); } } // global sort inplace qsort_descent_inplace(all_bbox_rects, all_bbox_scores); // apply nms std::vector<int> picked; nms_sorted_bboxes(all_bbox_rects, picked, nms_threshold); // select std::vector<BBoxRect> bbox_rects; std::vector<float> bbox_scores; for (int i = 0; i < (int)picked.size(); i++) { int z = picked[i]; bbox_rects.push_back(all_bbox_rects[z]); bbox_scores.push_back(all_bbox_scores[z]); } // fill result int num_detected = bbox_rects.size(); if (num_detected == 0) return 0; top_blob.create(6, num_detected, 4u, 0); if (top_blob.empty()) return -100; for (int i = 0; i < num_detected; i++) { const BBoxRect& r = bbox_rects[i]; float score = bbox_scores[i]; float* outptr = top_blob.row(i); outptr[0] = r.label + 1;// +1 for prepend background class outptr[1] = score; outptr[2] = r.xmin; outptr[3] = r.ymin; outptr[4] = r.xmax; outptr[5] = r.ymax; } return 0; } #endif // YOLODECTION_H
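/* A self-contained sketch of the overlap test that nms_sorted_bboxes()
   above applies pairwise: IoU = intersection / union, with an early-out
   for disjoint boxes. The struct and the sample values are made up for
   illustration. */
#include <stdio.h>

typedef struct { float xmin, ymin, xmax, ymax; } Box;

static float iou(Box a, Box b) {
  if (a.xmin > b.xmax || a.xmax < b.xmin || a.ymin > b.ymax || a.ymax < b.ymin)
    return 0.f; /* no intersection */
  float iw = (a.xmax < b.xmax ? a.xmax : b.xmax) - (a.xmin > b.xmin ? a.xmin : b.xmin);
  float ih = (a.ymax < b.ymax ? a.ymax : b.ymax) - (a.ymin > b.ymin ? a.ymin : b.ymin);
  float inter = iw * ih;
  float uni = (a.xmax - a.xmin) * (a.ymax - a.ymin)
            + (b.xmax - b.xmin) * (b.ymax - b.ymin) - inter;
  return inter / uni;
}

int main(void) {
  Box a = {0.f, 0.f, 2.f, 2.f}, b = {1.f, 1.f, 3.f, 3.f};
  /* inter = 1, union = 4 + 4 - 1 = 7, so IoU ~ 0.143 */
  printf("IoU = %f\n", iou(a, b));
  return 0;
}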
HashmapCPU.h
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #pragma once #include <tbb/concurrent_unordered_map.h> #include <unordered_map> #include "open3d/core/hashmap/CPU/HashmapBufferCPU.hpp" #include "open3d/core/hashmap/DeviceHashmap.h" namespace open3d { namespace core { template <typename Hash, typename KeyEq> class CPUHashmap : public DeviceHashmap<Hash, KeyEq> { public: CPUHashmap(int64_t init_buckets, int64_t init_capacity, int64_t dsize_key, int64_t dsize_value, const Device& device); ~CPUHashmap(); void Rehash(int64_t buckets) override; void Insert(const void* input_keys, const void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count) override; void Activate(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) override; void Find(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) override; void Erase(const void* input_keys, bool* output_masks, int64_t count) override; int64_t GetActiveIndices(addr_t* output_indices) override; int64_t Size() const override; std::vector<int64_t> BucketSizes() const override; float LoadFactor() const override; std::shared_ptr<tbb::concurrent_unordered_map<void*, addr_t, Hash, KeyEq>> GetContext() const { return impl_; } protected: std::shared_ptr<tbb::concurrent_unordered_map<void*, addr_t, Hash, KeyEq>> impl_; std::shared_ptr<CPUHashmapBufferContext> buffer_ctx_; void InsertImpl(const void* input_keys, const void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count); void Allocate(int64_t capacity, int64_t buckets); }; template <typename Hash, typename KeyEq> CPUHashmap<Hash, KeyEq>::CPUHashmap(int64_t init_buckets, int64_t init_capacity, int64_t dsize_key, int64_t dsize_value, const Device& device) : DeviceHashmap<Hash, KeyEq>( init_buckets, init_capacity, /// Dummy for std unordered_map, reserved for. /// other hashmaps. 
dsize_key, dsize_value, device) { Allocate(init_capacity, init_buckets); } template <typename Hash, typename KeyEq> CPUHashmap<Hash, KeyEq>::~CPUHashmap() {} template <typename Hash, typename KeyEq> int64_t CPUHashmap<Hash, KeyEq>::Size() const { return impl_->size(); } template <typename Hash, typename KeyEq> void CPUHashmap<Hash, KeyEq>::Insert(const void* input_keys, const void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count) { int64_t new_size = Size() + count; if (new_size > this->capacity_) { float avg_capacity_per_bucket = float(this->capacity_) / float(this->bucket_count_); int64_t expected_buckets = std::max( this->bucket_count_ * 2, int64_t(std::ceil(new_size / avg_capacity_per_bucket))); Rehash(expected_buckets); } InsertImpl(input_keys, input_values, output_addrs, output_masks, count); } template <typename Hash, typename KeyEq> void CPUHashmap<Hash, KeyEq>::Activate(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) { int64_t new_size = Size() + count; if (new_size > this->capacity_) { float avg_capacity_per_bucket = float(this->capacity_) / float(this->bucket_count_); int64_t expected_buckets = std::max( this->bucket_count_ * 2, int64_t(std::ceil(new_size / avg_capacity_per_bucket))); Rehash(expected_buckets); } InsertImpl(input_keys, nullptr, output_addrs, output_masks, count); } template <typename Hash, typename KeyEq> void CPUHashmap<Hash, KeyEq>::Find(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) { #pragma omp parallel for for (int64_t i = 0; i < count; ++i) { uint8_t* key = const_cast<uint8_t*>( static_cast<const uint8_t*>(input_keys) + this->dsize_key_ * i); auto iter = impl_->find(key); bool flag = (iter != impl_->end()); output_masks[i] = flag; output_addrs[i] = flag ? 
iter->second : 0; } } template <typename Hash, typename KeyEq> void CPUHashmap<Hash, KeyEq>::Erase(const void* input_keys, bool* output_masks, int64_t count) { for (int64_t i = 0; i < count; ++i) { uint8_t* key = const_cast<uint8_t*>( static_cast<const uint8_t*>(input_keys) + this->dsize_key_ * i); auto iter = impl_->find(key); bool flag = (iter != impl_->end()); output_masks[i] = flag; if (flag) { buffer_ctx_->DeviceFree(iter->second); impl_->unsafe_erase(iter); } } this->bucket_count_ = impl_->unsafe_bucket_count(); } template <typename Hash, typename KeyEq> int64_t CPUHashmap<Hash, KeyEq>::GetActiveIndices(addr_t* output_indices) { int64_t count = impl_->size(); int64_t i = 0; for (auto iter = impl_->begin(); iter != impl_->end(); ++iter, ++i) { output_indices[i] = static_cast<int64_t>(iter->second); } return count; } template <typename Hash, typename KeyEq> void CPUHashmap<Hash, KeyEq>::Rehash(int64_t buckets) { int64_t iterator_count = Size(); Tensor active_keys; Tensor active_values; if (iterator_count > 0) { Tensor active_addrs({iterator_count}, Dtype::Int32, this->device_); GetActiveIndices(static_cast<addr_t*>(active_addrs.GetDataPtr())); Tensor active_indices = active_addrs.To(Dtype::Int64); active_keys = this->GetKeyBuffer().IndexGet({active_indices}); active_values = this->GetValueBuffer().IndexGet({active_indices}); } float avg_capacity_per_bucket = float(this->capacity_) / float(this->bucket_count_); int64_t new_capacity = int64_t(std::ceil(buckets * avg_capacity_per_bucket)); Allocate(new_capacity, buckets); if (iterator_count > 0) { Tensor output_addrs({iterator_count}, Dtype::Int32, this->device_); Tensor output_masks({iterator_count}, Dtype::Bool, this->device_); InsertImpl(active_keys.GetDataPtr(), active_values.GetDataPtr(), static_cast<addr_t*>(output_addrs.GetDataPtr()), output_masks.GetDataPtr<bool>(), iterator_count); } impl_->rehash(buckets); this->bucket_count_ = impl_->unsafe_bucket_count(); } template <typename Hash, typename KeyEq> std::vector<int64_t> CPUHashmap<Hash, KeyEq>::BucketSizes() const { int64_t bucket_count = impl_->unsafe_bucket_count(); std::vector<int64_t> ret; for (int64_t i = 0; i < bucket_count; ++i) { ret.push_back(impl_->unsafe_bucket_size(i)); } return ret; } template <typename Hash, typename KeyEq> float CPUHashmap<Hash, KeyEq>::LoadFactor() const { return impl_->load_factor(); } template <typename Hash, typename KeyEq> void CPUHashmap<Hash, KeyEq>::InsertImpl(const void* input_keys, const void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count) { #pragma omp parallel for for (int64_t i = 0; i < count; ++i) { const uint8_t* src_key = static_cast<const uint8_t*>(input_keys) + this->dsize_key_ * i; addr_t dst_kv_addr = buffer_ctx_->DeviceAllocate(); auto dst_kv_iter = buffer_ctx_->ExtractIterator(dst_kv_addr); uint8_t* dst_key = static_cast<uint8_t*>(dst_kv_iter.first); uint8_t* dst_value = static_cast<uint8_t*>(dst_kv_iter.second); std::memcpy(dst_key, src_key, this->dsize_key_); if (input_values != nullptr) { const uint8_t* src_value = static_cast<const uint8_t*>(input_values) + this->dsize_value_ * i; std::memcpy(dst_value, src_value, this->dsize_value_); } else { std::memset(dst_value, 0, this->dsize_value_); } // Try insertion. 
auto res = impl_->insert({dst_key, dst_kv_addr}); output_addrs[i] = dst_kv_addr; output_masks[i] = res.second; } #pragma omp parallel for for (int64_t i = 0; i < count; ++i) { if (!output_masks[i]) { buffer_ctx_->DeviceFree(output_addrs[i]); } } this->bucket_count_ = impl_->unsafe_bucket_count(); } template <typename Hash, typename KeyEq> void CPUHashmap<Hash, KeyEq>::Allocate(int64_t capacity, int64_t buckets) { this->capacity_ = capacity; this->buffer_ = std::make_shared<HashmapBuffer>(this->capacity_, this->dsize_key_, this->dsize_value_, this->device_); buffer_ctx_ = std::make_shared<CPUHashmapBufferContext>( this->capacity_, this->dsize_key_, this->dsize_value_, this->buffer_->GetKeyBuffer(), this->buffer_->GetValueBuffer(), this->buffer_->GetHeap()); buffer_ctx_->Reset(); impl_ = std::make_shared< tbb::concurrent_unordered_map<void*, addr_t, Hash, KeyEq>>( buckets, Hash(this->dsize_key_), KeyEq(this->dsize_key_)); } } // namespace core } // namespace open3d
SP1.c
/////////////////////////// 8INF854 - PARALLEL ARCHITECTURES - ASSIGNMENT #2 ///////////////////////////////////
///////////////////////////// SP1.c - Corentin RAOULT - Adrien Cambillau /////////////////////////////////////
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

////////////////////// function declarations /////////////////////////
int digit_to_int(char d);
void remplirTABrand(int* TAB, int n);
void afficherTAB(int* TAB, int n);
void SP1(int* T, int n);

///////////////////// MAIN ////////////////////////////////////////////////
int main(int argc, char* argv[])
{
    int n;
    n = 1024;
    int* T = malloc(n*sizeof(int));
    remplirTABrand(T,n);
    afficherTAB(T,n);

    double fin;
    double debut = omp_get_wtime(); //--> still an issue: measures the time from a single thread
    SP1(T,n);
    fin = omp_get_wtime();

    afficherTAB(T,n);
    printf("duration = %lf\n", fin - debut);
    free(T);
    return EXIT_SUCCESS;
}

/////////////////// function definitions /////////////////////////////////
void remplirTABrand(int* TAB, int n)
{
    int i;
    srand(time(NULL));
    for(i=0;i<n;i++)
        TAB[i] = rand()%10000; // limited by unsigned long long int
}

void afficherTAB(int* TAB, int n)
{
    int j;
    printf("TAB : { ");
    for(j = 0; j < n; j++) {
        printf(" [%d] ",TAB[j]);
    }
    printf(" }\n");
}

/* Parallel prefix sum: pairwise reduction, recursive scan of the pair
 * sums, then fan-out of the partials back into T. n is assumed to be a
 * power of two. */
void SP1(int* T, int n)
{
    int i;
    if (n==1) return;               // test before allocating, so the base case leaks nothing
    int* S = malloc((n/2)*sizeof(int));

    #pragma omp parallel for
    for(i=0; i <= n/2 - 1; i++) {
        S[i] = T[2*i] + T[2*i+1];
    }

    SP1(S, n/2);

    #pragma omp parallel for
    for(i=0; i <= n/2 - 1; i++) {
        T[2*i+1] = S[i];
        if (i < n/2 - 1)            // the last pair has no right neighbour: avoids writing T[n]
            T[2*i+2] = S[i] + T[2*i+2];
    }

    free(S);
}
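/* SP1() above computes an inclusive prefix sum, so every T[i] should end
   up holding T[0]+...+T[i] of the input array. A sequential reference one
   could diff against it -- a sketch, not part of the assignment: */
#include <stdio.h>

/* reference: out[i] = in[0] + ... + in[i] */
static void seq_scan(const int *in, int *out, int n) {
  int acc = 0;
  for (int i = 0; i < n; i++) { acc += in[i]; out[i] = acc; }
}

int main(void) {
  int in[8] = {1, 2, 3, 4, 5, 6, 7, 8}, out[8];
  seq_scan(in, out, 8);
  for (int i = 0; i < 8; i++) printf("%d ", out[i]); /* 1 3 6 10 15 21 28 36 */
  printf("\n");
  return 0;
}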
GB_binop__ge_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__ge_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__ge_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__ge_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_uint16) // A*D function (colscale): GB (_AxD__ge_uint16) // D*A function (rowscale): GB (_DxB__ge_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__ge_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__ge_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_uint16) // C=scalar+B GB (_bind1st__ge_uint16) // C=scalar+B' GB (_bind1st_tran__ge_uint16) // C=A+scalar GB (_bind2nd__ge_uint16) // C=A'+scalar GB (_bind2nd_tran__ge_uint16) // C type: bool // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_UINT16 || GxB_NO_GE_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__ge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) 
; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ge_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ge_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) 
{ // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
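/* A toy illustration of the generated-code pattern above: the operator is
   boiled down to a handful of macros (GB_BINOP, GB_ATYPE, ...) and the
   loop bodies live in shared template files pulled in with #include. All
   names below are invented stand-ins, not the GraphBLAS API. */
#include <stdio.h>

#define MY_ATYPE int
#define MY_CTYPE int
#define MY_BINOP(z, x, y)  z = ((x) >= (y))

/* stand-in for an #include'd template body */
static void apply_binop(MY_CTYPE *Cx, const MY_ATYPE *Ax,
                        const MY_ATYPE *Bx, int n) {
  for (int p = 0; p < n; p++) MY_BINOP(Cx[p], Ax[p], Bx[p]);
}

int main(void) {
  int A[4] = {3, 1, 4, 1}, B[4] = {2, 2, 2, 2}, C[4];
  apply_binop(C, A, B, 4);
  for (int p = 0; p < 4; p++) printf("%d ", C[p]); /* 1 0 1 0 */
  printf("\n");
  return 0;
}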
bug_nested_proxy_task.c
// RUN: %libomp-compile -lpthread && %libomp-run // The runtime currently does not get dependency information from GCC. // UNSUPPORTED: gcc #include <stdio.h> #include <omp.h> #include <pthread.h> #include "omp_my_sleep.h" /* With task dependencies one can generate proxy tasks from an explicit task being executed by a serial task team. The OpenMP runtime library didn't expect that and tries to free the explicit task that is the parent of the proxy task still working in background. It therefore has incomplete children which triggers a debugging assertion. */ // Compiler-generated code (emulation) typedef long kmp_intptr_t; typedef int kmp_int32; typedef char bool; typedef struct ident { kmp_int32 reserved_1; /**< might be used in Fortran; see above */ kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC identifies this union member */ kmp_int32 reserved_2; /**< not really used in Fortran any more; see above */ #if USE_ITT_BUILD /* but currently used for storing region-specific ITT */ /* contextual information. */ #endif /* USE_ITT_BUILD */ kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for C++ */ char const *psource; /**< String describing the source location. The string is composed of semi-colon separated fields which describe the source file, the function and a pair of line numbers that delimit the construct. */ } ident_t; typedef struct kmp_depend_info { kmp_intptr_t base_addr; size_t len; struct { bool in:1; bool out:1; } flags; } kmp_depend_info_t; struct kmp_task; typedef kmp_int32 (* kmp_routine_entry_t)( kmp_int32, struct kmp_task * ); typedef struct kmp_task { /* GEH: Shouldn't this be aligned somehow? */ void * shareds; /**< pointer to block of pointers to shared vars */ kmp_routine_entry_t routine; /**< pointer to routine to call for executing task */ kmp_int32 part_id; /**< part id for the task */ } kmp_task_t; #ifdef __cplusplus extern "C" { #endif kmp_int32 __kmpc_global_thread_num ( ident_t * ); kmp_task_t* __kmpc_omp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t task_entry ); void __kmpc_proxy_task_completed_ooo ( kmp_task_t *ptask ); kmp_int32 __kmpc_omp_task_with_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list ); kmp_int32 __kmpc_omp_task( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task ); #ifdef __cplusplus } #endif void *target(void *task) { my_sleep( 0.1 ); __kmpc_proxy_task_completed_ooo((kmp_task_t*) task); return NULL; } pthread_t target_thread; // User's code int task_entry(kmp_int32 gtid, kmp_task_t *task) { pthread_create(&target_thread, NULL, &target, task); return 0; } int main() { int dep; #pragma omp taskgroup { /* * Corresponds to: #pragma omp target nowait depend(out: dep) { my_sleep( 0.1 ); } */ kmp_depend_info_t dep_info; dep_info.base_addr = (long) &dep; dep_info.len = sizeof(int); // out = inout per spec and runtime expects this dep_info.flags.in = 1; dep_info.flags.out = 1; kmp_int32 gtid = __kmpc_global_thread_num(NULL); kmp_task_t *proxy_task = __kmpc_omp_task_alloc(NULL,gtid,17,sizeof(kmp_task_t),0,&task_entry); __kmpc_omp_task_with_deps(NULL,gtid,proxy_task,1,&dep_info,0,NULL); #pragma omp task depend(in: dep) { /* * Corresponds to: #pragma omp target nowait { my_sleep( 0.1 ); } */ kmp_task_t *nested_proxy_task = __kmpc_omp_task_alloc(NULL,gtid,17,sizeof(kmp_task_t),0,&task_entry); 
__kmpc_omp_task(NULL,gtid,nested_proxy_task); } } // only check that it didn't crash return 0; }
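/* For reference, the user-level OpenMP that the runtime-entry-point
   emulation above stands in for, per the test's own comments. A sketch
   assuming a compiler with OpenMP 4.5 target offloading; the target
   bodies are placeholders for the sleeps in the test. */
#include <stdio.h>

int main(void) {
  int dep = 0;
  #pragma omp taskgroup
  {
    #pragma omp target nowait depend(out: dep)
    { /* device work would go here */ }

    #pragma omp task depend(in: dep)
    {
      #pragma omp target nowait
      { /* nested device work */ }
    }
  }
  printf("done\n");
  return 0;
}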
GB_unop__identity_uint32_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_uint32_int32 // op(A') function: GB_unop_tran__identity_uint32_int32 // C type: uint32_t // A type: int32_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint32_t z = (uint32_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint32_t z = (uint32_t) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_uint32_int32 ( uint32_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; uint32_t z = (uint32_t) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; uint32_t z = (uint32_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_uint32_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
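/* The bitmap branch above skips entries whose A->b bit is clear. A toy
   version of that masked typecast-and-apply loop, with invented names: */
#include <stdint.h>
#include <stdio.h>

static void masked_cast(uint32_t *Cx, const int32_t *Ax,
                        const int8_t *Ab, int64_t n) {
  for (int64_t p = 0; p < n; p++) {
    if (Ab != NULL && !Ab[p]) continue; /* entry not present in the bitmap */
    Cx[p] = (uint32_t)Ax[p];            /* typecast int32 -> uint32 */
  }
}

int main(void) {
  int32_t  A[4] = {-1, 7, 8, -2};
  int8_t   b[4] = {1, 0, 1, 1};
  uint32_t C[4] = {0, 0, 0, 0};
  masked_cast(C, A, b, 4);
  printf("%u %u %u %u\n", C[0], C[1], C[2], C[3]); /* 4294967295 0 8 4294967294 */
  return 0;
}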
dynmat.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ #include <math.h> #include <stdlib.h> #include "dynmat.h" #define PI 3.14159265358979323846 static void get_dynmat_ij(double *dynamical_matrix, const long num_patom, const long num_satom, const double *fc, const double q[3], const double (*svecs)[3], const long (*multi)[2], const double *mass, const long *s2p_map, const long *p2s_map, const double (*charge_sum)[3][3], const long i, const long j); static void get_dm(double dm_real[3][3], double dm_imag[3][3], const long num_patom, const long num_satom, const double *fc, const double q[3], const double (*svecs)[3], const long (*multi)[2], const long *p2s_map, const double (*charge_sum)[3][3], const long i, const long j, const long k); static double get_dielectric_part(const double q_cart[3], const double dielectric[3][3]); static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */ const double (*G_list)[3], /* [num_G, 3] */ const long num_G, const long num_patom, const double q_cart[3], const double *q_direction_cart, const double dielectric[3][3], const double (*pos)[3], /* [num_patom, 3] */ const double lambda, const double tolerance); static void make_Hermitian(double *mat, const long num_band); static void multiply_borns(double *dd, const double *dd_in, const long num_patom, const double (*born)[3][3]); long dym_get_dynamical_matrix_at_q(double *dynamical_matrix, const long num_patom, const long num_satom, const double *fc, const double q[3], const double (*svecs)[3], const long (*multi)[2], const double *mass, const long *s2p_map, const long *p2s_map, const double (*charge_sum)[3][3], const long with_openmp) { long i, j, ij; if (with_openmp) { #ifdef PHPYOPENMP #pragma omp parallel for #endif for (ij = 0; ij < num_patom * num_patom; ij++) { get_dynmat_ij(dynamical_matrix, num_patom, num_satom, fc, q, svecs, multi, mass, s2p_map, p2s_map, charge_sum, ij / num_patom, /* i */ ij % 
num_patom); /* j */ } } else { for (i = 0; i < num_patom; i++) { for (j = 0; j < num_patom; j++) { get_dynmat_ij(dynamical_matrix, num_patom, num_satom, fc, q, svecs, multi, mass, s2p_map, p2s_map, charge_sum, i, j); } } } make_Hermitian(dynamical_matrix, num_patom * 3); return 0; } void dym_get_recip_dipole_dipole(double *dd, /* [natom, 3, natom, 3, (real,imag)] */ const double *dd_q0, /* [natom, 3, 3, (real,imag)] */ const double (*G_list)[3], /* [num_G, 3] */ const long num_G, const long num_patom, const double q_cart[3], const double *q_direction_cart, /* must be pointer */ const double (*born)[3][3], const double dielectric[3][3], const double (*pos)[3], /* [num_patom, 3] */ const double factor, /* 4pi/V*unit-conv */ const double lambda, const double tolerance) { long i, k, l, adrs, adrs_sum; double *dd_tmp; dd_tmp = NULL; dd_tmp = (double *)malloc(sizeof(double) * num_patom * num_patom * 18); for (i = 0; i < num_patom * num_patom * 18; i++) { dd[i] = 0; dd_tmp[i] = 0; } get_KK(dd_tmp, G_list, num_G, num_patom, q_cart, q_direction_cart, dielectric, pos, lambda, tolerance); multiply_borns(dd, dd_tmp, num_patom, born); for (i = 0; i < num_patom; i++) { for (k = 0; k < 3; k++) { /* alpha */ for (l = 0; l < 3; l++) { /* beta */ adrs = i * num_patom * 9 + k * num_patom * 3 + i * 3 + l; adrs_sum = i * 9 + k * 3 + l; dd[adrs * 2] -= dd_q0[adrs_sum * 2]; dd[adrs * 2 + 1] -= dd_q0[adrs_sum * 2 + 1]; } } } for (i = 0; i < num_patom * num_patom * 18; i++) { dd[i] *= factor; } /* This may not be necessary. */ /* make_Hermitian(dd, num_patom * 3); */ free(dd_tmp); dd_tmp = NULL; } void dym_get_recip_dipole_dipole_q0(double *dd_q0, /* [natom, 3, 3, (real,imag)] */ const double (*G_list)[3], /* [num_G, 3] */ const long num_G, const long num_patom, const double (*born)[3][3], const double dielectric[3][3], const double (*pos)[3], /* [num_patom, 3] */ const double lambda, const double tolerance) { long i, j, k, l, adrs_tmp, adrs, adrsT; double zero_vec[3]; double *dd_tmp1, *dd_tmp2; dd_tmp1 = NULL; dd_tmp1 = (double *)malloc(sizeof(double) * num_patom * num_patom * 18); dd_tmp2 = NULL; dd_tmp2 = (double *)malloc(sizeof(double) * num_patom * num_patom * 18); for (i = 0; i < num_patom * num_patom * 18; i++) { dd_tmp1[i] = 0; dd_tmp2[i] = 0; } zero_vec[0] = 0; zero_vec[1] = 0; zero_vec[2] = 0; get_KK(dd_tmp1, G_list, num_G, num_patom, zero_vec, NULL, dielectric, pos, lambda, tolerance); multiply_borns(dd_tmp2, dd_tmp1, num_patom, born); for (i = 0; i < num_patom * 18; i++) { dd_q0[i] = 0; } for (i = 0; i < num_patom; i++) { for (k = 0; k < 3; k++) { /* alpha */ for (l = 0; l < 3; l++) { /* beta */ adrs = i * 9 + k * 3 + l; for (j = 0; j < num_patom; j++) { adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l; dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2]; dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1]; } } } } /* Summation over another atomic index */ /* for (j = 0; j < num_patom; j++) { */ /* for (k = 0; k < 3; k++) { /\* alpha *\/ */ /* for (l = 0; l < 3; l++) { /\* beta *\/ */ /* adrs = j * 9 + k * 3 + l; */ /* for (i = 0; i < num_patom; i++) { */ /* adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l ; */ /* dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2]; */ /* dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1]; */ /* } */ /* } */ /* } */ /* } */ for (i = 0; i < num_patom; i++) { for (k = 0; k < 3; k++) { /* alpha */ for (l = 0; l < 3; l++) { /* beta */ adrs = i * 9 + k * 3 + l; adrsT = i * 9 + l * 3 + k; dd_q0[adrs * 2] += dd_q0[adrsT * 2]; dd_q0[adrs * 2] /= 2; dd_q0[adrsT * 2] = dd_q0[adrs * 
2]; dd_q0[adrs * 2 + 1] -= dd_q0[adrsT * 2 + 1]; dd_q0[adrs * 2 + 1] /= 2; dd_q0[adrsT * 2 + 1] = -dd_q0[adrs * 2 + 1]; } } } free(dd_tmp1); dd_tmp1 = NULL; free(dd_tmp2); dd_tmp2 = NULL; } void dym_get_charge_sum(double (*charge_sum)[3][3], const long num_patom, const double factor, /* 4pi/V*unit-conv and denominator */ const double q_cart[3], const double (*born)[3][3]) { long i, j, k, a, b; double(*q_born)[3]; q_born = (double(*)[3])malloc(sizeof(double[3]) * num_patom); for (i = 0; i < num_patom; i++) { for (j = 0; j < 3; j++) { q_born[i][j] = 0; } } for (i = 0; i < num_patom; i++) { for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) { q_born[i][j] += q_cart[k] * born[i][k][j]; } } } for (i = 0; i < num_patom; i++) { for (j = 0; j < num_patom; j++) { for (a = 0; a < 3; a++) { for (b = 0; b < 3; b++) { charge_sum[i * num_patom + j][a][b] = q_born[i][a] * q_born[j][b] * factor; } } } } free(q_born); q_born = NULL; } /* fc[num_patom, num_satom, 3, 3] */ /* dm[num_comm_points, num_patom * 3, num_patom *3] */ /* comm_points[num_satom / num_patom, 3] */ /* shortest_vectors[:, 3] */ /* multiplicities[num_satom, num_patom, 2] */ void dym_transform_dynmat_to_fc(double *fc, const double *dm, const double (*comm_points)[3], const double (*svecs)[3], const long (*multi)[2], const double *masses, const long *s2pp_map, const long *fc_index_map, const long num_patom, const long num_satom) { long i, j, k, l, m, N, adrs, m_pair, i_pair, svecs_adrs; double coef, phase, cos_phase, sin_phase; N = num_satom / num_patom; for (i = 0; i < num_patom * num_satom * 9; i++) { fc[i] = 0; } for (i = 0; i < num_patom; i++) { for (j = 0; j < num_satom; j++) { i_pair = j * num_patom + i; m_pair = multi[i_pair][0]; svecs_adrs = multi[i_pair][1]; coef = sqrt(masses[i] * masses[s2pp_map[j]]) / N; for (k = 0; k < N; k++) { cos_phase = 0; sin_phase = 0; for (l = 0; l < m_pair; l++) { phase = 0; for (m = 0; m < 3; m++) { phase -= comm_points[k][m] * svecs[svecs_adrs + l][m]; } cos_phase += cos(phase * 2 * PI); sin_phase += sin(phase * 2 * PI); } cos_phase /= m_pair; sin_phase /= m_pair; for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { adrs = k * num_patom * num_patom * 18 + i * num_patom * 18 + l * num_patom * 6 + s2pp_map[j] * 6 + m * 2; fc[fc_index_map[i] * num_satom * 9 + j * 9 + l * 3 + m] += (dm[adrs] * cos_phase - dm[adrs + 1] * sin_phase) * coef; } } } } } } static void get_dynmat_ij(double *dynamical_matrix, const long num_patom, const long num_satom, const double *fc, const double q[3], const double (*svecs)[3], const long (*multi)[2], const double *mass, const long *s2p_map, const long *p2s_map, const double (*charge_sum)[3][3], const long i, const long j) { long k, l, adrs; double mass_sqrt; double dm_real[3][3], dm_imag[3][3]; mass_sqrt = sqrt(mass[i] * mass[j]); for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { dm_real[k][l] = 0; dm_imag[k][l] = 0; } } for (k = 0; k < num_satom; k++) { /* Lattice points of right index of fc */ if (s2p_map[k] != p2s_map[j]) { continue; } get_dm(dm_real, dm_imag, num_patom, num_satom, fc, q, svecs, multi, p2s_map, charge_sum, i, j, k); } for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { adrs = (i * 3 + k) * num_patom * 3 + j * 3 + l; dynamical_matrix[adrs * 2] = dm_real[k][l] / mass_sqrt; dynamical_matrix[adrs * 2 + 1] = dm_imag[k][l] / mass_sqrt; } } } static void get_dm(double dm_real[3][3], double dm_imag[3][3], const long num_patom, const long num_satom, const double *fc, const double q[3], const double (*svecs)[3], const long (*multi)[2], const long *p2s_map, const 
double (*charge_sum)[3][3], const long i, const long j, const long k) { long l, m, i_pair, m_pair, adrs; double phase, cos_phase, sin_phase, fc_elem; cos_phase = 0; sin_phase = 0; i_pair = k * num_patom + i; m_pair = multi[i_pair][0]; adrs = multi[i_pair][1]; for (l = 0; l < m_pair; l++) { phase = 0; for (m = 0; m < 3; m++) { phase += q[m] * svecs[adrs + l][m]; } cos_phase += cos(phase * 2 * PI) / m_pair; sin_phase += sin(phase * 2 * PI) / m_pair; } for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { if (charge_sum) { fc_elem = (fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m] + charge_sum[i * num_patom + j][l][m]); } else { fc_elem = fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m]; } dm_real[l][m] += fc_elem * cos_phase; dm_imag[l][m] += fc_elem * sin_phase; } } } static double get_dielectric_part(const double q_cart[3], const double dielectric[3][3]) { long i, j; double x[3]; double sum; for (i = 0; i < 3; i++) { x[i] = 0; for (j = 0; j < 3; j++) { x[i] += dielectric[i][j] * q_cart[j]; } } sum = 0; for (i = 0; i < 3; i++) { sum += q_cart[i] * x[i]; } return sum; } static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */ const double (*G_list)[3], /* [num_G, 3] */ const long num_G, const long num_patom, const double q_cart[3], const double *q_direction_cart, const double dielectric[3][3], const double (*pos)[3], /* [num_patom, 3] */ const double lambda, const double tolerance) { long i, j, k, l, g, adrs; double q_K[3]; double norm, cos_phase, sin_phase, phase, dielectric_part, exp_damp, L2; double KK[3][3]; L2 = 4 * lambda * lambda; /* sum over K = G + q and over G (i.e. q=0) */ /* q_direction has values for summation over K at Gamma point. */ /* q_direction is NULL for summation over G */ for (g = 0; g < num_G; g++) { norm = 0; for (i = 0; i < 3; i++) { q_K[i] = G_list[g][i] + q_cart[i]; norm += q_K[i] * q_K[i]; } if (sqrt(norm) < tolerance) { if (!q_direction_cart) { continue; } else { dielectric_part = get_dielectric_part(q_direction_cart, dielectric); for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { KK[i][j] = q_direction_cart[i] * q_direction_cart[j] / dielectric_part; } } } } else { dielectric_part = get_dielectric_part(q_K, dielectric); exp_damp = exp(-dielectric_part / L2); for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { KK[i][j] = q_K[i] * q_K[j] / dielectric_part * exp_damp; } } } for (i = 0; i < num_patom; i++) { for (j = 0; j < num_patom; j++) { phase = 0; for (k = 0; k < 3; k++) { /* For D-type dynamical matrix */ /* phase += (pos[i][k] - pos[j][k]) * q_K[k]; */ /* For C-type dynamical matrix */ phase += (pos[i][k] - pos[j][k]) * G_list[g][k]; } phase *= 2 * PI; cos_phase = cos(phase); sin_phase = sin(phase); for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l; dd_part[adrs * 2] += KK[k][l] * cos_phase; dd_part[adrs * 2 + 1] += KK[k][l] * sin_phase; } } } } } } static void make_Hermitian(double *mat, const long num_band) { long i, j, adrs, adrsT; for (i = 0; i < num_band; i++) { for (j = i; j < num_band; j++) { adrs = i * num_band + j * 1; adrs *= 2; adrsT = j * num_band + i * 1; adrsT *= 2; /* real part */ mat[adrs] += mat[adrsT]; mat[adrs] /= 2; /* imaginary part */ mat[adrs + 1] -= mat[adrsT + 1]; mat[adrs + 1] /= 2; /* store */ mat[adrsT] = mat[adrs]; mat[adrsT + 1] = -mat[adrs + 1]; } } } static void multiply_borns(double *dd, const double *dd_in, const long num_patom, const double (*born)[3][3]) { long i, j, k, l, m, n, adrs, adrs_in; double zz; for (i = 0; i < num_patom; i++) { for 
(j = 0; j < num_patom; j++) { for (k = 0; k < 3; k++) { /* alpha */ for (l = 0; l < 3; l++) { /* beta */ adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l; for (m = 0; m < 3; m++) { /* alpha' */ for (n = 0; n < 3; n++) { /* beta' */ adrs_in = i * num_patom * 9 + m * num_patom * 3 + j * 3 + n; zz = born[i][m][k] * born[j][n][l]; dd[adrs * 2] += dd_in[adrs_in * 2] * zz; dd[adrs * 2 + 1] += dd_in[adrs_in * 2 + 1] * zz; } } } } } } }
3d7pt.c
/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* All four sizes are required; otherwise they would be used uninitialized. */
  if (argc <= 4) {
    fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1]) + 2;
  Ny = atoi(argv[2]) + 2;
  Nz = atoi(argv[3]) + 2;
  Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double***) * 2);
  A[0] = (double ***) malloc(sizeof(double**) * Nz);
  A[1] = (double ***) malloc(sizeof(double**) * Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double**) malloc(sizeof(double*) * Ny);
    A[1][i] = (double**) malloc(sizeof(double*) * Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double*) malloc(sizeof(double) * Nx);
      A[1][i][j] = (double*) malloc(sizeof(double) * Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int) * 5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 4;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables (boundary planes and both time buffers included,
  // so the stencil never reads uninitialized memory)
  // srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] =
                alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k]
                      + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k]
                      + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (commented out: freeing here causes performance degradation)
  /*
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
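The #pragma scop region above is kept serial on purpose: it is the input to a source-to-source tiler, and the tile_size list parametrizes that transformation. For comparison only, here is a minimal hedged sketch of the same time step with the space loops parallelized by hand; step_parallel is a hypothetical helper, not part of the benchmark, and it assumes the double-buffered array layout allocated in main above.

/* Hedged sketch: one time step of the same 7-point stencil with the
   space loops parallelized via OpenMP.  Not part of the original bench. */
#include <omp.h>

static void step_parallel(double ****A, int t, int Nx, int Ny, int Nz,
                          double alpha, double beta)
{
  int i, j, k;
  /* Each thread gets a slab of i-planes; j and k must be private. */
#pragma omp parallel for private(j, k) schedule(static)
  for (i = 1; i < Nz - 1; i++)
    for (j = 1; j < Ny - 1; j++)
      for (k = 1; k < Nx - 1; k++)
        A[(t + 1) % 2][i][j][k] =
            alpha * A[t % 2][i][j][k]
          + beta * (A[t % 2][i - 1][j][k] + A[t % 2][i][j - 1][k]
                  + A[t % 2][i][j][k - 1] + A[t % 2][i + 1][j][k]
                  + A[t % 2][i][j + 1][k] + A[t % 2][i][j][k + 1]);
}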
omp-expand.c
/* Expansion pass for OMP directives. Outlines regions of certain OMP directives to separate functions, converts others into explicit calls to the runtime library (libgomp) and so forth Copyright (C) 2005-2020 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "memmodel.h" #include "backend.h" #include "target.h" #include "rtl.h" #include "tree.h" #include "gimple.h" #include "cfghooks.h" #include "tree-pass.h" #include "ssa.h" #include "optabs.h" #include "cgraph.h" #include "pretty-print.h" #include "diagnostic-core.h" #include "fold-const.h" #include "stor-layout.h" #include "cfganal.h" #include "internal-fn.h" #include "gimplify.h" #include "gimple-iterator.h" #include "gimplify-me.h" #include "gimple-walk.h" #include "tree-cfg.h" #include "tree-into-ssa.h" #include "tree-ssa.h" #include "splay-tree.h" #include "cfgloop.h" #include "omp-general.h" #include "omp-offload.h" #include "tree-cfgcleanup.h" #include "alloc-pool.h" #include "symbol-summary.h" #include "gomp-constants.h" #include "gimple-pretty-print.h" #include "hsa-common.h" #include "stringpool.h" #include "attribs.h" /* OMP region information. Every parallel and workshare directive is enclosed between two markers, the OMP_* directive and a corresponding GIMPLE_OMP_RETURN statement. */ struct omp_region { /* The enclosing region. */ struct omp_region *outer; /* First child region. */ struct omp_region *inner; /* Next peer region. */ struct omp_region *next; /* Block containing the omp directive as its last stmt. */ basic_block entry; /* Block containing the GIMPLE_OMP_RETURN as its last stmt. */ basic_block exit; /* Block containing the GIMPLE_OMP_CONTINUE as its last stmt. */ basic_block cont; /* If this is a combined parallel+workshare region, this is a list of additional arguments needed by the combined parallel+workshare library call. */ vec<tree, va_gc> *ws_args; /* The code for the omp directive of this region. */ enum gimple_code type; /* Schedule kind, only used for GIMPLE_OMP_FOR type regions. */ enum omp_clause_schedule_kind sched_kind; /* Schedule modifiers. */ unsigned char sched_modifiers; /* True if this is a combined parallel+workshare region. */ bool is_combined_parallel; /* Copy of fd.lastprivate_conditional != 0. */ bool has_lastprivate_conditional; /* The ordered stmt if type is GIMPLE_OMP_ORDERED and it has a depend clause. */ gomp_ordered *ord_stmt; }; static struct omp_region *root_omp_region; static bool omp_any_child_fn_dumped; static void expand_omp_build_assign (gimple_stmt_iterator *, tree, tree, bool = false); static gphi *find_phi_with_arg_on_edge (tree, edge); static void expand_omp (struct omp_region *region); /* Return true if REGION is a combined parallel+workshare region. 
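(That is, a parallel region whose body consists of nothing but a single workshare directive; the flag is computed by determine_parallel_type below.)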
*/ static inline bool is_combined_parallel (struct omp_region *region) { return region->is_combined_parallel; } /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB is the immediate dominator of PAR_ENTRY_BB, return true if there are no data dependencies that would prevent expanding the parallel directive at PAR_ENTRY_BB as a combined parallel+workshare region. When expanding a combined parallel+workshare region, the call to the child function may need additional arguments in the case of GIMPLE_OMP_FOR regions. In some cases, these arguments are computed out of variables passed in from the parent to the child via 'struct .omp_data_s'. For instance: #pragma omp parallel for schedule (guided, i * 4) for (j ...) Is lowered into: # BLOCK 2 (PAR_ENTRY_BB) .omp_data_o.i = i; #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598) # BLOCK 3 (WS_ENTRY_BB) .omp_data_i = &.omp_data_o; D.1667 = .omp_data_i->i; D.1598 = D.1667 * 4; #pragma omp for schedule (guided, D.1598) When we outline the parallel region, the call to the child function 'bar.omp_fn.0' will need the value D.1598 in its argument list, but that value is computed *after* the call site. So, in principle we cannot do the transformation. To see whether the code in WS_ENTRY_BB blocks the combined parallel+workshare call, we collect all the variables used in the GIMPLE_OMP_FOR header check whether they appear on the LHS of any statement in WS_ENTRY_BB. If so, then we cannot emit the combined call. FIXME. If we had the SSA form built at this point, we could merely hoist the code in block 3 into block 2 and be done with it. But at this point we don't have dataflow information and though we could hack something up here, it is really not worth the aggravation. */ static bool workshare_safe_to_combine_p (basic_block ws_entry_bb) { struct omp_for_data fd; gimple *ws_stmt = last_stmt (ws_entry_bb); if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS) return true; gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR); if (gimple_omp_for_kind (ws_stmt) != GF_OMP_FOR_KIND_FOR) return false; omp_extract_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL); if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST) return false; if (fd.iter_type != long_integer_type_node) return false; /* FIXME. We give up too easily here. If any of these arguments are not constants, they will likely involve variables that have been mapped into fields of .omp_data_s for sharing with the child function. With appropriate data flow, it would be possible to see through this. */ if (!is_gimple_min_invariant (fd.loop.n1) || !is_gimple_min_invariant (fd.loop.n2) || !is_gimple_min_invariant (fd.loop.step) || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size))) return false; return true; } /* Adjust CHUNK_SIZE from SCHEDULE clause, depending on simd modifier presence (SIMD_SCHEDULE). */ static tree omp_adjust_chunk_size (tree chunk_size, bool simd_schedule) { if (!simd_schedule || integer_zerop (chunk_size)) return chunk_size; poly_uint64 vf = omp_max_vf (); if (known_eq (vf, 1U)) return chunk_size; tree type = TREE_TYPE (chunk_size); chunk_size = fold_build2 (PLUS_EXPR, type, chunk_size, build_int_cst (type, vf - 1)); return fold_build2 (BIT_AND_EXPR, type, chunk_size, build_int_cst (type, -vf)); } /* Collect additional arguments needed to emit a combined parallel+workshare call. WS_STMT is the workshare directive being expanded. 
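For a GIMPLE_OMP_FOR the collected arguments are the loop bounds and step (plus the chunk size, if any), each converted to long; for GIMPLE_OMP_SECTIONS it is just the section count. As a sketch of the simd chunk rounding done by omp_adjust_chunk_size above, with vf the vectorization factor:

     chunk = (chunk + vf - 1) & -vf;

e.g. vf == 8 turns a chunk size of 13 into 16.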
*/ static vec<tree, va_gc> * get_ws_args_for (gimple *par_stmt, gimple *ws_stmt) { tree t; location_t loc = gimple_location (ws_stmt); vec<tree, va_gc> *ws_args; if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt)) { struct omp_for_data fd; tree n1, n2; omp_extract_for_data (for_stmt, &fd, NULL); n1 = fd.loop.n1; n2 = fd.loop.n2; if (gimple_omp_for_combined_into_p (for_stmt)) { tree innerc = omp_find_clause (gimple_omp_parallel_clauses (par_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n1 = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n2 = OMP_CLAUSE_DECL (innerc); } vec_alloc (ws_args, 3 + (fd.chunk_size != 0)); t = fold_convert_loc (loc, long_integer_type_node, n1); ws_args->quick_push (t); t = fold_convert_loc (loc, long_integer_type_node, n2); ws_args->quick_push (t); t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step); ws_args->quick_push (t); if (fd.chunk_size) { t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size); t = omp_adjust_chunk_size (t, fd.simd_schedule); ws_args->quick_push (t); } return ws_args; } else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS) { /* Number of sections is equal to the number of edges from the GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to the exit of the sections region. */ basic_block bb = single_succ (gimple_bb (ws_stmt)); t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1); vec_alloc (ws_args, 1); ws_args->quick_push (t); return ws_args; } gcc_unreachable (); } /* Discover whether REGION is a combined parallel+workshare region. */ static void determine_parallel_type (struct omp_region *region) { basic_block par_entry_bb, par_exit_bb; basic_block ws_entry_bb, ws_exit_bb; if (region == NULL || region->inner == NULL || region->exit == NULL || region->inner->exit == NULL || region->inner->cont == NULL) return; /* We only support parallel+for and parallel+sections. */ if (region->type != GIMPLE_OMP_PARALLEL || (region->inner->type != GIMPLE_OMP_FOR && region->inner->type != GIMPLE_OMP_SECTIONS)) return; /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and WS_EXIT_BB -> PAR_EXIT_BB. */ par_entry_bb = region->entry; par_exit_bb = region->exit; ws_entry_bb = region->inner->entry; ws_exit_bb = region->inner->exit; /* Give up for task reductions on the parallel, while it is implementable, adding another big set of APIs or slowing down the normal paths is not acceptable. */ tree pclauses = gimple_omp_parallel_clauses (last_stmt (par_entry_bb)); if (omp_find_clause (pclauses, OMP_CLAUSE__REDUCTEMP_)) return; if (single_succ (par_entry_bb) == ws_entry_bb && single_succ (ws_exit_bb) == par_exit_bb && workshare_safe_to_combine_p (ws_entry_bb) && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb)) || (last_and_only_stmt (ws_entry_bb) && last_and_only_stmt (par_exit_bb)))) { gimple *par_stmt = last_stmt (par_entry_bb); gimple *ws_stmt = last_stmt (ws_entry_bb); if (region->inner->type == GIMPLE_OMP_FOR) { /* If this is a combined parallel loop, we need to determine whether or not to use the combined library calls. There are two cases where we do not apply the transformation: static loops and any kind of ordered loop. In the first case, we already open code the loop so there is no need to do anything else. In the latter case, the combined parallel loop call would still need extra synchronization to implement ordered semantics, so there would not be any gain in using the combined call. 
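For example (a sketch of the mapping, not an exhaustive list): '#pragma omp parallel for schedule(dynamic)' can be entered through a single GOMP_parallel_loop_dynamic call, while schedule(static) or an ordered clause keeps the separate parallel and loop entry points.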
*/ tree clauses = gimple_omp_for_clauses (ws_stmt); tree c = omp_find_clause (clauses, OMP_CLAUSE_SCHEDULE); if (c == NULL || ((OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_MASK) == OMP_CLAUSE_SCHEDULE_STATIC) || omp_find_clause (clauses, OMP_CLAUSE_ORDERED) || omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_) || ((c = omp_find_clause (clauses, OMP_CLAUSE__CONDTEMP_)) && POINTER_TYPE_P (TREE_TYPE (OMP_CLAUSE_DECL (c))))) return; } else if (region->inner->type == GIMPLE_OMP_SECTIONS && (omp_find_clause (gimple_omp_sections_clauses (ws_stmt), OMP_CLAUSE__REDUCTEMP_) || omp_find_clause (gimple_omp_sections_clauses (ws_stmt), OMP_CLAUSE__CONDTEMP_))) return; region->is_combined_parallel = true; region->inner->is_combined_parallel = true; region->ws_args = get_ws_args_for (par_stmt, ws_stmt); } } /* Debugging dumps for parallel regions. */ void dump_omp_region (FILE *, struct omp_region *, int); void debug_omp_region (struct omp_region *); void debug_all_omp_regions (void); /* Dump the parallel region tree rooted at REGION. */ void dump_omp_region (FILE *file, struct omp_region *region, int indent) { fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index, gimple_code_name[region->type]); if (region->inner) dump_omp_region (file, region->inner, indent + 4); if (region->cont) { fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "", region->cont->index); } if (region->exit) fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "", region->exit->index); else fprintf (file, "%*s[no exit marker]\n", indent, ""); if (region->next) dump_omp_region (file, region->next, indent); } DEBUG_FUNCTION void debug_omp_region (struct omp_region *region) { dump_omp_region (stderr, region, 0); } DEBUG_FUNCTION void debug_all_omp_regions (void) { dump_omp_region (stderr, root_omp_region, 0); } /* Create a new parallel region starting at STMT inside region PARENT. */ static struct omp_region * new_omp_region (basic_block bb, enum gimple_code type, struct omp_region *parent) { struct omp_region *region = XCNEW (struct omp_region); region->outer = parent; region->entry = bb; region->type = type; if (parent) { /* This is a nested region. Add it to the list of inner regions in PARENT. */ region->next = parent->inner; parent->inner = region; } else { /* This is a toplevel region. Add it to the list of toplevel regions in ROOT_OMP_REGION. */ region->next = root_omp_region; root_omp_region = region; } return region; } /* Release the memory associated with the region tree rooted at REGION. */ static void free_omp_region_1 (struct omp_region *region) { struct omp_region *i, *n; for (i = region->inner; i ; i = n) { n = i->next; free_omp_region_1 (i); } free (region); } /* Release the memory for the entire omp region tree. */ void omp_free_regions (void) { struct omp_region *r, *n; for (r = root_omp_region; r ; r = n) { n = r->next; free_omp_region_1 (r); } root_omp_region = NULL; } /* A convenience function to build an empty GIMPLE_COND with just the condition. */ static gcond * gimple_build_cond_empty (tree cond) { enum tree_code pred_code; tree lhs, rhs; gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs); return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE); } /* Return true if a parallel REGION is within a declare target function or within a target region and is not a part of a gridified target. 
*/ static bool parallel_needs_hsa_kernel_p (struct omp_region *region) { bool indirect = false; for (region = region->outer; region; region = region->outer) { if (region->type == GIMPLE_OMP_PARALLEL) indirect = true; else if (region->type == GIMPLE_OMP_TARGET) { gomp_target *tgt_stmt = as_a <gomp_target *> (last_stmt (region->entry)); if (omp_find_clause (gimple_omp_target_clauses (tgt_stmt), OMP_CLAUSE__GRIDDIM_)) return indirect; else return true; } } if (lookup_attribute ("omp declare target", DECL_ATTRIBUTES (current_function_decl))) return true; return false; } /* Change DECL_CONTEXT of CHILD_FNDECL to that of the parent function. Add CHILD_FNDECL to decl chain of the supercontext of the block ENTRY_BLOCK - this is the block which originally contained the code from which CHILD_FNDECL was created. Together, these actions ensure that the debug info for the outlined function will be emitted with the correct lexical scope. */ static void adjust_context_and_scope (struct omp_region *region, tree entry_block, tree child_fndecl) { tree parent_fndecl = NULL_TREE; gimple *entry_stmt; /* OMP expansion expands inner regions before outer ones, so if we e.g. have explicit task region nested in parallel region, when expanding the task region current_function_decl will be the original source function, but we actually want to use as context the child function of the parallel. */ for (region = region->outer; region && parent_fndecl == NULL_TREE; region = region->outer) switch (region->type) { case GIMPLE_OMP_PARALLEL: case GIMPLE_OMP_TASK: case GIMPLE_OMP_TEAMS: entry_stmt = last_stmt (region->entry); parent_fndecl = gimple_omp_taskreg_child_fn (entry_stmt); break; case GIMPLE_OMP_TARGET: entry_stmt = last_stmt (region->entry); parent_fndecl = gimple_omp_target_child_fn (as_a <gomp_target *> (entry_stmt)); break; default: break; } if (parent_fndecl == NULL_TREE) parent_fndecl = current_function_decl; DECL_CONTEXT (child_fndecl) = parent_fndecl; if (entry_block != NULL_TREE && TREE_CODE (entry_block) == BLOCK) { tree b = BLOCK_SUPERCONTEXT (entry_block); if (TREE_CODE (b) == BLOCK) { DECL_CHAIN (child_fndecl) = BLOCK_VARS (b); BLOCK_VARS (b) = child_fndecl; } } } /* Build the function calls to GOMP_parallel etc to actually generate the parallel operation. REGION is the parallel region being expanded. BB is the block where to insert the code. WS_ARGS will be set if this is a call to a combined parallel+workshare construct, it contains the list of additional arguments needed by the workshare construct. */ static void expand_parallel_call (struct omp_region *region, basic_block bb, gomp_parallel *entry_stmt, vec<tree, va_gc> *ws_args) { tree t, t1, t2, val, cond, c, clauses, flags; gimple_stmt_iterator gsi; gimple *stmt; enum built_in_function start_ix; int start_ix2; location_t clause_loc; vec<tree, va_gc> *args; clauses = gimple_omp_parallel_clauses (entry_stmt); /* Determine what flavor of GOMP_parallel we will be emitting. */ start_ix = BUILT_IN_GOMP_PARALLEL; tree rtmp = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_); if (rtmp) start_ix = BUILT_IN_GOMP_PARALLEL_REDUCTIONS; else if (is_combined_parallel (region)) { switch (region->inner->type) { case GIMPLE_OMP_FOR: gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO); switch (region->inner->sched_kind) { case OMP_CLAUSE_SCHEDULE_RUNTIME: /* For lastprivate(conditional:), our implementation requires monotonic behavior. 
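For schedule(runtime) this means falling back to the plain (monotonic) runtime entry point when a conditional lastprivate is present; otherwise the nonmonotonic or maybe-nonmonotonic runtime variants are selected by the index computation below.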
*/ if (region->inner->has_lastprivate_conditional != 0) start_ix2 = 3; else if ((region->inner->sched_modifiers & OMP_CLAUSE_SCHEDULE_NONMONOTONIC) != 0) start_ix2 = 6; else if ((region->inner->sched_modifiers & OMP_CLAUSE_SCHEDULE_MONOTONIC) == 0) start_ix2 = 7; else start_ix2 = 3; break; case OMP_CLAUSE_SCHEDULE_DYNAMIC: case OMP_CLAUSE_SCHEDULE_GUIDED: if ((region->inner->sched_modifiers & OMP_CLAUSE_SCHEDULE_MONOTONIC) == 0 && !region->inner->has_lastprivate_conditional) { start_ix2 = 3 + region->inner->sched_kind; break; } /* FALLTHRU */ default: start_ix2 = region->inner->sched_kind; break; } start_ix2 += (int) BUILT_IN_GOMP_PARALLEL_LOOP_STATIC; start_ix = (enum built_in_function) start_ix2; break; case GIMPLE_OMP_SECTIONS: start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS; break; default: gcc_unreachable (); } } /* By default, the value of NUM_THREADS is zero (selected at run time) and there is no conditional. */ cond = NULL_TREE; val = build_int_cst (unsigned_type_node, 0); flags = build_int_cst (unsigned_type_node, 0); c = omp_find_clause (clauses, OMP_CLAUSE_IF); if (c) cond = OMP_CLAUSE_IF_EXPR (c); c = omp_find_clause (clauses, OMP_CLAUSE_NUM_THREADS); if (c) { val = OMP_CLAUSE_NUM_THREADS_EXPR (c); clause_loc = OMP_CLAUSE_LOCATION (c); } else clause_loc = gimple_location (entry_stmt); c = omp_find_clause (clauses, OMP_CLAUSE_PROC_BIND); if (c) flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c)); /* Ensure 'val' is of the correct type. */ val = fold_convert_loc (clause_loc, unsigned_type_node, val); /* If we found the clause 'if (cond)', build either (cond != 0) or (cond ? val : 1u). */ if (cond) { cond = gimple_boolify (cond); if (integer_zerop (val)) val = fold_build2_loc (clause_loc, EQ_EXPR, unsigned_type_node, cond, build_int_cst (TREE_TYPE (cond), 0)); else { basic_block cond_bb, then_bb, else_bb; edge e, e_then, e_else; tree tmp_then, tmp_else, tmp_join, tmp_var; tmp_var = create_tmp_var (TREE_TYPE (val)); if (gimple_in_ssa_p (cfun)) { tmp_then = make_ssa_name (tmp_var); tmp_else = make_ssa_name (tmp_var); tmp_join = make_ssa_name (tmp_var); } else { tmp_then = tmp_var; tmp_else = tmp_var; tmp_join = tmp_var; } e = split_block_after_labels (bb); cond_bb = e->src; bb = e->dest; remove_edge (e); then_bb = create_empty_bb (cond_bb); else_bb = create_empty_bb (then_bb); set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb); set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb); stmt = gimple_build_cond_empty (cond); gsi = gsi_start_bb (cond_bb); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); gsi = gsi_start_bb (then_bb); expand_omp_build_assign (&gsi, tmp_then, val, true); gsi = gsi_start_bb (else_bb); expand_omp_build_assign (&gsi, tmp_else, build_int_cst (unsigned_type_node, 1), true); make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE); make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE); add_bb_to_loop (then_bb, cond_bb->loop_father); add_bb_to_loop (else_bb, cond_bb->loop_father); e_then = make_edge (then_bb, bb, EDGE_FALLTHRU); e_else = make_edge (else_bb, bb, EDGE_FALLTHRU); if (gimple_in_ssa_p (cfun)) { gphi *phi = create_phi_node (tmp_join, bb); add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION); add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION); } val = tmp_join; } gsi = gsi_start_bb (bb); val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } gsi = gsi_last_nondebug_bb (bb); t = gimple_omp_parallel_data_arg (entry_stmt); if (t == NULL) t1 = null_pointer_node; else t1 = build_fold_addr_expr (t); tree 
child_fndecl = gimple_omp_parallel_child_fn (entry_stmt); t2 = build_fold_addr_expr (child_fndecl); vec_alloc (args, 4 + vec_safe_length (ws_args)); args->quick_push (t2); args->quick_push (t1); args->quick_push (val); if (ws_args) args->splice (*ws_args); args->quick_push (flags); t = build_call_expr_loc_vec (UNKNOWN_LOCATION, builtin_decl_explicit (start_ix), args); if (rtmp) { tree type = TREE_TYPE (OMP_CLAUSE_DECL (rtmp)); t = build2 (MODIFY_EXPR, type, OMP_CLAUSE_DECL (rtmp), fold_convert (type, fold_convert (pointer_sized_int_node, t))); } force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (hsa_gen_requested_p () && parallel_needs_hsa_kernel_p (region)) { cgraph_node *child_cnode = cgraph_node::get (child_fndecl); hsa_register_kernel (child_cnode); } } /* Build the function call to GOMP_task to actually generate the task operation. BB is the block where to insert the code. */ static void expand_task_call (struct omp_region *region, basic_block bb, gomp_task *entry_stmt) { tree t1, t2, t3; gimple_stmt_iterator gsi; location_t loc = gimple_location (entry_stmt); tree clauses = gimple_omp_task_clauses (entry_stmt); tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF); tree untied = omp_find_clause (clauses, OMP_CLAUSE_UNTIED); tree mergeable = omp_find_clause (clauses, OMP_CLAUSE_MERGEABLE); tree depend = omp_find_clause (clauses, OMP_CLAUSE_DEPEND); tree finalc = omp_find_clause (clauses, OMP_CLAUSE_FINAL); tree priority = omp_find_clause (clauses, OMP_CLAUSE_PRIORITY); unsigned int iflags = (untied ? GOMP_TASK_FLAG_UNTIED : 0) | (mergeable ? GOMP_TASK_FLAG_MERGEABLE : 0) | (depend ? GOMP_TASK_FLAG_DEPEND : 0); bool taskloop_p = gimple_omp_task_taskloop_p (entry_stmt); tree startvar = NULL_TREE, endvar = NULL_TREE, step = NULL_TREE; tree num_tasks = NULL_TREE; bool ull = false; if (taskloop_p) { gimple *g = last_stmt (region->outer->entry); gcc_assert (gimple_code (g) == GIMPLE_OMP_FOR && gimple_omp_for_kind (g) == GF_OMP_FOR_KIND_TASKLOOP); struct omp_for_data fd; omp_extract_for_data (as_a <gomp_for *> (g), &fd, NULL); startvar = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); endvar = omp_find_clause (OMP_CLAUSE_CHAIN (startvar), OMP_CLAUSE__LOOPTEMP_); startvar = OMP_CLAUSE_DECL (startvar); endvar = OMP_CLAUSE_DECL (endvar); step = fold_convert_loc (loc, fd.iter_type, fd.loop.step); if (fd.loop.cond_code == LT_EXPR) iflags |= GOMP_TASK_FLAG_UP; tree tclauses = gimple_omp_for_clauses (g); num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_NUM_TASKS); if (num_tasks) num_tasks = OMP_CLAUSE_NUM_TASKS_EXPR (num_tasks); else { num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_GRAINSIZE); if (num_tasks) { iflags |= GOMP_TASK_FLAG_GRAINSIZE; num_tasks = OMP_CLAUSE_GRAINSIZE_EXPR (num_tasks); } else num_tasks = integer_zero_node; } num_tasks = fold_convert_loc (loc, long_integer_type_node, num_tasks); if (ifc == NULL_TREE) iflags |= GOMP_TASK_FLAG_IF; if (omp_find_clause (tclauses, OMP_CLAUSE_NOGROUP)) iflags |= GOMP_TASK_FLAG_NOGROUP; ull = fd.iter_type == long_long_unsigned_type_node; if (omp_find_clause (clauses, OMP_CLAUSE_REDUCTION)) iflags |= GOMP_TASK_FLAG_REDUCTION; } else if (priority) iflags |= GOMP_TASK_FLAG_PRIORITY; tree flags = build_int_cst (unsigned_type_node, iflags); tree cond = boolean_true_node; if (ifc) { if (taskloop_p) { tree t = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc)); t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t, build_int_cst (unsigned_type_node, GOMP_TASK_FLAG_IF), build_int_cst (unsigned_type_node, 0)); flags = 
fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, t); } else cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc)); } if (finalc) { tree t = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (finalc)); t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t, build_int_cst (unsigned_type_node, GOMP_TASK_FLAG_FINAL), build_int_cst (unsigned_type_node, 0)); flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, t); } if (depend) depend = OMP_CLAUSE_DECL (depend); else depend = build_int_cst (ptr_type_node, 0); if (priority) priority = fold_convert (integer_type_node, OMP_CLAUSE_PRIORITY_EXPR (priority)); else priority = integer_zero_node; gsi = gsi_last_nondebug_bb (bb); tree t = gimple_omp_task_data_arg (entry_stmt); if (t == NULL) t2 = null_pointer_node; else t2 = build_fold_addr_expr_loc (loc, t); t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt)); t = gimple_omp_task_copy_fn (entry_stmt); if (t == NULL) t3 = null_pointer_node; else t3 = build_fold_addr_expr_loc (loc, t); if (taskloop_p) t = build_call_expr (ull ? builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP_ULL) : builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP), 11, t1, t2, t3, gimple_omp_task_arg_size (entry_stmt), gimple_omp_task_arg_align (entry_stmt), flags, num_tasks, priority, startvar, endvar, step); else t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK), 9, t1, t2, t3, gimple_omp_task_arg_size (entry_stmt), gimple_omp_task_arg_align (entry_stmt), cond, flags, depend, priority); force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } /* Build the function call to GOMP_taskwait_depend to actually generate the taskwait operation. BB is the block where to insert the code. */ static void expand_taskwait_call (basic_block bb, gomp_task *entry_stmt) { tree clauses = gimple_omp_task_clauses (entry_stmt); tree depend = omp_find_clause (clauses, OMP_CLAUSE_DEPEND); if (depend == NULL_TREE) return; depend = OMP_CLAUSE_DECL (depend); gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb); tree t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT_DEPEND), 1, depend); force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } /* Build the function call to GOMP_teams_reg to actually generate the host teams operation. REGION is the teams region being expanded. BB is the block where to insert the code. */ static void expand_teams_call (basic_block bb, gomp_teams *entry_stmt) { tree clauses = gimple_omp_teams_clauses (entry_stmt); tree num_teams = omp_find_clause (clauses, OMP_CLAUSE_NUM_TEAMS); if (num_teams == NULL_TREE) num_teams = build_int_cst (unsigned_type_node, 0); else { num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams); num_teams = fold_convert (unsigned_type_node, num_teams); } tree thread_limit = omp_find_clause (clauses, OMP_CLAUSE_THREAD_LIMIT); if (thread_limit == NULL_TREE) thread_limit = build_int_cst (unsigned_type_node, 0); else { thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit); thread_limit = fold_convert (unsigned_type_node, thread_limit); } gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb); tree t = gimple_omp_teams_data_arg (entry_stmt), t1; if (t == NULL) t1 = null_pointer_node; else t1 = build_fold_addr_expr (t); tree child_fndecl = gimple_omp_teams_child_fn (entry_stmt); tree t2 = build_fold_addr_expr (child_fndecl); vec<tree, va_gc> *args; vec_alloc (args, 5); args->quick_push (t2); args->quick_push (t1); args->quick_push (num_teams); args->quick_push (thread_limit); /* For future extensibility. 
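The fifth argument is currently always zero; the emitted call thus has the shape (sketch) GOMP_teams_reg (child_fn, &.omp_data_o, num_teams, thread_limit, 0).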
*/ args->quick_push (build_zero_cst (unsigned_type_node)); t = build_call_expr_loc_vec (UNKNOWN_LOCATION, builtin_decl_explicit (BUILT_IN_GOMP_TEAMS_REG), args); force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */ static tree vec2chain (vec<tree, va_gc> *v) { tree chain = NULL_TREE, t; unsigned ix; FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t) { DECL_CHAIN (t) = chain; chain = t; } return chain; } /* Remove barriers in REGION->EXIT's block. Note that this is only valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be removed. */ static void remove_exit_barrier (struct omp_region *region) { gimple_stmt_iterator gsi; basic_block exit_bb; edge_iterator ei; edge e; gimple *stmt; int any_addressable_vars = -1; exit_bb = region->exit; /* If the parallel region doesn't return, we don't have REGION->EXIT block at all. */ if (! exit_bb) return; /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of statements that can appear in between are extremely limited -- no memory operations at all. Here, we allow nothing at all, so the only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */ gsi = gsi_last_nondebug_bb (exit_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN); gsi_prev_nondebug (&gsi); if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL) return; FOR_EACH_EDGE (e, ei, exit_bb->preds) { gsi = gsi_last_nondebug_bb (e->src); if (gsi_end_p (gsi)) continue; stmt = gsi_stmt (gsi); if (gimple_code (stmt) == GIMPLE_OMP_RETURN && !gimple_omp_return_nowait_p (stmt)) { /* OpenMP 3.0 tasks unfortunately prevent this optimization in many cases. If there could be tasks queued, the barrier might be needed to let the tasks run before some local variable of the parallel that the task uses as shared runs out of scope. The task can be spawned either from within current function (this would be easy to check) or from some function it calls and gets passed an address of such a variable. */ if (any_addressable_vars < 0) { gomp_parallel *parallel_stmt = as_a <gomp_parallel *> (last_stmt (region->entry)); tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt); tree local_decls, block, decl; unsigned ix; any_addressable_vars = 0; FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl) if (TREE_ADDRESSABLE (decl)) { any_addressable_vars = 1; break; } for (block = gimple_block (stmt); !any_addressable_vars && block && TREE_CODE (block) == BLOCK; block = BLOCK_SUPERCONTEXT (block)) { for (local_decls = BLOCK_VARS (block); local_decls; local_decls = DECL_CHAIN (local_decls)) if (TREE_ADDRESSABLE (local_decls)) { any_addressable_vars = 1; break; } if (block == gimple_block (parallel_stmt)) break; } } if (!any_addressable_vars) gimple_omp_return_set_nowait (stmt); } } } static void remove_exit_barriers (struct omp_region *region) { if (region->type == GIMPLE_OMP_PARALLEL) remove_exit_barrier (region); if (region->inner) { region = region->inner; remove_exit_barriers (region); while (region->next) { region = region->next; remove_exit_barriers (region); } } } /* Optimize omp_get_thread_num () and omp_get_num_threads () calls. 
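(This lets repeated calls inside one parallel body be CSEd away.)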
These can't be declared as const functions, but within one parallel body they are constant, so they can be transformed there into __builtin_omp_get_{thread_num,num_threads} () which are declared const. Similarly for task body, except that in untied task omp_get_thread_num () can change at any task scheduling point. */ static void optimize_omp_library_calls (gimple *entry_stmt) { basic_block bb; gimple_stmt_iterator gsi; tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM); tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree); tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree); bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK && omp_find_clause (gimple_omp_task_clauses (entry_stmt), OMP_CLAUSE_UNTIED) != NULL); FOR_EACH_BB_FN (bb, cfun) for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *call = gsi_stmt (gsi); tree decl; if (is_gimple_call (call) && (decl = gimple_call_fndecl (call)) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl) && DECL_INITIAL (decl) == NULL) { tree built_in; if (DECL_NAME (decl) == thr_num_id) { /* In #pragma omp task untied omp_get_thread_num () can change during the execution of the task region. */ if (untied_task) continue; built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM); } else if (DECL_NAME (decl) == num_thr_id) built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); else continue; if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in) || gimple_call_num_args (call) != 0) continue; if (flag_exceptions && !TREE_NOTHROW (decl)) continue; if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)), TREE_TYPE (TREE_TYPE (built_in)))) continue; gimple_call_set_fndecl (call, built_in); } } } /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be regimplified. */ static tree expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *) { tree t = *tp; /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */ if (VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t)) return t; if (TREE_CODE (t) == ADDR_EXPR) recompute_tree_invariant_for_addr_expr (t); *walk_subtrees = !TYPE_P (t) && !DECL_P (t); return NULL_TREE; } /* Prepend or append TO = FROM assignment before or after *GSI_P. */ static void expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from, bool after) { bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to); from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE, !after, after ? GSI_CONTINUE_LINKING : GSI_SAME_STMT); gimple *stmt = gimple_build_assign (to, from); if (after) gsi_insert_after (gsi_p, stmt, GSI_CONTINUE_LINKING); else gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT); if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL) || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL)) { gimple_stmt_iterator gsi = gsi_for_stmt (stmt); gimple_regimplify_operands (stmt, &gsi); } } /* Expand the OpenMP parallel or task directive starting at REGION. 
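As a sketch of the overall outlining scheme,

     #pragma omp parallel
       body;

turns into a new child function (named along the lines of foo._omp_fn.0; the exact naming is an implementation detail) that receives &.omp_data_o and executes BODY, while the directive itself is replaced by a call such as

     GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0);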
*/ static void expand_omp_taskreg (struct omp_region *region) { basic_block entry_bb, exit_bb, new_bb; struct function *child_cfun; tree child_fn, block, t; gimple_stmt_iterator gsi; gimple *entry_stmt, *stmt; edge e; vec<tree, va_gc> *ws_args; entry_stmt = last_stmt (region->entry); if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK && gimple_omp_task_taskwait_p (entry_stmt)) { new_bb = region->entry; gsi = gsi_last_nondebug_bb (region->entry); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK); gsi_remove (&gsi, true); expand_taskwait_call (new_bb, as_a <gomp_task *> (entry_stmt)); return; } child_fn = gimple_omp_taskreg_child_fn (entry_stmt); child_cfun = DECL_STRUCT_FUNCTION (child_fn); entry_bb = region->entry; if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK) exit_bb = region->cont; else exit_bb = region->exit; if (is_combined_parallel (region)) ws_args = region->ws_args; else ws_args = NULL; if (child_cfun->cfg) { /* Due to inlining, it may happen that we have already outlined the region, in which case all we need to do is make the sub-graph unreachable and emit the parallel call. */ edge entry_succ_e, exit_succ_e; entry_succ_e = single_succ_edge (entry_bb); gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TEAMS); gsi_remove (&gsi, true); new_bb = entry_bb; if (exit_bb) { exit_succ_e = single_succ_edge (exit_bb); make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU); } remove_edge_and_dominated_blocks (entry_succ_e); } else { unsigned srcidx, dstidx, num; /* If the parallel region needs data sent from the parent function, then the very first statement (except possible tree profile counter updates) of the parallel body is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since &.OMP_DATA_O is passed as an argument to the child function, we need to replace it with the argument as seen by the child function. In most cases, this will end up being the identity assignment .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had a function call that has been inlined, the original PARM_DECL .OMP_DATA_I may have been converted into a different local variable. In which case, we need to keep the assignment. */ if (gimple_omp_taskreg_data_arg (entry_stmt)) { basic_block entry_succ_bb = single_succ_p (entry_bb) ? single_succ (entry_bb) : FALLTHRU_EDGE (entry_bb)->dest; tree arg; gimple *parcopy_stmt = NULL; for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi)) { gimple *stmt; gcc_assert (!gsi_end_p (gsi)); stmt = gsi_stmt (gsi); if (gimple_code (stmt) != GIMPLE_ASSIGN) continue; if (gimple_num_ops (stmt) == 2) { tree arg = gimple_assign_rhs1 (stmt); /* We're ignore the subcode because we're effectively doing a STRIP_NOPS. */ if (TREE_CODE (arg) == ADDR_EXPR && (TREE_OPERAND (arg, 0) == gimple_omp_taskreg_data_arg (entry_stmt))) { parcopy_stmt = stmt; break; } } } gcc_assert (parcopy_stmt != NULL); arg = DECL_ARGUMENTS (child_fn); if (!gimple_in_ssa_p (cfun)) { if (gimple_assign_lhs (parcopy_stmt) == arg) gsi_remove (&gsi, true); else { /* ?? Is setting the subcode really necessary ?? */ gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg)); gimple_assign_set_rhs1 (parcopy_stmt, arg); } } else { tree lhs = gimple_assign_lhs (parcopy_stmt); gcc_assert (SSA_NAME_VAR (lhs) == arg); /* We'd like to set the rhs to the default def in the child_fn, but it's too early to create ssa names in the child_fn. Instead, we set the rhs to the parm. 
In move_sese_region_to_fn, we introduce a default def for the parm, map the parm to it's default def, and once we encounter this stmt, replace the parm with the default def. */ gimple_assign_set_rhs1 (parcopy_stmt, arg); update_stmt (parcopy_stmt); } } /* Declare local variables needed in CHILD_CFUN. */ block = DECL_INITIAL (child_fn); BLOCK_VARS (block) = vec2chain (child_cfun->local_decls); /* The gimplifier could record temporaries in parallel/task block rather than in containing function's local_decls chain, which would mean cgraph missed finalizing them. Do it now. */ for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t)) if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t)) varpool_node::finalize_decl (t); DECL_SAVED_TREE (child_fn) = NULL; /* We'll create a CFG for child_fn, so no gimple body is needed. */ gimple_set_body (child_fn, NULL); TREE_USED (block) = 1; /* Reset DECL_CONTEXT on function arguments. */ for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t)) DECL_CONTEXT (t) = child_fn; /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK, so that it can be moved to the child function. */ gsi = gsi_last_nondebug_bb (entry_bb); stmt = gsi_stmt (gsi); gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL || gimple_code (stmt) == GIMPLE_OMP_TASK || gimple_code (stmt) == GIMPLE_OMP_TEAMS)); e = split_block (entry_bb, stmt); gsi_remove (&gsi, true); entry_bb = e->dest; edge e2 = NULL; if (gimple_code (entry_stmt) != GIMPLE_OMP_TASK) single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; else { e2 = make_edge (e->src, BRANCH_EDGE (entry_bb)->dest, EDGE_ABNORMAL); gcc_assert (e2->dest == region->exit); remove_edge (BRANCH_EDGE (entry_bb)); set_immediate_dominator (CDI_DOMINATORS, e2->dest, e->src); gsi = gsi_last_nondebug_bb (region->exit); gcc_assert (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN); gsi_remove (&gsi, true); } /* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR. */ if (exit_bb) { gsi = gsi_last_nondebug_bb (exit_bb); gcc_assert (!gsi_end_p (gsi) && (gimple_code (gsi_stmt (gsi)) == (e2 ? GIMPLE_OMP_CONTINUE : GIMPLE_OMP_RETURN))); stmt = gimple_build_return (NULL); gsi_insert_after (&gsi, stmt, GSI_SAME_STMT); gsi_remove (&gsi, true); } /* Move the parallel region into CHILD_CFUN. */ if (gimple_in_ssa_p (cfun)) { init_tree_ssa (child_cfun); init_ssa_operands (child_cfun); child_cfun->gimple_df->in_ssa_p = true; block = NULL_TREE; } else block = gimple_block (entry_stmt); new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block); if (exit_bb) single_succ_edge (new_bb)->flags = EDGE_FALLTHRU; if (e2) { basic_block dest_bb = e2->dest; if (!exit_bb) make_edge (new_bb, dest_bb, EDGE_FALLTHRU); remove_edge (e2); set_immediate_dominator (CDI_DOMINATORS, dest_bb, new_bb); } /* When the OMP expansion process cannot guarantee an up-to-date loop tree arrange for the child function to fixup loops. */ if (loops_state_satisfies_p (LOOPS_NEED_FIXUP)) child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP; /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */ num = vec_safe_length (child_cfun->local_decls); for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++) { t = (*child_cfun->local_decls)[srcidx]; if (DECL_CONTEXT (t) == cfun->decl) continue; if (srcidx != dstidx) (*child_cfun->local_decls)[dstidx] = t; dstidx++; } if (dstidx != num) vec_safe_truncate (child_cfun->local_decls, dstidx); /* Inform the callgraph about the new function. 
*/ child_cfun->curr_properties = cfun->curr_properties; child_cfun->has_simduid_loops |= cfun->has_simduid_loops; child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops; cgraph_node *node = cgraph_node::get_create (child_fn); node->parallelized_function = 1; cgraph_node::add_new_function (child_fn, true); bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl) && !DECL_ASSEMBLER_NAME_SET_P (child_fn); /* Fix the callgraph edges for child_cfun. Those for cfun will be fixed in a following pass. */ push_cfun (child_cfun); if (need_asm) assign_assembler_name_if_needed (child_fn); if (optimize) optimize_omp_library_calls (entry_stmt); update_max_bb_count (); cgraph_edge::rebuild_edges (); /* Some EH regions might become dead, see PR34608. If pass_cleanup_cfg isn't the first pass to happen with the new child, these dead EH edges might cause problems. Clean them up now. */ if (flag_exceptions) { basic_block bb; bool changed = false; FOR_EACH_BB_FN (bb, cfun) changed |= gimple_purge_dead_eh_edges (bb); if (changed) cleanup_tree_cfg (); } if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa); if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP)) verify_loop_structure (); pop_cfun (); if (dump_file && !gimple_in_ssa_p (cfun)) { omp_any_child_fn_dumped = true; dump_function_header (dump_file, child_fn, dump_flags); dump_function_to_file (child_fn, dump_file, dump_flags); } } adjust_context_and_scope (region, gimple_block (entry_stmt), child_fn); if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL) expand_parallel_call (region, new_bb, as_a <gomp_parallel *> (entry_stmt), ws_args); else if (gimple_code (entry_stmt) == GIMPLE_OMP_TEAMS) expand_teams_call (new_bb, as_a <gomp_teams *> (entry_stmt)); else expand_task_call (region, new_bb, as_a <gomp_task *> (entry_stmt)); if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_only_virtuals); } /* Information about members of an OpenACC collapsed loop nest. */ struct oacc_collapse { tree base; /* Base value. */ tree iters; /* Number of steps. */ tree step; /* Step size. */ tree tile; /* Tile increment (if tiled). */ tree outer; /* Tile iterator var. */ }; /* Helper for expand_oacc_for. Determine collapsed loop information. Fill in COUNTS array. Emit any initialization code before GSI. Return the calculated outer loop bound of BOUND_TYPE. */ static tree expand_oacc_collapse_init (const struct omp_for_data *fd, gimple_stmt_iterator *gsi, oacc_collapse *counts, tree bound_type, location_t loc) { tree tiling = fd->tiling; tree total = build_int_cst (bound_type, 1); int ix; gcc_assert (integer_onep (fd->loop.step)); gcc_assert (integer_zerop (fd->loop.n1)); /* When tiling, the first operand of the tile clause applies to the innermost loop, and we work outwards from there. Seems backwards, but whatever. 
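E.g. with tile(2,4) the 2 applies to the inner of the two loops and the 4 to the outer one.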
*/ for (ix = fd->collapse; ix--;) { const omp_for_data_loop *loop = &fd->loops[ix]; tree iter_type = TREE_TYPE (loop->v); tree diff_type = iter_type; tree plus_type = iter_type; gcc_assert (loop->cond_code == fd->loop.cond_code); if (POINTER_TYPE_P (iter_type)) plus_type = sizetype; if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type)) diff_type = signed_type_for (diff_type); if (TYPE_PRECISION (diff_type) < TYPE_PRECISION (integer_type_node)) diff_type = integer_type_node; if (tiling) { tree num = build_int_cst (integer_type_node, fd->collapse); tree loop_no = build_int_cst (integer_type_node, ix); tree tile = TREE_VALUE (tiling); gcall *call = gimple_build_call_internal (IFN_GOACC_TILE, 5, num, loop_no, tile, /* gwv-outer=*/integer_zero_node, /* gwv-inner=*/integer_zero_node); counts[ix].outer = create_tmp_var (iter_type, ".outer"); counts[ix].tile = create_tmp_var (diff_type, ".tile"); gimple_call_set_lhs (call, counts[ix].tile); gimple_set_location (call, loc); gsi_insert_before (gsi, call, GSI_SAME_STMT); tiling = TREE_CHAIN (tiling); } else { counts[ix].tile = NULL; counts[ix].outer = loop->v; } tree b = loop->n1; tree e = loop->n2; tree s = loop->step; bool up = loop->cond_code == LT_EXPR; tree dir = build_int_cst (diff_type, up ? +1 : -1); bool negating; tree expr; b = force_gimple_operand_gsi (gsi, b, true, NULL_TREE, true, GSI_SAME_STMT); e = force_gimple_operand_gsi (gsi, e, true, NULL_TREE, true, GSI_SAME_STMT); /* Convert the step, avoiding possible unsigned->signed overflow. */ negating = !up && TYPE_UNSIGNED (TREE_TYPE (s)); if (negating) s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s); s = fold_convert (diff_type, s); if (negating) s = fold_build1 (NEGATE_EXPR, diff_type, s); s = force_gimple_operand_gsi (gsi, s, true, NULL_TREE, true, GSI_SAME_STMT); /* Determine the range, avoiding possible unsigned->signed overflow. */ negating = !up && TYPE_UNSIGNED (iter_type); expr = fold_build2 (MINUS_EXPR, plus_type, fold_convert (plus_type, negating ? b : e), fold_convert (plus_type, negating ? e : b)); expr = fold_convert (diff_type, expr); if (negating) expr = fold_build1 (NEGATE_EXPR, diff_type, expr); tree range = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT); /* Determine number of iterations. */ expr = fold_build2 (MINUS_EXPR, diff_type, range, dir); expr = fold_build2 (PLUS_EXPR, diff_type, expr, s); expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s); tree iters = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT); counts[ix].base = b; counts[ix].iters = iters; counts[ix].step = s; total = fold_build2 (MULT_EXPR, bound_type, total, fold_convert (bound_type, iters)); } return total; } /* Emit initializers for collapsed loop members. INNER is true if this is for the element loop of a TILE. IVAR is the outer loop iteration variable, from which collapsed loop iteration values are calculated. COUNTS array has been initialized by expand_oacc_collapse_inits. */ static void expand_oacc_collapse_vars (const struct omp_for_data *fd, bool inner, gimple_stmt_iterator *gsi, const oacc_collapse *counts, tree ivar) { tree ivar_type = TREE_TYPE (ivar); /* The most rapidly changing iteration variable is the innermost one. */ for (int ix = fd->collapse; ix--;) { const omp_for_data_loop *loop = &fd->loops[ix]; const oacc_collapse *collapse = &counts[ix]; tree v = inner ? 
loop->v : collapse->outer; tree iter_type = TREE_TYPE (v); tree diff_type = TREE_TYPE (collapse->step); tree plus_type = iter_type; enum tree_code plus_code = PLUS_EXPR; tree expr; if (POINTER_TYPE_P (iter_type)) { plus_code = POINTER_PLUS_EXPR; plus_type = sizetype; } expr = ivar; if (ix) { tree mod = fold_convert (ivar_type, collapse->iters); ivar = fold_build2 (TRUNC_DIV_EXPR, ivar_type, expr, mod); expr = fold_build2 (TRUNC_MOD_EXPR, ivar_type, expr, mod); ivar = force_gimple_operand_gsi (gsi, ivar, true, NULL_TREE, true, GSI_SAME_STMT); } expr = fold_build2 (MULT_EXPR, diff_type, fold_convert (diff_type, expr), collapse->step); expr = fold_build2 (plus_code, iter_type, inner ? collapse->outer : collapse->base, fold_convert (plus_type, expr)); expr = force_gimple_operand_gsi (gsi, expr, false, NULL_TREE, true, GSI_SAME_STMT); gassign *ass = gimple_build_assign (v, expr); gsi_insert_before (gsi, ass, GSI_SAME_STMT); } } /* Helper function for expand_omp_{for_*,simd}. If this is the outermost of the combined collapse > 1 loop constructs, generate code like: if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB; if (cond3 is <) adj = STEP3 - 1; else adj = STEP3 + 1; count3 = (adj + N32 - N31) / STEP3; if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB; if (cond2 is <) adj = STEP2 - 1; else adj = STEP2 + 1; count2 = (adj + N22 - N21) / STEP2; if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB; if (cond1 is <) adj = STEP1 - 1; else adj = STEP1 + 1; count1 = (adj + N12 - N11) / STEP1; count = count1 * count2 * count3; Furthermore, if ZERO_ITER_BB is NULL, create a BB which does: count = 0; and set ZERO_ITER_BB to that bb. If this isn't the outermost of the combined loop constructs, just initialize COUNTS array from the _looptemp_ clauses. */ /* NOTE: It *could* be better to moosh all of the BBs together, creating one larger BB with all the computation and the unexpected jump at the end. I.e. bool zero3, zero2, zero1, zero; zero3 = N32 c3 N31; count3 = (N32 - N31) /[cl] STEP3; zero2 = N22 c2 N21; count2 = (N22 - N21) /[cl] STEP2; zero1 = N12 c1 N11; count1 = (N12 - N11) /[cl] STEP1; zero = zero3 || zero2 || zero1; count = count1 * count2 * count3; if (__builtin_expect(zero, false)) goto zero_iter_bb; After all, we expect the zero=false, and thus we expect to have to evaluate all of the comparison expressions, so short-circuiting oughtn't be a win. Since the condition isn't protecting a denominator, we're not concerned about divide-by-zero, so we can fully evaluate count even if a numerator turned out to be wrong. It seems like putting this all together would create much better scheduling opportunities, and less pressure on the chip's branch predictor. */ static void expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi, basic_block &entry_bb, tree *counts, basic_block &zero_iter1_bb, int &first_zero_iter1, basic_block &zero_iter2_bb, int &first_zero_iter2, basic_block &l2_dom_bb) { tree t, type = TREE_TYPE (fd->loop.v); edge e, ne; int i; /* Collapsed loops need work for expansion into SSA form. */ gcc_assert (!gimple_in_ssa_p (cfun)); if (gimple_omp_for_combined_into_p (fd->for_stmt) && TREE_CODE (fd->loop.n2) != INTEGER_CST) { gcc_assert (fd->ordered == 0); /* First two _looptemp_ clauses are for istart/iend, counts[0] isn't supposed to be handled, as the inner loop doesn't use it. 
*/ tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); for (i = 0; i < fd->collapse; i++) { innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); if (i) counts[i] = OMP_CLAUSE_DECL (innerc); else counts[0] = NULL_TREE; } return; } for (i = fd->collapse; i < fd->ordered; i++) { tree itype = TREE_TYPE (fd->loops[i].v); counts[i] = NULL_TREE; t = fold_binary (fd->loops[i].cond_code, boolean_type_node, fold_convert (itype, fd->loops[i].n1), fold_convert (itype, fd->loops[i].n2)); if (t && integer_zerop (t)) { for (i = fd->collapse; i < fd->ordered; i++) counts[i] = build_int_cst (type, 0); break; } } for (i = 0; i < (fd->ordered ? fd->ordered : fd->collapse); i++) { tree itype = TREE_TYPE (fd->loops[i].v); if (i >= fd->collapse && counts[i]) continue; if ((SSA_VAR_P (fd->loop.n2) || i >= fd->collapse) && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node, fold_convert (itype, fd->loops[i].n1), fold_convert (itype, fd->loops[i].n2))) == NULL_TREE || !integer_onep (t))) { gcond *cond_stmt; tree n1, n2; n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1)); n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE, true, GSI_SAME_STMT); n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2)); n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE, true, GSI_SAME_STMT); cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2, NULL_TREE, NULL_TREE); gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT); if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL) || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL)) { *gsi = gsi_for_stmt (cond_stmt); gimple_regimplify_operands (cond_stmt, gsi); } e = split_block (entry_bb, cond_stmt); basic_block &zero_iter_bb = i < fd->collapse ? zero_iter1_bb : zero_iter2_bb; int &first_zero_iter = i < fd->collapse ? first_zero_iter1 : first_zero_iter2; if (zero_iter_bb == NULL) { gassign *assign_stmt; first_zero_iter = i; zero_iter_bb = create_empty_bb (entry_bb); add_bb_to_loop (zero_iter_bb, entry_bb->loop_father); *gsi = gsi_after_labels (zero_iter_bb); if (i < fd->collapse) assign_stmt = gimple_build_assign (fd->loop.n2, build_zero_cst (type)); else { counts[i] = create_tmp_reg (type, ".count"); assign_stmt = gimple_build_assign (counts[i], build_zero_cst (type)); } gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT); set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb, entry_bb); } ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE); ne->probability = profile_probability::very_unlikely (); e->flags = EDGE_TRUE_VALUE; e->probability = ne->probability.invert (); if (l2_dom_bb == NULL) l2_dom_bb = entry_bb; entry_bb = e->dest; *gsi = gsi_last_nondebug_bb (entry_bb); } if (POINTER_TYPE_P (itype)) itype = signed_type_for (itype); t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR ? -1 : 1)); t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, fd->loops[i].step), t); t = fold_build2 (PLUS_EXPR, itype, t, fold_convert (itype, fd->loops[i].n2)); t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loops[i].n1)); /* ?? We could probably use CEIL_DIV_EXPR instead of TRUNC_DIV_EXPR and adjusting by hand. Unless we can't generate the same code in the end because generically we don't know that the values involved must be negative for GT?? 
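As a worked instance of the expression built just above: for 'for (i = 0; i < 10; i += 3)' (cond_code LT) the count is (step - 1 + n2 - n1) / step = (3 - 1 + 10 - 0) / 3 = 4, matching iterations 0, 3, 6 and 9.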
*/ if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR) t = fold_build2 (TRUNC_DIV_EXPR, itype, fold_build1 (NEGATE_EXPR, itype, t), fold_build1 (NEGATE_EXPR, itype, fold_convert (itype, fd->loops[i].step))); else t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fold_convert (itype, fd->loops[i].step)); t = fold_convert (type, t); if (TREE_CODE (t) == INTEGER_CST) counts[i] = t; else { if (i < fd->collapse || i != first_zero_iter2) counts[i] = create_tmp_reg (type, ".count"); expand_omp_build_assign (gsi, counts[i], t); } if (SSA_VAR_P (fd->loop.n2) && i < fd->collapse) { if (i == 0) t = counts[0]; else t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]); expand_omp_build_assign (gsi, fd->loop.n2, t); } } } /* Helper function for expand_omp_{for_*,simd}. Generate code like: T = V; V3 = N31 + (T % count3) * STEP3; T = T / count3; V2 = N21 + (T % count2) * STEP2; T = T / count2; V1 = N11 + T * STEP1; if this loop doesn't have an inner loop construct combined with it. If it does have an inner loop construct combined with it and the iteration count isn't known constant, store values from counts array into its _looptemp_ temporaries instead. */ static void expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi, tree *counts, gimple *inner_stmt, tree startvar) { int i; if (gimple_omp_for_combined_p (fd->for_stmt)) { /* If fd->loop.n2 is constant, then no propagation of the counts is needed, they are constant. */ if (TREE_CODE (fd->loop.n2) == INTEGER_CST) return; tree clauses = gimple_code (inner_stmt) != GIMPLE_OMP_FOR ? gimple_omp_taskreg_clauses (inner_stmt) : gimple_omp_for_clauses (inner_stmt); /* First two _looptemp_ clauses are for istart/iend, counts[0] isn't supposed to be handled, as the inner loop doesn't use it. */ tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); for (i = 0; i < fd->collapse; i++) { innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); if (i) { tree tem = OMP_CLAUSE_DECL (innerc); tree t = fold_convert (TREE_TYPE (tem), counts[i]); t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); gassign *stmt = gimple_build_assign (tem, t); gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); } } return; } tree type = TREE_TYPE (fd->loop.v); tree tem = create_tmp_reg (type, ".tem"); gassign *stmt = gimple_build_assign (tem, startvar); gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); for (i = fd->collapse - 1; i >= 0; i--) { tree vtype = TREE_TYPE (fd->loops[i].v), itype, t; itype = vtype; if (POINTER_TYPE_P (vtype)) itype = signed_type_for (vtype); if (i != 0) t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]); else t = tem; t = fold_convert (itype, t); t = fold_build2 (MULT_EXPR, itype, t, fold_convert (itype, fd->loops[i].step)); if (POINTER_TYPE_P (vtype)) t = fold_build_pointer_plus (fd->loops[i].n1, t); else t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t); t = force_gimple_operand_gsi (gsi, t, DECL_P (fd->loops[i].v) && TREE_ADDRESSABLE (fd->loops[i].v), NULL_TREE, false, GSI_CONTINUE_LINKING); stmt = gimple_build_assign (fd->loops[i].v, t); gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); if (i != 0) { t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]); t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); stmt = gimple_build_assign (tem, t); gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); } } } /* Helper function for expand_omp_for_*. 
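(It emits the bottom-of-body increment-and-wrap logic for the collapsed iteration variables.)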
Generate code like: L10: V3 += STEP3; if (V3 cond3 N32) goto BODY_BB; else goto L11; L11: V3 = N31; V2 += STEP2; if (V2 cond2 N22) goto BODY_BB; else goto L12; L12: V2 = N21; V1 += STEP1; goto BODY_BB; */ static basic_block extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb, basic_block body_bb) { basic_block last_bb, bb, collapse_bb = NULL; int i; gimple_stmt_iterator gsi; edge e; tree t; gimple *stmt; last_bb = cont_bb; for (i = fd->collapse - 1; i >= 0; i--) { tree vtype = TREE_TYPE (fd->loops[i].v); bb = create_empty_bb (last_bb); add_bb_to_loop (bb, last_bb->loop_father); gsi = gsi_start_bb (bb); if (i < fd->collapse - 1) { e = make_edge (last_bb, bb, EDGE_FALSE_VALUE); e->probability = profile_probability::guessed_always ().apply_scale (1, 8); t = fd->loops[i + 1].n1; t = force_gimple_operand_gsi (&gsi, t, DECL_P (fd->loops[i + 1].v) && TREE_ADDRESSABLE (fd->loops[i + 1].v), NULL_TREE, false, GSI_CONTINUE_LINKING); stmt = gimple_build_assign (fd->loops[i + 1].v, t); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); } else collapse_bb = bb; set_immediate_dominator (CDI_DOMINATORS, bb, last_bb); if (POINTER_TYPE_P (vtype)) t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step); else t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step); t = force_gimple_operand_gsi (&gsi, t, DECL_P (fd->loops[i].v) && TREE_ADDRESSABLE (fd->loops[i].v), NULL_TREE, false, GSI_CONTINUE_LINKING); stmt = gimple_build_assign (fd->loops[i].v, t); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); if (i > 0) { t = fd->loops[i].n2; t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); tree v = fd->loops[i].v; if (DECL_P (v) && TREE_ADDRESSABLE (v)) v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t); stmt = gimple_build_cond_empty (t); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); if (walk_tree (gimple_cond_lhs_ptr (as_a <gcond *> (stmt)), expand_omp_regimplify_p, NULL, NULL) || walk_tree (gimple_cond_rhs_ptr (as_a <gcond *> (stmt)), expand_omp_regimplify_p, NULL, NULL)) gimple_regimplify_operands (stmt, &gsi); e = make_edge (bb, body_bb, EDGE_TRUE_VALUE); e->probability = profile_probability::guessed_always ().apply_scale (7, 8); } else make_edge (bb, body_bb, EDGE_FALLTHRU); last_bb = bb; } return collapse_bb; } /* Expand #pragma omp ordered depend(source). */ static void expand_omp_ordered_source (gimple_stmt_iterator *gsi, struct omp_for_data *fd, tree *counts, location_t loc) { enum built_in_function source_ix = fd->iter_type == long_integer_type_node ? BUILT_IN_GOMP_DOACROSS_POST : BUILT_IN_GOMP_DOACROSS_ULL_POST; gimple *g = gimple_build_call (builtin_decl_explicit (source_ix), 1, build_fold_addr_expr (counts[fd->ordered])); gimple_set_location (g, loc); gsi_insert_before (gsi, g, GSI_SAME_STMT); } /* Expand a single depend from #pragma omp ordered depend(sink:...). */ static void expand_omp_ordered_sink (gimple_stmt_iterator *gsi, struct omp_for_data *fd, tree *counts, tree c, location_t loc) { auto_vec<tree, 10> args; enum built_in_function sink_ix = fd->iter_type == long_integer_type_node ? 
BUILT_IN_GOMP_DOACROSS_WAIT : BUILT_IN_GOMP_DOACROSS_ULL_WAIT; tree t, off, coff = NULL_TREE, deps = OMP_CLAUSE_DECL (c), cond = NULL_TREE; int i; gimple_stmt_iterator gsi2 = *gsi; bool warned_step = false; for (i = 0; i < fd->ordered; i++) { tree step = NULL_TREE; off = TREE_PURPOSE (deps); if (TREE_CODE (off) == TRUNC_DIV_EXPR) { step = TREE_OPERAND (off, 1); off = TREE_OPERAND (off, 0); } if (!integer_zerop (off)) { gcc_assert (fd->loops[i].cond_code == LT_EXPR || fd->loops[i].cond_code == GT_EXPR); bool forward = fd->loops[i].cond_code == LT_EXPR; if (step) { /* Non-simple Fortran DO loops. If step is variable, we don't know at compile even the direction, so can't warn. */ if (TREE_CODE (step) != INTEGER_CST) break; forward = tree_int_cst_sgn (step) != -1; } if (forward ^ OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) warning_at (loc, 0, "%<depend%> clause with %<sink%> modifier " "waiting for lexically later iteration"); break; } deps = TREE_CHAIN (deps); } /* If all offsets corresponding to the collapsed loops are zero, this depend clause can be ignored. FIXME: but there is still a flush needed. We need to emit one __sync_synchronize () for it though (perhaps conditionally)? Solve this together with the conservative dependence folding optimization. if (i >= fd->collapse) return; */ deps = OMP_CLAUSE_DECL (c); gsi_prev (&gsi2); edge e1 = split_block (gsi_bb (gsi2), gsi_stmt (gsi2)); edge e2 = split_block_after_labels (e1->dest); gsi2 = gsi_after_labels (e1->dest); *gsi = gsi_last_bb (e1->src); for (i = 0; i < fd->ordered; i++) { tree itype = TREE_TYPE (fd->loops[i].v); tree step = NULL_TREE; tree orig_off = NULL_TREE; if (POINTER_TYPE_P (itype)) itype = sizetype; if (i) deps = TREE_CHAIN (deps); off = TREE_PURPOSE (deps); if (TREE_CODE (off) == TRUNC_DIV_EXPR) { step = TREE_OPERAND (off, 1); off = TREE_OPERAND (off, 0); gcc_assert (fd->loops[i].cond_code == LT_EXPR && integer_onep (fd->loops[i].step) && !POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))); } tree s = fold_convert_loc (loc, itype, step ? 
step : fd->loops[i].step); if (step) { off = fold_convert_loc (loc, itype, off); orig_off = off; off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s); } if (integer_zerop (off)) t = boolean_true_node; else { tree a; tree co = fold_convert_loc (loc, itype, off); if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))) { if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) co = fold_build1_loc (loc, NEGATE_EXPR, itype, co); a = fold_build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (fd->loops[i].v), fd->loops[i].v, co); } else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) a = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v), fd->loops[i].v, co); else a = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (fd->loops[i].v), fd->loops[i].v, co); if (step) { tree t1, t2; if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) t1 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a, fd->loops[i].n1); else t1 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a, fd->loops[i].n2); if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) t2 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a, fd->loops[i].n2); else t2 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a, fd->loops[i].n1); t = fold_build2_loc (loc, LT_EXPR, boolean_type_node, step, build_int_cst (TREE_TYPE (step), 0)); if (TREE_CODE (step) != INTEGER_CST) { t1 = unshare_expr (t1); t1 = force_gimple_operand_gsi (gsi, t1, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t2 = unshare_expr (t2); t2 = force_gimple_operand_gsi (gsi, t2, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } t = fold_build3_loc (loc, COND_EXPR, boolean_type_node, t, t2, t1); } else if (fd->loops[i].cond_code == LT_EXPR) { if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) t = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a, fd->loops[i].n1); else t = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a, fd->loops[i].n2); } else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) t = fold_build2_loc (loc, GT_EXPR, boolean_type_node, a, fd->loops[i].n2); else t = fold_build2_loc (loc, LE_EXPR, boolean_type_node, a, fd->loops[i].n1); } if (cond) cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node, cond, t); else cond = t; off = fold_convert_loc (loc, itype, off); if (step || (fd->loops[i].cond_code == LT_EXPR ? !integer_onep (fd->loops[i].step) : !integer_minus_onep (fd->loops[i].step))) { if (step == NULL_TREE && TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR) t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype, off, fold_build1_loc (loc, NEGATE_EXPR, itype, s)); else t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype, orig_off ? orig_off : off, s); t = fold_build2_loc (loc, EQ_EXPR, boolean_type_node, t, build_int_cst (itype, 0)); if (integer_zerop (t) && !warned_step) { warning_at (loc, 0, "%<depend%> clause with %<sink%> modifier " "refers to iteration never in the iteration " "space"); warned_step = true; } cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node, cond, t); } if (i <= fd->collapse - 1 && fd->collapse > 1) t = fd->loop.v; else if (counts[i]) t = counts[i]; else { t = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v), fd->loops[i].v, fd->loops[i].n1); t = fold_convert_loc (loc, fd->iter_type, t); } if (step) /* We have divided off by step already earlier. 
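For illustration (an assumed example, not from the original comment): a non-unit-stride Fortran sink such as depend(sink: i - 4) on a DO loop with step 2 arrives here with TREE_PURPOSE encoded as TRUNC_DIV_EXPR (4, 2), so off already holds the iteration distance 2 rather than the source-level offset 4.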
*/; else if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR) off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, fold_build1_loc (loc, NEGATE_EXPR, itype, s)); else off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s); if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) off = fold_build1_loc (loc, NEGATE_EXPR, itype, off); off = fold_convert_loc (loc, fd->iter_type, off); if (i <= fd->collapse - 1 && fd->collapse > 1) { if (i) off = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, coff, off); if (i < fd->collapse - 1) { coff = fold_build2_loc (loc, MULT_EXPR, fd->iter_type, off, counts[i]); continue; } } off = unshare_expr (off); t = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, t, off); t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE, true, GSI_SAME_STMT); args.safe_push (t); } gimple *g = gimple_build_call_vec (builtin_decl_explicit (sink_ix), args); gimple_set_location (g, loc); gsi_insert_before (&gsi2, g, GSI_SAME_STMT); cond = unshare_expr (cond); cond = force_gimple_operand_gsi (gsi, cond, true, NULL_TREE, false, GSI_CONTINUE_LINKING); gsi_insert_after (gsi, gimple_build_cond_empty (cond), GSI_NEW_STMT); edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE); e3->probability = profile_probability::guessed_always ().apply_scale (1, 8); e1->probability = e3->probability.invert (); e1->flags = EDGE_TRUE_VALUE; set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src); *gsi = gsi_after_labels (e2->dest); } /* Expand all #pragma omp ordered depend(source) and #pragma omp ordered depend(sink:...) constructs in the current #pragma omp for ordered(n) region. */ static void expand_omp_ordered_source_sink (struct omp_region *region, struct omp_for_data *fd, tree *counts, basic_block cont_bb) { struct omp_region *inner; int i; for (i = fd->collapse - 1; i < fd->ordered; i++) if (i == fd->collapse - 1 && fd->collapse > 1) counts[i] = NULL_TREE; else if (i >= fd->collapse && !cont_bb) counts[i] = build_zero_cst (fd->iter_type); else if (!POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)) && integer_onep (fd->loops[i].step)) counts[i] = NULL_TREE; else counts[i] = create_tmp_var (fd->iter_type, ".orditer"); tree atype = build_array_type_nelts (fd->iter_type, fd->ordered - fd->collapse + 1); counts[fd->ordered] = create_tmp_var (atype, ".orditera"); TREE_ADDRESSABLE (counts[fd->ordered]) = 1; for (inner = region->inner; inner; inner = inner->next) if (inner->type == GIMPLE_OMP_ORDERED) { gomp_ordered *ord_stmt = inner->ord_stmt; gimple_stmt_iterator gsi = gsi_for_stmt (ord_stmt); location_t loc = gimple_location (ord_stmt); tree c; for (c = gimple_omp_ordered_clauses (ord_stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE) break; if (c) expand_omp_ordered_source (&gsi, fd, counts, loc); for (c = gimple_omp_ordered_clauses (ord_stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK) expand_omp_ordered_sink (&gsi, fd, counts, c, loc); gsi_remove (&gsi, true); } } /* Wrap the body into fd->ordered - fd->collapse loops that aren't collapsed. 
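For instance (an illustrative sketch, not part of the original comment): given #pragma omp for ordered(2) over for (i = 0; i < N; i++) for (j = 0; j < M; j++) body; with fd->collapse == 1, only the i loop is divided among the threads; the j loop is rebuilt here around BODY_BB, together with the .orditer counters consumed by the depend(source)/depend(sink:) expansion above.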
*/ static basic_block expand_omp_for_ordered_loops (struct omp_for_data *fd, tree *counts, basic_block cont_bb, basic_block body_bb, bool ordered_lastprivate) { if (fd->ordered == fd->collapse) return cont_bb; if (!cont_bb) { gimple_stmt_iterator gsi = gsi_after_labels (body_bb); for (int i = fd->collapse; i < fd->ordered; i++) { tree type = TREE_TYPE (fd->loops[i].v); tree n1 = fold_convert (type, fd->loops[i].n1); expand_omp_build_assign (&gsi, fd->loops[i].v, n1); tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], size_int (i - fd->collapse + 1), NULL_TREE, NULL_TREE); expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type)); } return NULL; } for (int i = fd->ordered - 1; i >= fd->collapse; i--) { tree t, type = TREE_TYPE (fd->loops[i].v); gimple_stmt_iterator gsi = gsi_after_labels (body_bb); expand_omp_build_assign (&gsi, fd->loops[i].v, fold_convert (type, fd->loops[i].n1)); if (counts[i]) expand_omp_build_assign (&gsi, counts[i], build_zero_cst (fd->iter_type)); tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], size_int (i - fd->collapse + 1), NULL_TREE, NULL_TREE); expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type)); if (!gsi_end_p (gsi)) gsi_prev (&gsi); else gsi = gsi_last_bb (body_bb); edge e1 = split_block (body_bb, gsi_stmt (gsi)); basic_block new_body = e1->dest; if (body_bb == cont_bb) cont_bb = new_body; edge e2 = NULL; basic_block new_header; if (EDGE_COUNT (cont_bb->preds) > 0) { gsi = gsi_last_bb (cont_bb); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (fd->loops[i].v, fold_convert (sizetype, fd->loops[i].step)); else t = fold_build2 (PLUS_EXPR, type, fd->loops[i].v, fold_convert (type, fd->loops[i].step)); expand_omp_build_assign (&gsi, fd->loops[i].v, t); if (counts[i]) { t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[i], build_int_cst (fd->iter_type, 1)); expand_omp_build_assign (&gsi, counts[i], t); t = counts[i]; } else { t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[i].v), fd->loops[i].v, fd->loops[i].n1); t = fold_convert (fd->iter_type, t); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); } aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], size_int (i - fd->collapse + 1), NULL_TREE, NULL_TREE); expand_omp_build_assign (&gsi, aref, t); gsi_prev (&gsi); e2 = split_block (cont_bb, gsi_stmt (gsi)); new_header = e2->dest; } else new_header = cont_bb; gsi = gsi_after_labels (new_header); tree v = force_gimple_operand_gsi (&gsi, fd->loops[i].v, true, NULL_TREE, true, GSI_SAME_STMT); tree n2 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loops[i].n2), true, NULL_TREE, true, GSI_SAME_STMT); t = build2 (fd->loops[i].cond_code, boolean_type_node, v, n2); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_NEW_STMT); edge e3 = split_block (new_header, gsi_stmt (gsi)); cont_bb = e3->dest; remove_edge (e1); make_edge (body_bb, new_header, EDGE_FALLTHRU); e3->flags = EDGE_FALSE_VALUE; e3->probability = profile_probability::guessed_always ().apply_scale (1, 8); e1 = make_edge (new_header, new_body, EDGE_TRUE_VALUE); e1->probability = e3->probability.invert (); set_immediate_dominator (CDI_DOMINATORS, new_header, body_bb); set_immediate_dominator (CDI_DOMINATORS, new_body, new_header); if (e2) { class loop *loop = alloc_loop (); loop->header = new_header; loop->latch = e2->src; add_loop (loop, body_bb->loop_father); } } /* If there are any lastprivate clauses and it is possible some loops might have zero iterations, ensure all the decls are 
initialized, otherwise we could crash evaluating C++ class iterators with lastprivate clauses. */ bool need_inits = false; for (int i = fd->collapse; ordered_lastprivate && i < fd->ordered; i++) if (need_inits) { tree type = TREE_TYPE (fd->loops[i].v); gimple_stmt_iterator gsi = gsi_after_labels (body_bb); expand_omp_build_assign (&gsi, fd->loops[i].v, fold_convert (type, fd->loops[i].n1)); } else { tree type = TREE_TYPE (fd->loops[i].v); tree this_cond = fold_build2 (fd->loops[i].cond_code, boolean_type_node, fold_convert (type, fd->loops[i].n1), fold_convert (type, fd->loops[i].n2)); if (!integer_onep (this_cond)) need_inits = true; } return cont_bb; } /* A subroutine of expand_omp_for. Generate code for a parallel loop with any schedule. Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0); if (more) goto L0; else goto L3; L0: V = istart0; iend = iend0; L1: BODY; V += STEP; if (V cond iend) goto L1; else goto L2; L2: if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3; L3: If this is a combined omp parallel loop, instead of the call to GOMP_loop_foo_start, we call GOMP_loop_foo_next. If this is gimple_omp_for_combined_p loop, then instead of assigning V and iend in L0 we assign the first two _looptemp_ clause decls of the inner GIMPLE_OMP_FOR and V += STEP; and if (V cond iend) goto L1; else goto L2; are removed. For collapsed loops, given parameters: collapse(3) for (V1 = N11; V1 cond1 N12; V1 += STEP1) for (V2 = N21; V2 cond2 N22; V2 += STEP2) for (V3 = N31; V3 cond3 N32; V3 += STEP3) BODY; we generate pseudocode if (__builtin_expect (N32 cond3 N31, 0)) goto Z0; if (cond3 is <) adj = STEP3 - 1; else adj = STEP3 + 1; count3 = (adj + N32 - N31) / STEP3; if (__builtin_expect (N22 cond2 N21, 0)) goto Z0; if (cond2 is <) adj = STEP2 - 1; else adj = STEP2 + 1; count2 = (adj + N22 - N21) / STEP2; if (__builtin_expect (N12 cond1 N11, 0)) goto Z0; if (cond1 is <) adj = STEP1 - 1; else adj = STEP1 + 1; count1 = (adj + N12 - N11) / STEP1; count = count1 * count2 * count3; goto Z1; Z0: count = 0; Z1: more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0); if (more) goto L0; else goto L3; L0: V = istart0; T = V; V3 = N31 + (T % count3) * STEP3; T = T / count3; V2 = N21 + (T % count2) * STEP2; T = T / count2; V1 = N11 + T * STEP1; iend = iend0; L1: BODY; V += 1; if (V < iend) goto L10; else goto L2; L10: V3 += STEP3; if (V3 cond3 N32) goto L1; else goto L11; L11: V3 = N31; V2 += STEP2; if (V2 cond2 N22) goto L1; else goto L12; L12: V2 = N21; V1 += STEP1; goto L1; L2: if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3; L3: */ static void expand_omp_for_generic (struct omp_region *region, struct omp_for_data *fd, enum built_in_function start_fn, enum built_in_function next_fn, tree sched_arg, gimple *inner_stmt) { tree type, istart0, iend0, iend; tree t, vmain, vback, bias = NULL_TREE; basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb; basic_block l2_bb = NULL, l3_bb = NULL; gimple_stmt_iterator gsi; gassign *assign_stmt; bool in_combined_parallel = is_combined_parallel (region); bool broken_loop = region->cont == NULL; edge e, ne; tree *counts = NULL; int i; bool ordered_lastprivate = false; gcc_assert (!broken_loop || !in_combined_parallel); gcc_assert (fd->iter_type == long_integer_type_node || !in_combined_parallel); entry_bb = region->entry; cont_bb = region->cont; collapse_bb = NULL; gcc_assert (EDGE_COUNT 
(entry_bb->succs) == 2); gcc_assert (broken_loop || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest); l0_bb = split_edge (FALLTHRU_EDGE (entry_bb)); l1_bb = single_succ (l0_bb); if (!broken_loop) { l2_bb = create_empty_bb (cont_bb); gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb || (single_succ_edge (BRANCH_EDGE (cont_bb)->dest)->dest == l1_bb)); gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); } else l2_bb = NULL; l3_bb = BRANCH_EDGE (entry_bb)->dest; exit_bb = region->exit; gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); if (fd->ordered && omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE_LASTPRIVATE)) ordered_lastprivate = true; tree reductions = NULL_TREE; tree mem = NULL_TREE, cond_var = NULL_TREE, condtemp = NULL_TREE; tree memv = NULL_TREE; if (fd->lastprivate_conditional) { tree c = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__CONDTEMP_); if (fd->have_pointer_condtemp) condtemp = OMP_CLAUSE_DECL (c); c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE__CONDTEMP_); cond_var = OMP_CLAUSE_DECL (c); } if (sched_arg) { if (fd->have_reductemp) { tree c = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__REDUCTEMP_); reductions = OMP_CLAUSE_DECL (c); gcc_assert (TREE_CODE (reductions) == SSA_NAME); gimple *g = SSA_NAME_DEF_STMT (reductions); reductions = gimple_assign_rhs1 (g); OMP_CLAUSE_DECL (c) = reductions; entry_bb = gimple_bb (g); edge e = split_block (entry_bb, g); if (region->entry == entry_bb) region->entry = e->dest; gsi = gsi_last_bb (entry_bb); } else reductions = null_pointer_node; if (fd->have_pointer_condtemp) { tree type = TREE_TYPE (condtemp); memv = create_tmp_var (type); TREE_ADDRESSABLE (memv) = 1; unsigned HOST_WIDE_INT sz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type))); sz *= fd->lastprivate_conditional; expand_omp_build_assign (&gsi, memv, build_int_cst (type, sz), false); mem = build_fold_addr_expr (memv); } else mem = null_pointer_node; } if (fd->collapse > 1 || fd->ordered) { int first_zero_iter1 = -1, first_zero_iter2 = -1; basic_block zero_iter1_bb = NULL, zero_iter2_bb = NULL, l2_dom_bb = NULL; counts = XALLOCAVEC (tree, fd->ordered ? fd->ordered + 1 : fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, zero_iter1_bb, first_zero_iter1, zero_iter2_bb, first_zero_iter2, l2_dom_bb); if (zero_iter1_bb) { /* Some counts[i] vars might be uninitialized if some loop has zero iterations. But the body shouldn't be executed in that case, so just avoid uninit warnings. */ for (i = first_zero_iter1; i < (fd->ordered ? fd->ordered : fd->collapse); i++) if (SSA_VAR_P (counts[i])) TREE_NO_WARNING (counts[i]) = 1; gsi_prev (&gsi); e = split_block (entry_bb, gsi_stmt (gsi)); entry_bb = e->dest; make_edge (zero_iter1_bb, entry_bb, EDGE_FALLTHRU); gsi = gsi_last_nondebug_bb (entry_bb); set_immediate_dominator (CDI_DOMINATORS, entry_bb, get_immediate_dominator (CDI_DOMINATORS, zero_iter1_bb)); } if (zero_iter2_bb) { /* Some counts[i] vars might be uninitialized if some loop has zero iterations. But the body shouldn't be executed in that case, so just avoid uninit warnings.
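(Illustrative case, not from the original comment: with ordered(2), if the bounds of the non-collapsed loop prove zero iterations, its counts[i] might only be assigned on the path that is bypassed through ZERO_ITER2_BB, which would otherwise trigger a bogus uninitialized-use warning.)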
*/ for (i = first_zero_iter2; i < fd->ordered; i++) if (SSA_VAR_P (counts[i])) TREE_NO_WARNING (counts[i]) = 1; if (zero_iter1_bb) make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU); else { gsi_prev (&gsi); e = split_block (entry_bb, gsi_stmt (gsi)); entry_bb = e->dest; make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU); gsi = gsi_last_nondebug_bb (entry_bb); set_immediate_dominator (CDI_DOMINATORS, entry_bb, get_immediate_dominator (CDI_DOMINATORS, zero_iter2_bb)); } } if (fd->collapse == 1) { counts[0] = fd->loop.n2; fd->loop = fd->loops[0]; } } type = TREE_TYPE (fd->loop.v); istart0 = create_tmp_var (fd->iter_type, ".istart0"); iend0 = create_tmp_var (fd->iter_type, ".iend0"); TREE_ADDRESSABLE (istart0) = 1; TREE_ADDRESSABLE (iend0) = 1; /* See if we need to bias by LLONG_MIN. */ if (fd->iter_type == long_long_unsigned_type_node && TREE_CODE (type) == INTEGER_TYPE && !TYPE_UNSIGNED (type) && fd->ordered == 0) { tree n1, n2; if (fd->loop.cond_code == LT_EXPR) { n1 = fd->loop.n1; n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step); } else { n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step); n2 = fd->loop.n1; } if (TREE_CODE (n1) != INTEGER_CST || TREE_CODE (n2) != INTEGER_CST || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0))) bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type)); } gimple_stmt_iterator gsif = gsi; gsi_prev (&gsif); tree arr = NULL_TREE; if (in_combined_parallel) { gcc_assert (fd->ordered == 0); /* In a combined parallel loop, emit a call to GOMP_loop_foo_next. */ t = build_call_expr (builtin_decl_explicit (next_fn), 2, build_fold_addr_expr (istart0), build_fold_addr_expr (iend0)); } else { tree t0, t1, t2, t3, t4; /* If this is not a combined parallel loop, emit a call to GOMP_loop_foo_start in ENTRY_BB. */ t4 = build_fold_addr_expr (iend0); t3 = build_fold_addr_expr (istart0); if (fd->ordered) { t0 = build_int_cst (unsigned_type_node, fd->ordered - fd->collapse + 1); arr = create_tmp_var (build_array_type_nelts (fd->iter_type, fd->ordered - fd->collapse + 1), ".omp_counts"); DECL_NAMELESS (arr) = 1; TREE_ADDRESSABLE (arr) = 1; TREE_STATIC (arr) = 1; vec<constructor_elt, va_gc> *v; vec_alloc (v, fd->ordered - fd->collapse + 1); int idx; for (idx = 0; idx < fd->ordered - fd->collapse + 1; idx++) { tree c; if (idx == 0 && fd->collapse > 1) c = fd->loop.n2; else c = counts[idx + fd->collapse - 1]; tree purpose = size_int (idx); CONSTRUCTOR_APPEND_ELT (v, purpose, c); if (TREE_CODE (c) != INTEGER_CST) TREE_STATIC (arr) = 0; } DECL_INITIAL (arr) = build_constructor (TREE_TYPE (arr), v); if (!TREE_STATIC (arr)) force_gimple_operand_gsi (&gsi, build1 (DECL_EXPR, void_type_node, arr), true, NULL_TREE, true, GSI_SAME_STMT); t1 = build_fold_addr_expr (arr); t2 = NULL_TREE; } else { t2 = fold_convert (fd->iter_type, fd->loop.step); t1 = fd->loop.n2; t0 = fd->loop.n1; if (gimple_omp_for_combined_into_p (fd->for_stmt)) { tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); t0 = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); t1 = OMP_CLAUSE_DECL (innerc); } if (POINTER_TYPE_P (TREE_TYPE (t0)) && TYPE_PRECISION (TREE_TYPE (t0)) != TYPE_PRECISION (fd->iter_type)) { /* Avoid casting pointers to integer of a different size. 
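(An illustrative scenario, not from the original comment: with 32-bit pointers and a 64-bit fd->iter_type, going through the signed 32-bit itype first sign-extends the value, instead of converting the pointer to the wider integer directly.)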
*/ tree itype = signed_type_for (type); t1 = fold_convert (fd->iter_type, fold_convert (itype, t1)); t0 = fold_convert (fd->iter_type, fold_convert (itype, t0)); } else { t1 = fold_convert (fd->iter_type, t1); t0 = fold_convert (fd->iter_type, t0); } if (bias) { t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias); t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias); } } if (fd->iter_type == long_integer_type_node || fd->ordered) { if (fd->chunk_size) { t = fold_convert (fd->iter_type, fd->chunk_size); t = omp_adjust_chunk_size (t, fd->simd_schedule); if (sched_arg) { if (fd->ordered) t = build_call_expr (builtin_decl_explicit (start_fn), 8, t0, t1, sched_arg, t, t3, t4, reductions, mem); else t = build_call_expr (builtin_decl_explicit (start_fn), 9, t0, t1, t2, sched_arg, t, t3, t4, reductions, mem); } else if (fd->ordered) t = build_call_expr (builtin_decl_explicit (start_fn), 5, t0, t1, t, t3, t4); else t = build_call_expr (builtin_decl_explicit (start_fn), 6, t0, t1, t2, t, t3, t4); } else if (fd->ordered) t = build_call_expr (builtin_decl_explicit (start_fn), 4, t0, t1, t3, t4); else t = build_call_expr (builtin_decl_explicit (start_fn), 5, t0, t1, t2, t3, t4); } else { tree t5; tree c_bool_type; tree bfn_decl; /* The GOMP_loop_ull_*start functions have additional boolean argument, true for < loops and false for > loops. In Fortran, the C bool type can be different from boolean_type_node. */ bfn_decl = builtin_decl_explicit (start_fn); c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl)); t5 = build_int_cst (c_bool_type, fd->loop.cond_code == LT_EXPR ? 1 : 0); if (fd->chunk_size) { tree bfn_decl = builtin_decl_explicit (start_fn); t = fold_convert (fd->iter_type, fd->chunk_size); t = omp_adjust_chunk_size (t, fd->simd_schedule); if (sched_arg) t = build_call_expr (bfn_decl, 10, t5, t0, t1, t2, sched_arg, t, t3, t4, reductions, mem); else t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4); } else t = build_call_expr (builtin_decl_explicit (start_fn), 6, t5, t0, t1, t2, t3, t4); } } if (TREE_TYPE (t) != boolean_type_node) t = fold_build2 (NE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); if (arr && !TREE_STATIC (arr)) { tree clobber = build_clobber (TREE_TYPE (arr)); gsi_insert_before (&gsi, gimple_build_assign (arr, clobber), GSI_SAME_STMT); } if (fd->have_pointer_condtemp) expand_omp_build_assign (&gsi, condtemp, memv, false); if (fd->have_reductemp) { gimple *g = gsi_stmt (gsi); gsi_remove (&gsi, true); release_ssa_name (gimple_assign_lhs (g)); entry_bb = region->entry; gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); } gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); /* Remove the GIMPLE_OMP_FOR statement. */ gsi_remove (&gsi, true); if (gsi_end_p (gsif)) gsif = gsi_after_labels (gsi_bb (gsif)); gsi_next (&gsif); /* Iteration setup for sequential loop goes in L0_BB. 
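(This materializes the "V = istart0; ... iend = iend0;" block from the pseudocode in the function comment above.)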
*/ tree startvar = fd->loop.v; tree endvar = NULL_TREE; if (gimple_omp_for_combined_p (fd->for_stmt)) { gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR && gimple_omp_for_kind (inner_stmt) == GF_OMP_FOR_KIND_SIMD); tree innerc = omp_find_clause (gimple_omp_for_clauses (inner_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); startvar = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); endvar = OMP_CLAUSE_DECL (innerc); } gsi = gsi_start_bb (l0_bb); t = istart0; if (fd->ordered && fd->collapse == 1) t = fold_build2 (MULT_EXPR, fd->iter_type, t, fold_convert (fd->iter_type, fd->loop.step)); else if (bias) t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias); if (fd->ordered && fd->collapse == 1) { if (POINTER_TYPE_P (TREE_TYPE (startvar))) t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar), fd->loop.n1, fold_convert (sizetype, t)); else { t = fold_convert (TREE_TYPE (startvar), t); t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar), fd->loop.n1, t); } } else { if (POINTER_TYPE_P (TREE_TYPE (startvar))) t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t); t = fold_convert (TREE_TYPE (startvar), t); } t = force_gimple_operand_gsi (&gsi, t, DECL_P (startvar) && TREE_ADDRESSABLE (startvar), NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (startvar, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (cond_var) { tree itype = TREE_TYPE (cond_var); /* For lastprivate(conditional:) itervar, we need some iteration counter that starts at unsigned non-zero and increases. Prefer as few IVs as possible, so if we can use startvar itself, use that, or startvar + constant (those would be incremented with step), and as last resort use the s0 + 1 incremented by 1. 
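As an illustrative example (not from the original comment): for lastprivate(conditional:) on for (i = 5; i < n; i++), i starts positive and increases, so the code below can use it directly; for for (i = -3; i < n; i++) it instead uses i + 4, which starts at 1 on the first iteration.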
*/ if ((fd->ordered && fd->collapse == 1) || bias || POINTER_TYPE_P (type) || TREE_CODE (fd->loop.n1) != INTEGER_CST || fd->loop.cond_code != LT_EXPR) t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, istart0), build_int_cst (itype, 1)); else if (tree_int_cst_sgn (fd->loop.n1) == 1) t = fold_convert (itype, t); else { tree c = fold_convert (itype, fd->loop.n1); c = fold_build2 (MINUS_EXPR, itype, build_int_cst (itype, 1), c); t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, t), c); } t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (cond_var, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } t = iend0; if (fd->ordered && fd->collapse == 1) t = fold_build2 (MULT_EXPR, fd->iter_type, t, fold_convert (fd->iter_type, fd->loop.step)); else if (bias) t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias); if (fd->ordered && fd->collapse == 1) { if (POINTER_TYPE_P (TREE_TYPE (startvar))) t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar), fd->loop.n1, fold_convert (sizetype, t)); else { t = fold_convert (TREE_TYPE (startvar), t); t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar), fd->loop.n1, t); } } else { if (POINTER_TYPE_P (TREE_TYPE (startvar))) t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t); t = fold_convert (TREE_TYPE (startvar), t); } iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (endvar) { assign_stmt = gimple_build_assign (endvar, iend); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend))) assign_stmt = gimple_build_assign (fd->loop.v, iend); else assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } /* Handle linear clause adjustments. */ tree itercnt = NULL_TREE; if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR) for (tree c = gimple_omp_for_clauses (fd->for_stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && !OMP_CLAUSE_LINEAR_NO_COPYIN (c)) { tree d = OMP_CLAUSE_DECL (c); bool is_ref = omp_is_reference (d); tree t = d, a, dest; if (is_ref) t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t); tree type = TREE_TYPE (t); if (POINTER_TYPE_P (type)) type = sizetype; dest = unshare_expr (t); tree v = create_tmp_var (TREE_TYPE (t), NULL); expand_omp_build_assign (&gsif, v, t); if (itercnt == NULL_TREE) { itercnt = startvar; tree n1 = fd->loop.n1; if (POINTER_TYPE_P (TREE_TYPE (itercnt))) { itercnt = fold_convert (signed_type_for (TREE_TYPE (itercnt)), itercnt); n1 = fold_convert (TREE_TYPE (itercnt), n1); } itercnt = fold_build2 (MINUS_EXPR, TREE_TYPE (itercnt), itercnt, n1); itercnt = fold_build2 (EXACT_DIV_EXPR, TREE_TYPE (itercnt), itercnt, fd->loop.step); itercnt = force_gimple_operand_gsi (&gsi, itercnt, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } a = fold_build2 (MULT_EXPR, type, fold_convert (type, itercnt), fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c))); t = fold_build2 (type == TREE_TYPE (t) ? 
PLUS_EXPR : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (dest, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } if (fd->collapse > 1) expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar); if (fd->ordered) { /* Until now, counts array contained number of iterations or variable containing it for ith loop. From now on, we need those counts only for collapsed loops, and only for the 2nd till the last collapsed one. Move those one element earlier, we'll use counts[fd->collapse - 1] for the first source/sink iteration counter and so on and counts[fd->ordered] as the array holding the current counter values for depend(source). */ if (fd->collapse > 1) memmove (counts, counts + 1, (fd->collapse - 1) * sizeof (counts[0])); if (broken_loop) { int i; for (i = fd->collapse; i < fd->ordered; i++) { tree type = TREE_TYPE (fd->loops[i].v); tree this_cond = fold_build2 (fd->loops[i].cond_code, boolean_type_node, fold_convert (type, fd->loops[i].n1), fold_convert (type, fd->loops[i].n2)); if (!integer_onep (this_cond)) break; } if (i < fd->ordered) { cont_bb = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb); add_bb_to_loop (cont_bb, l1_bb->loop_father); gimple_stmt_iterator gsi = gsi_after_labels (cont_bb); gimple *g = gimple_build_omp_continue (fd->loop.v, fd->loop.v); gsi_insert_before (&gsi, g, GSI_SAME_STMT); make_edge (cont_bb, l3_bb, EDGE_FALLTHRU); make_edge (cont_bb, l1_bb, 0); l2_bb = create_empty_bb (cont_bb); broken_loop = false; } } expand_omp_ordered_source_sink (region, fd, counts, cont_bb); cont_bb = expand_omp_for_ordered_loops (fd, counts, cont_bb, l1_bb, ordered_lastprivate); if (counts[fd->collapse - 1]) { gcc_assert (fd->collapse == 1); gsi = gsi_last_bb (l0_bb); expand_omp_build_assign (&gsi, counts[fd->collapse - 1], istart0, true); if (cont_bb) { gsi = gsi_last_bb (cont_bb); t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[fd->collapse - 1], build_int_cst (fd->iter_type, 1)); expand_omp_build_assign (&gsi, counts[fd->collapse - 1], t); tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], size_zero_node, NULL_TREE, NULL_TREE); expand_omp_build_assign (&gsi, aref, counts[fd->collapse - 1]); } t = counts[fd->collapse - 1]; } else if (fd->collapse > 1) t = fd->loop.v; else { t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v), fd->loops[0].v, fd->loops[0].n1); t = fold_convert (fd->iter_type, t); } gsi = gsi_last_bb (l0_bb); tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], size_zero_node, NULL_TREE, NULL_TREE); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); expand_omp_build_assign (&gsi, aref, t, true); } if (!broken_loop) { /* Code to control the increment and predicate for the sequential loop goes in the CONT_BB. 
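(I.e. the "V += STEP; if (V cond iend) goto L1; else goto L2;" step from the pseudocode in the function comment.)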
*/ gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE); vmain = gimple_omp_continue_control_use (cont_stmt); vback = gimple_omp_continue_control_def (cont_stmt); if (cond_var) { tree itype = TREE_TYPE (cond_var); tree t2; if ((fd->ordered && fd->collapse == 1) || bias || POINTER_TYPE_P (type) || TREE_CODE (fd->loop.n1) != INTEGER_CST || fd->loop.cond_code != LT_EXPR) t2 = build_int_cst (itype, 1); else t2 = fold_convert (itype, fd->loop.step); t2 = fold_build2 (PLUS_EXPR, itype, cond_var, t2); t2 = force_gimple_operand_gsi (&gsi, t2, false, NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (cond_var, t2); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); } if (!gimple_omp_for_combined_p (fd->for_stmt)) { if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (vmain, fd->loop.step); else t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step); t = force_gimple_operand_gsi (&gsi, t, DECL_P (vback) && TREE_ADDRESSABLE (vback), NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (vback, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); if (fd->ordered && counts[fd->collapse - 1] == NULL_TREE) { tree tem; if (fd->collapse > 1) tem = fd->loop.v; else { tem = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v), fd->loops[0].v, fd->loops[0].n1); tem = fold_convert (fd->iter_type, tem); } tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], size_zero_node, NULL_TREE, NULL_TREE); tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, true, GSI_SAME_STMT); expand_omp_build_assign (&gsi, aref, tem); } t = build2 (fd->loop.cond_code, boolean_type_node, DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, iend); gcond *cond_stmt = gimple_build_cond_empty (t); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); } /* Remove GIMPLE_OMP_CONTINUE. */ gsi_remove (&gsi, true); if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt)) collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb); /* Emit code to get the next parallel iteration in L2_BB. */ gsi = gsi_start_bb (l2_bb); t = build_call_expr (builtin_decl_explicit (next_fn), 2, build_fold_addr_expr (istart0), build_fold_addr_expr (iend0)); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (TREE_TYPE (t) != boolean_type_node) t = fold_build2 (NE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); gcond *cond_stmt = gimple_build_cond_empty (t); gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING); } /* Add the loop cleanup function. 
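(A call to GOMP_loop_end, GOMP_loop_end_nowait or GOMP_loop_end_cancel, chosen just below based on the GIMPLE_OMP_RETURN statement.)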
*/ gsi = gsi_last_nondebug_bb (exit_bb); if (gimple_omp_return_nowait_p (gsi_stmt (gsi))) t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT); else if (gimple_omp_return_lhs (gsi_stmt (gsi))) t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL); else t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END); gcall *call_stmt = gimple_build_call (t, 0); if (fd->ordered) { tree arr = counts[fd->ordered]; tree clobber = build_clobber (TREE_TYPE (arr)); gsi_insert_after (&gsi, gimple_build_assign (arr, clobber), GSI_SAME_STMT); } if (gimple_omp_return_lhs (gsi_stmt (gsi))) { gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi))); if (fd->have_reductemp) { gimple *g = gimple_build_assign (reductions, NOP_EXPR, gimple_call_lhs (call_stmt)); gsi_insert_after (&gsi, g, GSI_SAME_STMT); } } gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT); gsi_remove (&gsi, true); /* Connect the new blocks. */ find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE; find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE; if (!broken_loop) { gimple_seq phis; e = find_edge (cont_bb, l3_bb); ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE); phis = phi_nodes (l3_bb); for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *phi = gsi_stmt (gsi); SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne), PHI_ARG_DEF_FROM_EDGE (phi, e)); } remove_edge (e); make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE); e = find_edge (cont_bb, l1_bb); if (e == NULL) { e = BRANCH_EDGE (cont_bb); gcc_assert (single_succ (e->dest) == l1_bb); } if (gimple_omp_for_combined_p (fd->for_stmt)) { remove_edge (e); e = NULL; } else if (fd->collapse > 1) { remove_edge (e); e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE); } else e->flags = EDGE_TRUE_VALUE; if (e) { e->probability = profile_probability::guessed_always ().apply_scale (7, 8); find_edge (cont_bb, l2_bb)->probability = e->probability.invert (); } else { e = find_edge (cont_bb, l2_bb); e->flags = EDGE_FALLTHRU; } make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE); if (gimple_in_ssa_p (cfun)) { /* Add phis to the outer loop that connect to the phis in the inner, original loop, and move the loop entry value of the inner phi to the loop entry value of the outer phi. 
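Illustrative sketch (not from the original comment): for an inner-loop phi x2 = PHI <x1 (L0), x3 (latch)>, a new phi x4 = PHI <x1 (ENTRY), exit_res (L2)> is created in L0_BB and x4 replaces x1 as the inner phi's argument on the L0 -> L1 edge.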
*/ gphi_iterator psi; for (psi = gsi_start_phis (l3_bb); !gsi_end_p (psi); gsi_next (&psi)) { location_t locus; gphi *nphi; gphi *exit_phi = psi.phi (); if (virtual_operand_p (gimple_phi_result (exit_phi))) continue; edge l2_to_l3 = find_edge (l2_bb, l3_bb); tree exit_res = PHI_ARG_DEF_FROM_EDGE (exit_phi, l2_to_l3); basic_block latch = BRANCH_EDGE (cont_bb)->dest; edge latch_to_l1 = find_edge (latch, l1_bb); gphi *inner_phi = find_phi_with_arg_on_edge (exit_res, latch_to_l1); tree t = gimple_phi_result (exit_phi); tree new_res = copy_ssa_name (t, NULL); nphi = create_phi_node (new_res, l0_bb); edge l0_to_l1 = find_edge (l0_bb, l1_bb); t = PHI_ARG_DEF_FROM_EDGE (inner_phi, l0_to_l1); locus = gimple_phi_arg_location_from_edge (inner_phi, l0_to_l1); edge entry_to_l0 = find_edge (entry_bb, l0_bb); add_phi_arg (nphi, t, entry_to_l0, locus); edge l2_to_l0 = find_edge (l2_bb, l0_bb); add_phi_arg (nphi, exit_res, l2_to_l0, UNKNOWN_LOCATION); add_phi_arg (inner_phi, new_res, l0_to_l1, UNKNOWN_LOCATION); } } set_immediate_dominator (CDI_DOMINATORS, l2_bb, recompute_dominator (CDI_DOMINATORS, l2_bb)); set_immediate_dominator (CDI_DOMINATORS, l3_bb, recompute_dominator (CDI_DOMINATORS, l3_bb)); set_immediate_dominator (CDI_DOMINATORS, l0_bb, recompute_dominator (CDI_DOMINATORS, l0_bb)); set_immediate_dominator (CDI_DOMINATORS, l1_bb, recompute_dominator (CDI_DOMINATORS, l1_bb)); /* We enter expand_omp_for_generic with a loop. This original loop may have its own loop struct, or it may be part of an outer loop struct (which may be the fake loop). */ class loop *outer_loop = entry_bb->loop_father; bool orig_loop_has_loop_struct = l1_bb->loop_father != outer_loop; add_bb_to_loop (l2_bb, outer_loop); /* We've added a new loop around the original loop. Allocate the corresponding loop struct. */ class loop *new_loop = alloc_loop (); new_loop->header = l0_bb; new_loop->latch = l2_bb; add_loop (new_loop, outer_loop); /* Allocate a loop structure for the original loop unless we already had one. */ if (!orig_loop_has_loop_struct && !gimple_omp_for_combined_p (fd->for_stmt)) { class loop *orig_loop = alloc_loop (); orig_loop->header = l1_bb; /* The loop may have multiple latches. */ add_loop (orig_loop, new_loop); } } } /* Helper function for expand_omp_for_static_nochunk. If PTR is NULL, compute needed allocation size. If !ALLOC of team allocations, if ALLOC of thread allocation. SZ is the initial needed size for other purposes, ALLOC_ALIGN guaranteed alignment of allocation in bytes, CNT number of elements of each array, for !ALLOC this is omp_get_num_threads (), for ALLOC number of iterations handled by the current thread. If PTR is non-NULL, it is the start of the allocation and this routine shall assign to OMP_CLAUSE_DECL (c) of those _scantemp_ clauses pointers to the corresponding arrays. 
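(Restated for clarity: with !ALLOC this covers the team-wide allocations, with ALLOC the per-thread ones. An illustrative size computation, not from the original comment: two non-control _scantemp_ arrays with 8-byte and 1-byte element types and CNT = n come out as n * 8 + n * 1 bytes, plus whatever alignment padding the logic below folds into SZ.)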
*/ static tree expand_omp_scantemp_alloc (tree clauses, tree ptr, unsigned HOST_WIDE_INT sz, unsigned HOST_WIDE_INT alloc_align, tree cnt, gimple_stmt_iterator *gsi, bool alloc) { tree eltsz = NULL_TREE; unsigned HOST_WIDE_INT preval = 0; if (ptr && sz) ptr = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (ptr), ptr, size_int (sz)); for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__SCANTEMP_ && !OMP_CLAUSE__SCANTEMP__CONTROL (c) && (!OMP_CLAUSE__SCANTEMP__ALLOC (c)) != alloc) { tree pointee_type = TREE_TYPE (TREE_TYPE (OMP_CLAUSE_DECL (c))); unsigned HOST_WIDE_INT al = TYPE_ALIGN_UNIT (pointee_type); if (tree_fits_uhwi_p (TYPE_SIZE_UNIT (pointee_type))) { unsigned HOST_WIDE_INT szl = tree_to_uhwi (TYPE_SIZE_UNIT (pointee_type)); szl = least_bit_hwi (szl); if (szl) al = MIN (al, szl); } if (ptr == NULL_TREE) { if (eltsz == NULL_TREE) eltsz = TYPE_SIZE_UNIT (pointee_type); else eltsz = size_binop (PLUS_EXPR, eltsz, TYPE_SIZE_UNIT (pointee_type)); } if (preval == 0 && al <= alloc_align) { unsigned HOST_WIDE_INT diff = ROUND_UP (sz, al) - sz; sz += diff; if (diff && ptr) ptr = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (ptr), ptr, size_int (diff)); } else if (al > preval) { if (ptr) { ptr = fold_convert (pointer_sized_int_node, ptr); ptr = fold_build2 (PLUS_EXPR, pointer_sized_int_node, ptr, build_int_cst (pointer_sized_int_node, al - 1)); ptr = fold_build2 (BIT_AND_EXPR, pointer_sized_int_node, ptr, build_int_cst (pointer_sized_int_node, -(HOST_WIDE_INT) al)); ptr = fold_convert (ptr_type_node, ptr); } else sz += al - 1; } if (tree_fits_uhwi_p (TYPE_SIZE_UNIT (pointee_type))) preval = al; else preval = 1; if (ptr) { expand_omp_build_assign (gsi, OMP_CLAUSE_DECL (c), ptr, false); ptr = OMP_CLAUSE_DECL (c); ptr = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (ptr), ptr, size_binop (MULT_EXPR, cnt, TYPE_SIZE_UNIT (pointee_type))); } } if (ptr == NULL_TREE) { eltsz = size_binop (MULT_EXPR, eltsz, cnt); if (sz) eltsz = size_binop (PLUS_EXPR, eltsz, size_int (sz)); return eltsz; } else return ptr; } /* A subroutine of expand_omp_for. Generate code for a parallel loop with static schedule and no specified chunk size. 
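A source construct reaching this path would be, for example, #pragma omp for schedule(static) with no chunk argument (an illustrative case, not from the original comment).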
Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2; if (cond is <) adj = STEP - 1; else adj = STEP + 1; if ((__typeof (V)) -1 > 0 && cond is >) n = -(adj + N2 - N1) / -STEP; else n = (adj + N2 - N1) / STEP; q = n / nthreads; tt = n % nthreads; if (threadid < tt) goto L3; else goto L4; L3: tt = 0; q = q + 1; L4: s0 = q * threadid + tt; e0 = s0 + q; V = s0 * STEP + N1; if (s0 >= e0) goto L2; else goto L0; L0: e = e0 * STEP + N1; L1: BODY; V += STEP; if (V cond e) goto L1; L2: */ static void expand_omp_for_static_nochunk (struct omp_region *region, struct omp_for_data *fd, gimple *inner_stmt) { tree n, q, s0, e0, e, t, tt, nthreads = NULL_TREE, threadid; tree type, itype, vmain, vback; basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb; basic_block body_bb, cont_bb, collapse_bb = NULL; basic_block fin_bb, fourth_bb = NULL, fifth_bb = NULL, sixth_bb = NULL; basic_block exit1_bb = NULL, exit2_bb = NULL, exit3_bb = NULL; gimple_stmt_iterator gsi, gsip; edge ep; bool broken_loop = region->cont == NULL; tree *counts = NULL; tree n1, n2, step; tree reductions = NULL_TREE; tree cond_var = NULL_TREE, condtemp = NULL_TREE; itype = type = TREE_TYPE (fd->loop.v); if (POINTER_TYPE_P (type)) itype = signed_type_for (type); entry_bb = region->entry; cont_bb = region->cont; gcc_assert (EDGE_COUNT (entry_bb->succs) == 2); fin_bb = BRANCH_EDGE (entry_bb)->dest; gcc_assert (broken_loop || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest)); seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb)); body_bb = single_succ (seq_start_bb); if (!broken_loop) { gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb); gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); } exit_bb = region->exit; /* Iteration space partitioning goes in ENTRY_BB. 
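(The computations of n, q, tt, s0 and e0 from the pseudocode above.)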
*/ gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); gsip = gsi; gsi_prev (&gsip); if (fd->collapse > 1) { int first_zero_iter = -1, dummy = -1; basic_block l2_dom_bb = NULL, dummy_bb = NULL; counts = XALLOCAVEC (tree, fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, fin_bb, first_zero_iter, dummy_bb, dummy, l2_dom_bb); t = NULL_TREE; } else if (gimple_omp_for_combined_into_p (fd->for_stmt)) t = integer_one_node; else t = fold_binary (fd->loop.cond_code, boolean_type_node, fold_convert (type, fd->loop.n1), fold_convert (type, fd->loop.n2)); if (fd->collapse == 1 && TYPE_UNSIGNED (type) && (t == NULL_TREE || !integer_onep (t))) { n1 = fold_convert (type, unshare_expr (fd->loop.n1)); n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE, true, GSI_SAME_STMT); n2 = fold_convert (type, unshare_expr (fd->loop.n2)); n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE, true, GSI_SAME_STMT); gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL) || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL)) { gsi = gsi_for_stmt (cond_stmt); gimple_regimplify_operands (cond_stmt, &gsi); } ep = split_block (entry_bb, cond_stmt); ep->flags = EDGE_TRUE_VALUE; entry_bb = ep->dest; ep->probability = profile_probability::very_likely (); ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE); ep->probability = profile_probability::very_unlikely (); if (gimple_in_ssa_p (cfun)) { int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx; for (gphi_iterator gpi = gsi_start_phis (fin_bb); !gsi_end_p (gpi); gsi_next (&gpi)) { gphi *phi = gpi.phi (); add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx), ep, UNKNOWN_LOCATION); } } gsi = gsi_last_bb (entry_bb); } if (fd->lastprivate_conditional) { tree clauses = gimple_omp_for_clauses (fd->for_stmt); tree c = omp_find_clause (clauses, OMP_CLAUSE__CONDTEMP_); if (fd->have_pointer_condtemp) condtemp = OMP_CLAUSE_DECL (c); c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE__CONDTEMP_); cond_var = OMP_CLAUSE_DECL (c); } if (fd->have_reductemp /* For scan, we don't want to reinitialize condtemp before the second loop. 
*/ || (fd->have_pointer_condtemp && !fd->have_scantemp) || fd->have_nonctrl_scantemp) { tree t1 = build_int_cst (long_integer_type_node, 0); tree t2 = build_int_cst (long_integer_type_node, 1); tree t3 = build_int_cstu (long_integer_type_node, (HOST_WIDE_INT_1U << 31) + 1); tree clauses = gimple_omp_for_clauses (fd->for_stmt); gimple_stmt_iterator gsi2 = gsi_none (); gimple *g = NULL; tree mem = null_pointer_node, memv = NULL_TREE; unsigned HOST_WIDE_INT condtemp_sz = 0; unsigned HOST_WIDE_INT alloc_align = 0; if (fd->have_reductemp) { gcc_assert (!fd->have_nonctrl_scantemp); tree c = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_); reductions = OMP_CLAUSE_DECL (c); gcc_assert (TREE_CODE (reductions) == SSA_NAME); g = SSA_NAME_DEF_STMT (reductions); reductions = gimple_assign_rhs1 (g); OMP_CLAUSE_DECL (c) = reductions; gsi2 = gsi_for_stmt (g); } else { if (gsi_end_p (gsip)) gsi2 = gsi_after_labels (region->entry); else gsi2 = gsip; reductions = null_pointer_node; } if (fd->have_pointer_condtemp || fd->have_nonctrl_scantemp) { tree type; if (fd->have_pointer_condtemp) type = TREE_TYPE (condtemp); else type = ptr_type_node; memv = create_tmp_var (type); TREE_ADDRESSABLE (memv) = 1; unsigned HOST_WIDE_INT sz = 0; tree size = NULL_TREE; if (fd->have_pointer_condtemp) { sz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type))); sz *= fd->lastprivate_conditional; condtemp_sz = sz; } if (fd->have_nonctrl_scantemp) { nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); gimple *g = gimple_build_call (nthreads, 0); nthreads = create_tmp_var (integer_type_node); gimple_call_set_lhs (g, nthreads); gsi_insert_before (&gsi2, g, GSI_SAME_STMT); nthreads = fold_convert (sizetype, nthreads); alloc_align = TYPE_ALIGN_UNIT (long_long_integer_type_node); size = expand_omp_scantemp_alloc (clauses, NULL_TREE, sz, alloc_align, nthreads, NULL, false); size = fold_convert (type, size); } else size = build_int_cst (type, sz); expand_omp_build_assign (&gsi2, memv, size, false); mem = build_fold_addr_expr (memv); } tree t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_LOOP_START), 9, t1, t2, t2, t3, t1, null_pointer_node, null_pointer_node, reductions, mem); force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE, true, GSI_SAME_STMT); if (fd->have_pointer_condtemp) expand_omp_build_assign (&gsi2, condtemp, memv, false); if (fd->have_nonctrl_scantemp) { tree ptr = fd->have_pointer_condtemp ? 
condtemp : memv; expand_omp_scantemp_alloc (clauses, ptr, condtemp_sz, alloc_align, nthreads, &gsi2, false); } if (fd->have_reductemp) { gsi_remove (&gsi2, true); release_ssa_name (gimple_assign_lhs (g)); } } switch (gimple_omp_for_kind (fd->for_stmt)) { case GF_OMP_FOR_KIND_FOR: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM); break; case GF_OMP_FOR_KIND_DISTRIBUTE: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM); break; default: gcc_unreachable (); } nthreads = build_call_expr (nthreads, 0); nthreads = fold_convert (itype, nthreads); nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE, true, GSI_SAME_STMT); threadid = build_call_expr (threadid, 0); threadid = fold_convert (itype, threadid); threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE, true, GSI_SAME_STMT); n1 = fd->loop.n1; n2 = fd->loop.n2; step = fd->loop.step; if (gimple_omp_for_combined_into_p (fd->for_stmt)) { tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n1 = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n2 = OMP_CLAUSE_DECL (innerc); } n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1), true, NULL_TREE, true, GSI_SAME_STMT); n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2), true, NULL_TREE, true, GSI_SAME_STMT); step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step), true, NULL_TREE, true, GSI_SAME_STMT); t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1)); t = fold_build2 (PLUS_EXPR, itype, step, t); t = fold_build2 (PLUS_EXPR, itype, t, n2); t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1)); if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR) t = fold_build2 (TRUNC_DIV_EXPR, itype, fold_build1 (NEGATE_EXPR, itype, t), fold_build1 (NEGATE_EXPR, itype, step)); else t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step); t = fold_convert (itype, t); n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); q = create_tmp_reg (itype, "q"); t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads); t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT); gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT); tt = create_tmp_reg (itype, "tt"); t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads); t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT); gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT); t = build2 (LT_EXPR, boolean_type_node, threadid, tt); gcond *cond_stmt = gimple_build_cond_empty (t); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); second_bb = split_block (entry_bb, cond_stmt)->dest; gsi = gsi_last_nondebug_bb (second_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)), GSI_SAME_STMT); gassign *assign_stmt = gimple_build_assign (q, PLUS_EXPR, q, build_int_cst (itype, 1)); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); third_bb = split_block (second_bb, assign_stmt)->dest; gsi = gsi_last_nondebug_bb (third_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); if (fd->have_nonctrl_scantemp) { tree clauses = gimple_omp_for_clauses (fd->for_stmt); tree controlp = NULL_TREE, controlb = NULL_TREE; for (tree c = 
clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__SCANTEMP_ && OMP_CLAUSE__SCANTEMP__CONTROL (c)) { if (TREE_TYPE (OMP_CLAUSE_DECL (c)) == boolean_type_node) controlb = OMP_CLAUSE_DECL (c); else controlp = OMP_CLAUSE_DECL (c); if (controlb && controlp) break; } gcc_assert (controlp && controlb); tree cnt = create_tmp_var (sizetype); gimple *g = gimple_build_assign (cnt, NOP_EXPR, q); gsi_insert_before (&gsi, g, GSI_SAME_STMT); unsigned HOST_WIDE_INT alloc_align = TYPE_ALIGN_UNIT (ptr_type_node); tree sz = expand_omp_scantemp_alloc (clauses, NULL_TREE, 0, alloc_align, cnt, NULL, true); tree size = create_tmp_var (sizetype); expand_omp_build_assign (&gsi, size, sz, false); tree cmp = fold_build2 (GT_EXPR, boolean_type_node, size, size_int (16384)); expand_omp_build_assign (&gsi, controlb, cmp); g = gimple_build_cond (NE_EXPR, controlb, boolean_false_node, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi, g, GSI_SAME_STMT); fourth_bb = split_block (third_bb, g)->dest; gsi = gsi_last_nondebug_bb (fourth_bb); /* FIXME: Once we have allocators, this should use allocator. */ g = gimple_build_call (builtin_decl_explicit (BUILT_IN_MALLOC), 1, size); gimple_call_set_lhs (g, controlp); gsi_insert_before (&gsi, g, GSI_SAME_STMT); expand_omp_scantemp_alloc (clauses, controlp, 0, alloc_align, cnt, &gsi, true); gsi_prev (&gsi); g = gsi_stmt (gsi); fifth_bb = split_block (fourth_bb, g)->dest; gsi = gsi_last_nondebug_bb (fifth_bb); g = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_SAVE), 0); gimple_call_set_lhs (g, controlp); gsi_insert_before (&gsi, g, GSI_SAME_STMT); tree alloca_decl = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN); for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__SCANTEMP_ && OMP_CLAUSE__SCANTEMP__ALLOC (c)) { tree tmp = create_tmp_var (sizetype); tree pointee_type = TREE_TYPE (TREE_TYPE (OMP_CLAUSE_DECL (c))); g = gimple_build_assign (tmp, MULT_EXPR, cnt, TYPE_SIZE_UNIT (pointee_type)); gsi_insert_before (&gsi, g, GSI_SAME_STMT); g = gimple_build_call (alloca_decl, 2, tmp, size_int (TYPE_ALIGN (pointee_type))); gimple_call_set_lhs (g, OMP_CLAUSE_DECL (c)); gsi_insert_before (&gsi, g, GSI_SAME_STMT); } sixth_bb = split_block (fifth_bb, g)->dest; gsi = gsi_last_nondebug_bb (sixth_bb); } t = build2 (MULT_EXPR, itype, q, threadid); t = build2 (PLUS_EXPR, itype, t, tt); s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = fold_build2 (PLUS_EXPR, itype, s0, q); e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = build2 (GE_EXPR, boolean_type_node, s0, e0); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); /* Remove the GIMPLE_OMP_FOR statement. */ gsi_remove (&gsi, true); /* Setup code for sequential iteration goes in SEQ_START_BB. */ gsi = gsi_start_bb (seq_start_bb); tree startvar = fd->loop.v; tree endvar = NULL_TREE; if (gimple_omp_for_combined_p (fd->for_stmt)) { tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL ? 
gimple_omp_parallel_clauses (inner_stmt) : gimple_omp_for_clauses (inner_stmt); tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); startvar = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); endvar = OMP_CLAUSE_DECL (innerc); if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST && gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE) { int i; for (i = 1; i < fd->collapse; i++) { innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); } innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); if (innerc) { /* If needed (distribute parallel for with lastprivate), propagate down the total number of iterations. */ tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)), fd->loop.n2); t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } } } t = fold_convert (itype, s0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) { t = fold_build_pointer_plus (n1, t); if (!POINTER_TYPE_P (TREE_TYPE (startvar)) && TYPE_PRECISION (TREE_TYPE (startvar)) > TYPE_PRECISION (type)) t = fold_convert (signed_type_for (type), t); } else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); t = force_gimple_operand_gsi (&gsi, t, DECL_P (startvar) && TREE_ADDRESSABLE (startvar), NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (startvar, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (cond_var) { tree itype = TREE_TYPE (cond_var); /* For lastprivate(conditional:) itervar, we need some iteration counter that starts at unsigned non-zero and increases. Prefer as few IVs as possible, so if we can use startvar itself, use that, or startvar + constant (those would be incremented with step), and as last resort use the s0 + 1 incremented by 1. 
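   As an added illustration (hypothetical numbers, not from the original
   comment): with N1 == 1 the counter is just the startvar value, which
   is already positive; with N1 == -5 the code below adds 1 - N1 == 6,
   so at the first iteration (startvar == -5) the counter is exactly 1
   and then grows with each step.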
*/ if (POINTER_TYPE_P (type) || TREE_CODE (n1) != INTEGER_CST || fd->loop.cond_code != LT_EXPR) t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, s0), build_int_cst (itype, 1)); else if (tree_int_cst_sgn (n1) == 1) t = fold_convert (itype, t); else { tree c = fold_convert (itype, n1); c = fold_build2 (MINUS_EXPR, itype, build_int_cst (itype, 1), c); t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, t), c); } t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (cond_var, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } t = fold_convert (itype, e0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) { t = fold_build_pointer_plus (n1, t); if (!POINTER_TYPE_P (TREE_TYPE (startvar)) && TYPE_PRECISION (TREE_TYPE (startvar)) > TYPE_PRECISION (type)) t = fold_convert (signed_type_for (type), t); } else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (endvar) { assign_stmt = gimple_build_assign (endvar, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e))) assign_stmt = gimple_build_assign (fd->loop.v, e); else assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } /* Handle linear clause adjustments. */ tree itercnt = NULL_TREE; if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR) for (tree c = gimple_omp_for_clauses (fd->for_stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && !OMP_CLAUSE_LINEAR_NO_COPYIN (c)) { tree d = OMP_CLAUSE_DECL (c); bool is_ref = omp_is_reference (d); tree t = d, a, dest; if (is_ref) t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t); if (itercnt == NULL_TREE) { if (gimple_omp_for_combined_into_p (fd->for_stmt)) { itercnt = fold_build2 (MINUS_EXPR, itype, fold_convert (itype, n1), fold_convert (itype, fd->loop.n1)); itercnt = fold_build2 (EXACT_DIV_EXPR, itype, itercnt, step); itercnt = fold_build2 (PLUS_EXPR, itype, itercnt, s0); itercnt = force_gimple_operand_gsi (&gsi, itercnt, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } else itercnt = s0; } tree type = TREE_TYPE (t); if (POINTER_TYPE_P (type)) type = sizetype; a = fold_build2 (MULT_EXPR, type, fold_convert (type, itercnt), fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c))); dest = unshare_expr (t); t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR : POINTER_PLUS_EXPR, TREE_TYPE (t), t, a); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (dest, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } if (fd->collapse > 1) expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar); if (!broken_loop) { /* The code controlling the sequential loop replaces the GIMPLE_OMP_CONTINUE. 
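   It emits the V += STEP increment and the V cond E test that keeps
   branching back to the body while V is still inside this thread's
   assigned section.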
*/ gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE); vmain = gimple_omp_continue_control_use (cont_stmt); vback = gimple_omp_continue_control_def (cont_stmt); if (cond_var) { tree itype = TREE_TYPE (cond_var); tree t2; if (POINTER_TYPE_P (type) || TREE_CODE (n1) != INTEGER_CST || fd->loop.cond_code != LT_EXPR) t2 = build_int_cst (itype, 1); else t2 = fold_convert (itype, step); t2 = fold_build2 (PLUS_EXPR, itype, cond_var, t2); t2 = force_gimple_operand_gsi (&gsi, t2, false, NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (cond_var, t2); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); } if (!gimple_omp_for_combined_p (fd->for_stmt)) { if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (vmain, step); else t = fold_build2 (PLUS_EXPR, type, vmain, step); t = force_gimple_operand_gsi (&gsi, t, DECL_P (vback) && TREE_ADDRESSABLE (vback), NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (vback, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); t = build2 (fd->loop.cond_code, boolean_type_node, DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); } /* Remove the GIMPLE_OMP_CONTINUE statement. */ gsi_remove (&gsi, true); if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt)) collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb); } /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */ gsi = gsi_last_nondebug_bb (exit_bb); if (!gimple_omp_return_nowait_p (gsi_stmt (gsi))) { t = gimple_omp_return_lhs (gsi_stmt (gsi)); if (fd->have_reductemp || ((fd->have_pointer_condtemp || fd->have_scantemp) && !fd->have_nonctrl_scantemp)) { tree fn; if (t) fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL); else fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END); gcall *g = gimple_build_call (fn, 0); if (t) { gimple_call_set_lhs (g, t); if (fd->have_reductemp) gsi_insert_after (&gsi, gimple_build_assign (reductions, NOP_EXPR, t), GSI_SAME_STMT); } gsi_insert_after (&gsi, g, GSI_SAME_STMT); } else gsi_insert_after (&gsi, omp_build_barrier (t), GSI_SAME_STMT); } else if ((fd->have_pointer_condtemp || fd->have_scantemp) && !fd->have_nonctrl_scantemp) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT); gcall *g = gimple_build_call (fn, 0); gsi_insert_after (&gsi, g, GSI_SAME_STMT); } if (fd->have_scantemp && !fd->have_nonctrl_scantemp) { tree clauses = gimple_omp_for_clauses (fd->for_stmt); tree controlp = NULL_TREE, controlb = NULL_TREE; for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__SCANTEMP_ && OMP_CLAUSE__SCANTEMP__CONTROL (c)) { if (TREE_TYPE (OMP_CLAUSE_DECL (c)) == boolean_type_node) controlb = OMP_CLAUSE_DECL (c); else controlp = OMP_CLAUSE_DECL (c); if (controlb && controlp) break; } gcc_assert (controlp && controlb); gimple *g = gimple_build_cond (NE_EXPR, controlb, boolean_false_node, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi, g, GSI_SAME_STMT); exit1_bb = split_block (exit_bb, g)->dest; gsi = gsi_after_labels (exit1_bb); g = gimple_build_call (builtin_decl_explicit (BUILT_IN_FREE), 1, controlp); gsi_insert_before (&gsi, g, GSI_SAME_STMT); exit2_bb = split_block (exit1_bb, g)->dest; gsi = gsi_after_labels (exit2_bb); g = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_RESTORE), 1, controlp); gsi_insert_before (&gsi, g, GSI_SAME_STMT); exit3_bb = 
split_block (exit2_bb, g)->dest; gsi = gsi_after_labels (exit3_bb); } gsi_remove (&gsi, true); /* Connect all the blocks. */ ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE); ep->probability = profile_probability::guessed_always ().apply_scale (3, 4); ep = find_edge (entry_bb, second_bb); ep->flags = EDGE_TRUE_VALUE; ep->probability = profile_probability::guessed_always ().apply_scale (1, 4); if (fourth_bb) { ep = make_edge (third_bb, fifth_bb, EDGE_FALSE_VALUE); ep->probability = profile_probability::guessed_always ().apply_scale (1, 2); ep = find_edge (third_bb, fourth_bb); ep->flags = EDGE_TRUE_VALUE; ep->probability = profile_probability::guessed_always ().apply_scale (1, 2); ep = find_edge (fourth_bb, fifth_bb); redirect_edge_and_branch (ep, sixth_bb); } else sixth_bb = third_bb; find_edge (sixth_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE; find_edge (sixth_bb, fin_bb)->flags = EDGE_TRUE_VALUE; if (exit1_bb) { ep = make_edge (exit_bb, exit2_bb, EDGE_FALSE_VALUE); ep->probability = profile_probability::guessed_always ().apply_scale (1, 2); ep = find_edge (exit_bb, exit1_bb); ep->flags = EDGE_TRUE_VALUE; ep->probability = profile_probability::guessed_always ().apply_scale (1, 2); ep = find_edge (exit1_bb, exit2_bb); redirect_edge_and_branch (ep, exit3_bb); } if (!broken_loop) { ep = find_edge (cont_bb, body_bb); if (ep == NULL) { ep = BRANCH_EDGE (cont_bb); gcc_assert (single_succ (ep->dest) == body_bb); } if (gimple_omp_for_combined_p (fd->for_stmt)) { remove_edge (ep); ep = NULL; } else if (fd->collapse > 1) { remove_edge (ep); ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE); } else ep->flags = EDGE_TRUE_VALUE; find_edge (cont_bb, fin_bb)->flags = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU; } set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb); set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb); if (fourth_bb) { set_immediate_dominator (CDI_DOMINATORS, fifth_bb, third_bb); set_immediate_dominator (CDI_DOMINATORS, sixth_bb, third_bb); } set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, sixth_bb); set_immediate_dominator (CDI_DOMINATORS, body_bb, recompute_dominator (CDI_DOMINATORS, body_bb)); set_immediate_dominator (CDI_DOMINATORS, fin_bb, recompute_dominator (CDI_DOMINATORS, fin_bb)); if (exit1_bb) { set_immediate_dominator (CDI_DOMINATORS, exit2_bb, exit_bb); set_immediate_dominator (CDI_DOMINATORS, exit3_bb, exit_bb); } class loop *loop = body_bb->loop_father; if (loop != entry_bb->loop_father) { gcc_assert (broken_loop || loop->header == body_bb); gcc_assert (broken_loop || loop->latch == region->cont || single_pred (loop->latch) == region->cont); return; } if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt)) { loop = alloc_loop (); loop->header = body_bb; if (collapse_bb == NULL) loop->latch = cont_bb; add_loop (loop, body_bb->loop_father); } } /* Return phi in E->DEST with ARG on edge E. */ static gphi * find_phi_with_arg_on_edge (tree arg, edge e) { basic_block bb = e->dest; for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); gsi_next (&gpi)) { gphi *phi = gpi.phi (); if (PHI_ARG_DEF_FROM_EDGE (phi, e) == arg) return phi; } return NULL; } /* A subroutine of expand_omp_for. Generate code for a parallel loop with static schedule and a specified chunk size. 
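   Each thread repeatedly claims CHUNK-sized blocks of iterations,
   round-robin; the trip counter below numbers the rounds.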
     Given parameters:
	for (V = N1; V cond N2; V += STEP) BODY;
     where COND is "<" or ">", we generate pseudocode

	if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	trip = 0;
	V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
					      here so that V is defined
					      if the loop is not entered
    L0:
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min (s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
    L1:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L2:
	BODY;
	V += STEP;
	if (V cond e) goto L2; else goto L3;
    L3:
	trip += 1;
	goto L0;
    L4:
*/

static void
expand_omp_for_static_chunk (struct omp_region *region,
			     struct omp_for_data *fd, gimple *inner_stmt)
{
  tree n, s0, e0, e, t;
  tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
  tree type, itype, vmain, vback, vextra;
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
  basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
  gimple_stmt_iterator gsi, gsip;
  edge se;
  bool broken_loop = region->cont == NULL;
  tree *counts = NULL;
  tree n1, n2, step;
  tree reductions = NULL_TREE;
  tree cond_var = NULL_TREE, condtemp = NULL_TREE;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = signed_type_for (type);

  entry_bb = region->entry;
  se = split_block (entry_bb, last_stmt (entry_bb));
  entry_bb = se->src;
  iter_part_bb = se->dest;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
  fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
  gcc_assert (broken_loop
	      || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
  body_bb = single_succ (seq_start_bb);
  if (!broken_loop)
    {
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb
		  || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
      trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
    }
  exit_bb = region->exit;

  /* Trip and adjustment setup goes in ENTRY_BB.
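     Besides the iteration count N, this precomputes VEXTRA, the
     threadid * CHUNK * STEP + N1 fallback value of V from the pseudocode
     above, so V is defined even if this thread enters the loop zero
     times.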
*/ gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); gsip = gsi; gsi_prev (&gsip); if (fd->collapse > 1) { int first_zero_iter = -1, dummy = -1; basic_block l2_dom_bb = NULL, dummy_bb = NULL; counts = XALLOCAVEC (tree, fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, fin_bb, first_zero_iter, dummy_bb, dummy, l2_dom_bb); t = NULL_TREE; } else if (gimple_omp_for_combined_into_p (fd->for_stmt)) t = integer_one_node; else t = fold_binary (fd->loop.cond_code, boolean_type_node, fold_convert (type, fd->loop.n1), fold_convert (type, fd->loop.n2)); if (fd->collapse == 1 && TYPE_UNSIGNED (type) && (t == NULL_TREE || !integer_onep (t))) { n1 = fold_convert (type, unshare_expr (fd->loop.n1)); n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE, true, GSI_SAME_STMT); n2 = fold_convert (type, unshare_expr (fd->loop.n2)); n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE, true, GSI_SAME_STMT); gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL) || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL)) { gsi = gsi_for_stmt (cond_stmt); gimple_regimplify_operands (cond_stmt, &gsi); } se = split_block (entry_bb, cond_stmt); se->flags = EDGE_TRUE_VALUE; entry_bb = se->dest; se->probability = profile_probability::very_likely (); se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE); se->probability = profile_probability::very_unlikely (); if (gimple_in_ssa_p (cfun)) { int dest_idx = find_edge (iter_part_bb, fin_bb)->dest_idx; for (gphi_iterator gpi = gsi_start_phis (fin_bb); !gsi_end_p (gpi); gsi_next (&gpi)) { gphi *phi = gpi.phi (); add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx), se, UNKNOWN_LOCATION); } } gsi = gsi_last_bb (entry_bb); } if (fd->lastprivate_conditional) { tree clauses = gimple_omp_for_clauses (fd->for_stmt); tree c = omp_find_clause (clauses, OMP_CLAUSE__CONDTEMP_); if (fd->have_pointer_condtemp) condtemp = OMP_CLAUSE_DECL (c); c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE__CONDTEMP_); cond_var = OMP_CLAUSE_DECL (c); } if (fd->have_reductemp || fd->have_pointer_condtemp) { tree t1 = build_int_cst (long_integer_type_node, 0); tree t2 = build_int_cst (long_integer_type_node, 1); tree t3 = build_int_cstu (long_integer_type_node, (HOST_WIDE_INT_1U << 31) + 1); tree clauses = gimple_omp_for_clauses (fd->for_stmt); gimple_stmt_iterator gsi2 = gsi_none (); gimple *g = NULL; tree mem = null_pointer_node, memv = NULL_TREE; if (fd->have_reductemp) { tree c = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_); reductions = OMP_CLAUSE_DECL (c); gcc_assert (TREE_CODE (reductions) == SSA_NAME); g = SSA_NAME_DEF_STMT (reductions); reductions = gimple_assign_rhs1 (g); OMP_CLAUSE_DECL (c) = reductions; gsi2 = gsi_for_stmt (g); } else { if (gsi_end_p (gsip)) gsi2 = gsi_after_labels (region->entry); else gsi2 = gsip; reductions = null_pointer_node; } if (fd->have_pointer_condtemp) { tree type = TREE_TYPE (condtemp); memv = create_tmp_var (type); TREE_ADDRESSABLE (memv) = 1; unsigned HOST_WIDE_INT sz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type))); sz *= fd->lastprivate_conditional; expand_omp_build_assign (&gsi2, memv, build_int_cst (type, sz), false); mem = build_fold_addr_expr (memv); } tree t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_LOOP_START), 9, t1, t2, t2, t3, t1, null_pointer_node, 
null_pointer_node, reductions, mem); force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE, true, GSI_SAME_STMT); if (fd->have_pointer_condtemp) expand_omp_build_assign (&gsi2, condtemp, memv, false); if (fd->have_reductemp) { gsi_remove (&gsi2, true); release_ssa_name (gimple_assign_lhs (g)); } } switch (gimple_omp_for_kind (fd->for_stmt)) { case GF_OMP_FOR_KIND_FOR: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM); break; case GF_OMP_FOR_KIND_DISTRIBUTE: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM); break; default: gcc_unreachable (); } nthreads = build_call_expr (nthreads, 0); nthreads = fold_convert (itype, nthreads); nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE, true, GSI_SAME_STMT); threadid = build_call_expr (threadid, 0); threadid = fold_convert (itype, threadid); threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE, true, GSI_SAME_STMT); n1 = fd->loop.n1; n2 = fd->loop.n2; step = fd->loop.step; if (gimple_omp_for_combined_into_p (fd->for_stmt)) { tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n1 = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n2 = OMP_CLAUSE_DECL (innerc); } n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1), true, NULL_TREE, true, GSI_SAME_STMT); n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2), true, NULL_TREE, true, GSI_SAME_STMT); step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step), true, NULL_TREE, true, GSI_SAME_STMT); tree chunk_size = fold_convert (itype, fd->chunk_size); chunk_size = omp_adjust_chunk_size (chunk_size, fd->simd_schedule); chunk_size = force_gimple_operand_gsi (&gsi, chunk_size, true, NULL_TREE, true, GSI_SAME_STMT); t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1)); t = fold_build2 (PLUS_EXPR, itype, step, t); t = fold_build2 (PLUS_EXPR, itype, t, n2); t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1)); if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR) t = fold_build2 (TRUNC_DIV_EXPR, itype, fold_build1 (NEGATE_EXPR, itype, t), fold_build1 (NEGATE_EXPR, itype, step)); else t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step); t = fold_convert (itype, t); n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); trip_var = create_tmp_reg (itype, ".trip"); if (gimple_in_ssa_p (cfun)) { trip_init = make_ssa_name (trip_var); trip_main = make_ssa_name (trip_var); trip_back = make_ssa_name (trip_var); } else { trip_init = trip_var; trip_main = trip_var; trip_back = trip_var; } gassign *assign_stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0)); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); t = fold_build2 (MULT_EXPR, itype, threadid, chunk_size); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (n1, t); else t = fold_build2 (PLUS_EXPR, type, t, n1); vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); /* Remove the GIMPLE_OMP_FOR. */ gsi_remove (&gsi, true); gimple_stmt_iterator gsif = gsi; /* Iteration space partitioning goes in ITER_PART_BB. 
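   As a worked example (hypothetical numbers, not from the source): with
   nthreads == 4, threadid == 1, CHUNK == 8 and trip == 2, the code below
   computes s0 = (2*4 + 1) * 8 == 72 and e0 = min (72 + 8, n).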
*/ gsi = gsi_last_bb (iter_part_bb); t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads); t = fold_build2 (PLUS_EXPR, itype, t, threadid); t = fold_build2 (MULT_EXPR, itype, t, chunk_size); s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = fold_build2 (PLUS_EXPR, itype, s0, chunk_size); t = fold_build2 (MIN_EXPR, itype, t, n); e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = build2 (LT_EXPR, boolean_type_node, s0, n); gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING); /* Setup code for sequential iteration goes in SEQ_START_BB. */ gsi = gsi_start_bb (seq_start_bb); tree startvar = fd->loop.v; tree endvar = NULL_TREE; if (gimple_omp_for_combined_p (fd->for_stmt)) { tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL ? gimple_omp_parallel_clauses (inner_stmt) : gimple_omp_for_clauses (inner_stmt); tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); startvar = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); endvar = OMP_CLAUSE_DECL (innerc); if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST && gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE) { int i; for (i = 1; i < fd->collapse; i++) { innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); } innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); if (innerc) { /* If needed (distribute parallel for with lastprivate), propagate down the total number of iterations. */ tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)), fd->loop.n2); t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } } } t = fold_convert (itype, s0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) { t = fold_build_pointer_plus (n1, t); if (!POINTER_TYPE_P (TREE_TYPE (startvar)) && TYPE_PRECISION (TREE_TYPE (startvar)) > TYPE_PRECISION (type)) t = fold_convert (signed_type_for (type), t); } else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); t = force_gimple_operand_gsi (&gsi, t, DECL_P (startvar) && TREE_ADDRESSABLE (startvar), NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (startvar, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (cond_var) { tree itype = TREE_TYPE (cond_var); /* For lastprivate(conditional:) itervar, we need some iteration counter that starts at unsigned non-zero and increases. Prefer as few IVs as possible, so if we can use startvar itself, use that, or startvar + constant (those would be incremented with step), and as last resort use the s0 + 1 incremented by 1. 
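   (Same counter selection strategy as in the nochunk expansion above.)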
*/ if (POINTER_TYPE_P (type) || TREE_CODE (n1) != INTEGER_CST || fd->loop.cond_code != LT_EXPR) t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, s0), build_int_cst (itype, 1)); else if (tree_int_cst_sgn (n1) == 1) t = fold_convert (itype, t); else { tree c = fold_convert (itype, n1); c = fold_build2 (MINUS_EXPR, itype, build_int_cst (itype, 1), c); t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, t), c); } t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (cond_var, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } t = fold_convert (itype, e0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) { t = fold_build_pointer_plus (n1, t); if (!POINTER_TYPE_P (TREE_TYPE (startvar)) && TYPE_PRECISION (TREE_TYPE (startvar)) > TYPE_PRECISION (type)) t = fold_convert (signed_type_for (type), t); } else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (endvar) { assign_stmt = gimple_build_assign (endvar, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e))) assign_stmt = gimple_build_assign (fd->loop.v, e); else assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } /* Handle linear clause adjustments. */ tree itercnt = NULL_TREE, itercntbias = NULL_TREE; if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR) for (tree c = gimple_omp_for_clauses (fd->for_stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && !OMP_CLAUSE_LINEAR_NO_COPYIN (c)) { tree d = OMP_CLAUSE_DECL (c); bool is_ref = omp_is_reference (d); tree t = d, a, dest; if (is_ref) t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t); tree type = TREE_TYPE (t); if (POINTER_TYPE_P (type)) type = sizetype; dest = unshare_expr (t); tree v = create_tmp_var (TREE_TYPE (t), NULL); expand_omp_build_assign (&gsif, v, t); if (itercnt == NULL_TREE) { if (gimple_omp_for_combined_into_p (fd->for_stmt)) { itercntbias = fold_build2 (MINUS_EXPR, itype, fold_convert (itype, n1), fold_convert (itype, fd->loop.n1)); itercntbias = fold_build2 (EXACT_DIV_EXPR, itype, itercntbias, step); itercntbias = force_gimple_operand_gsi (&gsif, itercntbias, true, NULL_TREE, true, GSI_SAME_STMT); itercnt = fold_build2 (PLUS_EXPR, itype, itercntbias, s0); itercnt = force_gimple_operand_gsi (&gsi, itercnt, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } else itercnt = s0; } a = fold_build2 (MULT_EXPR, type, fold_convert (type, itercnt), fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c))); t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (dest, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } if (fd->collapse > 1) expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar); if (!broken_loop) { /* The code controlling the sequential loop goes in CONT_BB, replacing the GIMPLE_OMP_CONTINUE. 
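   Note the special case below: for a compile-time CHUNK of 1 the back
   edge test is emitted as the constant-false comparison 0 == 1, since a
   single-iteration chunk can never loop back within the chunk.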
*/ gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); vmain = gimple_omp_continue_control_use (cont_stmt); vback = gimple_omp_continue_control_def (cont_stmt); if (cond_var) { tree itype = TREE_TYPE (cond_var); tree t2; if (POINTER_TYPE_P (type) || TREE_CODE (n1) != INTEGER_CST || fd->loop.cond_code != LT_EXPR) t2 = build_int_cst (itype, 1); else t2 = fold_convert (itype, step); t2 = fold_build2 (PLUS_EXPR, itype, cond_var, t2); t2 = force_gimple_operand_gsi (&gsi, t2, false, NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (cond_var, t2); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); } if (!gimple_omp_for_combined_p (fd->for_stmt)) { if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (vmain, step); else t = fold_build2 (PLUS_EXPR, type, vmain, step); if (DECL_P (vback) && TREE_ADDRESSABLE (vback)) t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (vback, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); if (tree_int_cst_equal (fd->chunk_size, integer_one_node)) t = build2 (EQ_EXPR, boolean_type_node, build_int_cst (itype, 0), build_int_cst (itype, 1)); else t = build2 (fd->loop.cond_code, boolean_type_node, DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); } /* Remove GIMPLE_OMP_CONTINUE. */ gsi_remove (&gsi, true); if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt)) collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb); /* Trip update code goes into TRIP_UPDATE_BB. */ gsi = gsi_start_bb (trip_update_bb); t = build_int_cst (itype, 1); t = build2 (PLUS_EXPR, itype, trip_main, t); assign_stmt = gimple_build_assign (trip_back, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */ gsi = gsi_last_nondebug_bb (exit_bb); if (!gimple_omp_return_nowait_p (gsi_stmt (gsi))) { t = gimple_omp_return_lhs (gsi_stmt (gsi)); if (fd->have_reductemp || fd->have_pointer_condtemp) { tree fn; if (t) fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL); else fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END); gcall *g = gimple_build_call (fn, 0); if (t) { gimple_call_set_lhs (g, t); if (fd->have_reductemp) gsi_insert_after (&gsi, gimple_build_assign (reductions, NOP_EXPR, t), GSI_SAME_STMT); } gsi_insert_after (&gsi, g, GSI_SAME_STMT); } else gsi_insert_after (&gsi, omp_build_barrier (t), GSI_SAME_STMT); } else if (fd->have_pointer_condtemp) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT); gcall *g = gimple_build_call (fn, 0); gsi_insert_after (&gsi, g, GSI_SAME_STMT); } gsi_remove (&gsi, true); /* Connect the new blocks. */ find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE; find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE; if (!broken_loop) { se = find_edge (cont_bb, body_bb); if (se == NULL) { se = BRANCH_EDGE (cont_bb); gcc_assert (single_succ (se->dest) == body_bb); } if (gimple_omp_for_combined_p (fd->for_stmt)) { remove_edge (se); se = NULL; } else if (fd->collapse > 1) { remove_edge (se); se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE); } else se->flags = EDGE_TRUE_VALUE; find_edge (cont_bb, trip_update_bb)->flags = se ? 
EDGE_FALSE_VALUE : EDGE_FALLTHRU; redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb); } if (gimple_in_ssa_p (cfun)) { gphi_iterator psi; gphi *phi; edge re, ene; edge_var_map *vm; size_t i; gcc_assert (fd->collapse == 1 && !broken_loop); /* When we redirect the edge from trip_update_bb to iter_part_bb, we remove arguments of the phi nodes in fin_bb. We need to create appropriate phi nodes in iter_part_bb instead. */ se = find_edge (iter_part_bb, fin_bb); re = single_succ_edge (trip_update_bb); vec<edge_var_map> *head = redirect_edge_var_map_vector (re); ene = single_succ_edge (entry_bb); psi = gsi_start_phis (fin_bb); for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm); gsi_next (&psi), ++i) { gphi *nphi; location_t locus; phi = psi.phi (); if (operand_equal_p (gimple_phi_arg_def (phi, 0), redirect_edge_var_map_def (vm), 0)) continue; t = gimple_phi_result (phi); gcc_assert (t == redirect_edge_var_map_result (vm)); if (!single_pred_p (fin_bb)) t = copy_ssa_name (t, phi); nphi = create_phi_node (t, iter_part_bb); t = PHI_ARG_DEF_FROM_EDGE (phi, se); locus = gimple_phi_arg_location_from_edge (phi, se); /* A special case -- fd->loop.v is not yet computed in iter_part_bb, we need to use vextra instead. */ if (t == fd->loop.v) t = vextra; add_phi_arg (nphi, t, ene, locus); locus = redirect_edge_var_map_location (vm); tree back_arg = redirect_edge_var_map_def (vm); add_phi_arg (nphi, back_arg, re, locus); edge ce = find_edge (cont_bb, body_bb); if (ce == NULL) { ce = BRANCH_EDGE (cont_bb); gcc_assert (single_succ (ce->dest) == body_bb); ce = single_succ_edge (ce->dest); } gphi *inner_loop_phi = find_phi_with_arg_on_edge (back_arg, ce); gcc_assert (inner_loop_phi != NULL); add_phi_arg (inner_loop_phi, gimple_phi_result (nphi), find_edge (seq_start_bb, body_bb), locus); if (!single_pred_p (fin_bb)) add_phi_arg (phi, gimple_phi_result (nphi), se, locus); } gcc_assert (gsi_end_p (psi) && (head == NULL || i == head->length ())); redirect_edge_var_map_clear (re); if (single_pred_p (fin_bb)) while (1) { psi = gsi_start_phis (fin_bb); if (gsi_end_p (psi)) break; remove_phi_node (&psi, false); } /* Make phi node for trip. */ phi = create_phi_node (trip_main, iter_part_bb); add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb), UNKNOWN_LOCATION); add_phi_arg (phi, trip_init, single_succ_edge (entry_bb), UNKNOWN_LOCATION); } if (!broken_loop) set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb); set_immediate_dominator (CDI_DOMINATORS, iter_part_bb, recompute_dominator (CDI_DOMINATORS, iter_part_bb)); set_immediate_dominator (CDI_DOMINATORS, fin_bb, recompute_dominator (CDI_DOMINATORS, fin_bb)); set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, recompute_dominator (CDI_DOMINATORS, seq_start_bb)); set_immediate_dominator (CDI_DOMINATORS, body_bb, recompute_dominator (CDI_DOMINATORS, body_bb)); if (!broken_loop) { class loop *loop = body_bb->loop_father; class loop *trip_loop = alloc_loop (); trip_loop->header = iter_part_bb; trip_loop->latch = trip_update_bb; add_loop (trip_loop, iter_part_bb->loop_father); if (loop != entry_bb->loop_father) { gcc_assert (loop->header == body_bb); gcc_assert (loop->latch == region->cont || single_pred (loop->latch) == region->cont); trip_loop->inner = loop; return; } if (!gimple_omp_for_combined_p (fd->for_stmt)) { loop = alloc_loop (); loop->header = body_bb; if (collapse_bb == NULL) loop->latch = cont_bb; add_loop (loop, trip_loop); } } } /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing loop. 
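   Unlike the worksharing schedules above, no iteration space splitting
   happens here; the loop is only annotated (safelen, simduid, possibly
   force_vectorize) so that the vectorizer can later exploit it.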
     Given parameters:
	for (V = N1; V cond N2; V += STEP) BODY;
     where COND is "<" or ">", we generate pseudocode

	V = N1;
	goto L1;
    L0:
	BODY;
	V += STEP;
    L1:
	if (V cond N2) goto L0; else goto L2;
    L2:

     For collapsed loops, given parameters:
       collapse(3)
       for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	   for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	     BODY;

     we generate pseudocode

	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	V = 0;
	V1 = N11;
	V2 = N21;
	V3 = N31;
	goto L1;
    L0:
	BODY;
	V += 1;
	V3 += STEP3;
	V2 += (V3 cond3 N32) ? 0 : STEP2;
	V3 = (V3 cond3 N32) ? V3 : N31;
	V1 += (V2 cond2 N22) ? 0 : STEP1;
	V2 = (V2 cond2 N22) ? V2 : N21;
    L1:
	if (V < count) goto L0; else goto L2;
    L2:
*/

static void
expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
{
  tree type, t;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  gcond *cond_stmt;
  bool broken_loop = region->cont == NULL;
  edge e, ne;
  tree *counts = NULL;
  int i;
  int safelen_int = INT_MAX;
  bool dont_vectorize = false;
  tree safelen = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				  OMP_CLAUSE_SAFELEN);
  tree simduid = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				  OMP_CLAUSE__SIMDUID_);
  tree ifc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
			      OMP_CLAUSE_IF);
  tree simdlen = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				  OMP_CLAUSE_SIMDLEN);
  tree condtemp = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				   OMP_CLAUSE__CONDTEMP_);
  tree n1, n2;
  tree cond_var = condtemp ? OMP_CLAUSE_DECL (condtemp) : NULL_TREE;

  if (safelen)
    {
      poly_uint64 val;
      safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
      if (!poly_int_tree_p (safelen, &val))
	safelen_int = 0;
      else
	safelen_int = MIN (constant_lower_bound (val), INT_MAX);
      if (safelen_int == 1)
	safelen_int = 0;
    }
  if ((ifc && integer_zerop (OMP_CLAUSE_IF_EXPR (ifc)))
      || (simdlen && integer_onep (OMP_CLAUSE_SIMDLEN_EXPR (simdlen))))
    {
      safelen_int = 0;
      dont_vectorize = true;
    }
  type = TREE_TYPE (fd->loop.v);
  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
	      || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
  if (!broken_loop)
    {
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
      l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
      l2_bb = BRANCH_EDGE (entry_bb)->dest;
    }
  else
    {
      BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
      l1_bb = split_edge (BRANCH_EDGE (entry_bb));
      l2_bb = single_succ (l1_bb);
    }
  exit_bb = region->exit;
  l2_dom_bb = NULL;

  gsi = gsi_last_nondebug_bb (entry_bb);

  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
  /* Not needed in SSA form right now.
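     (The assert below documents that assumption.)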
*/ gcc_assert (!gimple_in_ssa_p (cfun)); if (fd->collapse > 1) { int first_zero_iter = -1, dummy = -1; basic_block zero_iter_bb = l2_bb, dummy_bb = NULL; counts = XALLOCAVEC (tree, fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, zero_iter_bb, first_zero_iter, dummy_bb, dummy, l2_dom_bb); } if (l2_dom_bb == NULL) l2_dom_bb = l1_bb; n1 = fd->loop.n1; n2 = fd->loop.n2; if (gimple_omp_for_combined_into_p (fd->for_stmt)) { tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n1 = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n2 = OMP_CLAUSE_DECL (innerc); } tree step = fd->loop.step; bool is_simt = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__SIMT_); if (is_simt) { cfun->curr_properties &= ~PROP_gimple_lomp_dev; is_simt = safelen_int > 1; } tree simt_lane = NULL_TREE, simt_maxlane = NULL_TREE; if (is_simt) { simt_lane = create_tmp_var (unsigned_type_node); gimple *g = gimple_build_call_internal (IFN_GOMP_SIMT_LANE, 0); gimple_call_set_lhs (g, simt_lane); gsi_insert_before (&gsi, g, GSI_SAME_STMT); tree offset = fold_build2 (MULT_EXPR, TREE_TYPE (step), step, fold_convert (TREE_TYPE (step), simt_lane)); n1 = fold_convert (type, n1); if (POINTER_TYPE_P (type)) n1 = fold_build_pointer_plus (n1, offset); else n1 = fold_build2 (PLUS_EXPR, type, n1, fold_convert (type, offset)); /* Collapsed loops not handled for SIMT yet: limit to one lane only. */ if (fd->collapse > 1) simt_maxlane = build_one_cst (unsigned_type_node); else if (safelen_int < omp_max_simt_vf ()) simt_maxlane = build_int_cst (unsigned_type_node, safelen_int); tree vf = build_call_expr_internal_loc (UNKNOWN_LOCATION, IFN_GOMP_SIMT_VF, unsigned_type_node, 0); if (simt_maxlane) vf = fold_build2 (MIN_EXPR, unsigned_type_node, vf, simt_maxlane); vf = fold_convert (TREE_TYPE (step), vf); step = fold_build2 (MULT_EXPR, TREE_TYPE (step), step, vf); } expand_omp_build_assign (&gsi, fd->loop.v, fold_convert (type, n1)); if (fd->collapse > 1) { if (gimple_omp_for_combined_into_p (fd->for_stmt)) { gsi_prev (&gsi); expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1); gsi_next (&gsi); } else for (i = 0; i < fd->collapse; i++) { tree itype = TREE_TYPE (fd->loops[i].v); if (POINTER_TYPE_P (itype)) itype = signed_type_for (itype); t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1); expand_omp_build_assign (&gsi, fd->loops[i].v, t); } } if (cond_var) { if (POINTER_TYPE_P (type) || TREE_CODE (n1) != INTEGER_CST || fd->loop.cond_code != LT_EXPR || tree_int_cst_sgn (n1) != 1) expand_omp_build_assign (&gsi, cond_var, build_one_cst (TREE_TYPE (cond_var))); else expand_omp_build_assign (&gsi, cond_var, fold_convert (TREE_TYPE (cond_var), n1)); } /* Remove the GIMPLE_OMP_FOR statement. */ gsi_remove (&gsi, true); if (!broken_loop) { /* Code to control the increment goes in the CONT_BB. 
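   For collapse > 1 this materializes the cascade from the pseudocode
   above -- V3 += STEP3; V2 += (V3 cond3 N32) ? 0 : STEP2; V3 = (V3
   cond3 N32) ? V3 : N31; and so on -- using COND_EXPRs, so the outer
   IVs only advance when an inner one wraps.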
*/ gsi = gsi_last_nondebug_bb (cont_bb); stmt = gsi_stmt (gsi); gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (fd->loop.v, step); else t = fold_build2 (PLUS_EXPR, type, fd->loop.v, step); expand_omp_build_assign (&gsi, fd->loop.v, t); if (fd->collapse > 1) { i = fd->collapse - 1; if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))) { t = fold_convert (sizetype, fd->loops[i].step); t = fold_build_pointer_plus (fd->loops[i].v, t); } else { t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].step); t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v), fd->loops[i].v, t); } expand_omp_build_assign (&gsi, fd->loops[i].v, t); for (i = fd->collapse - 1; i > 0; i--) { tree itype = TREE_TYPE (fd->loops[i].v); tree itype2 = TREE_TYPE (fd->loops[i - 1].v); if (POINTER_TYPE_P (itype2)) itype2 = signed_type_for (itype2); t = fold_convert (itype2, fd->loops[i - 1].step); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = build3 (COND_EXPR, itype2, build2 (fd->loops[i].cond_code, boolean_type_node, fd->loops[i].v, fold_convert (itype, fd->loops[i].n2)), build_int_cst (itype2, 0), t); if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v))) t = fold_build_pointer_plus (fd->loops[i - 1].v, t); else t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t); expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t); t = fold_convert (itype, fd->loops[i].n1); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = build3 (COND_EXPR, itype, build2 (fd->loops[i].cond_code, boolean_type_node, fd->loops[i].v, fold_convert (itype, fd->loops[i].n2)), fd->loops[i].v, t); expand_omp_build_assign (&gsi, fd->loops[i].v, t); } } if (cond_var) { if (POINTER_TYPE_P (type) || TREE_CODE (n1) != INTEGER_CST || fd->loop.cond_code != LT_EXPR || tree_int_cst_sgn (n1) != 1) t = fold_build2 (PLUS_EXPR, TREE_TYPE (cond_var), cond_var, build_one_cst (TREE_TYPE (cond_var))); else t = fold_build2 (PLUS_EXPR, TREE_TYPE (cond_var), cond_var, fold_convert (TREE_TYPE (cond_var), step)); expand_omp_build_assign (&gsi, cond_var, t); } /* Remove GIMPLE_OMP_CONTINUE. */ gsi_remove (&gsi, true); } /* Emit the condition in L1_BB. */ gsi = gsi_start_bb (l1_bb); t = fold_convert (type, n2); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); tree v = fd->loop.v; if (DECL_P (v) && TREE_ADDRESSABLE (v)) v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = build2 (fd->loop.cond_code, boolean_type_node, v, t); cond_stmt = gimple_build_cond_empty (t); gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING); if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL) || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL)) { gsi = gsi_for_stmt (cond_stmt); gimple_regimplify_operands (cond_stmt, &gsi); } /* Add 'V -= STEP * (SIMT_VF - 1)' after the loop. */ if (is_simt) { gsi = gsi_start_bb (l2_bb); step = fold_build2 (MINUS_EXPR, TREE_TYPE (step), fd->loop.step, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (fd->loop.v, step); else t = fold_build2 (PLUS_EXPR, type, fd->loop.v, step); expand_omp_build_assign (&gsi, fd->loop.v, t); } /* Remove GIMPLE_OMP_RETURN. */ gsi = gsi_last_nondebug_bb (exit_bb); gsi_remove (&gsi, true); /* Connect the new blocks. 
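   The latch edge gets a guessed 7/8 probability below, i.e. the loop
   is presumed to iterate several times.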
*/ remove_edge (FALLTHRU_EDGE (entry_bb)); if (!broken_loop) { remove_edge (BRANCH_EDGE (entry_bb)); make_edge (entry_bb, l1_bb, EDGE_FALLTHRU); e = BRANCH_EDGE (l1_bb); ne = FALLTHRU_EDGE (l1_bb); e->flags = EDGE_TRUE_VALUE; } else { single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; ne = single_succ_edge (l1_bb); e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE); } ne->flags = EDGE_FALSE_VALUE; e->probability = profile_probability::guessed_always ().apply_scale (7, 8); ne->probability = e->probability.invert (); set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb); set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb); if (simt_maxlane) { cond_stmt = gimple_build_cond (LT_EXPR, simt_lane, simt_maxlane, NULL_TREE, NULL_TREE); gsi = gsi_last_bb (entry_bb); gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT); make_edge (entry_bb, l2_bb, EDGE_FALSE_VALUE); FALLTHRU_EDGE (entry_bb)->flags = EDGE_TRUE_VALUE; FALLTHRU_EDGE (entry_bb)->probability = profile_probability::guessed_always ().apply_scale (7, 8); BRANCH_EDGE (entry_bb)->probability = FALLTHRU_EDGE (entry_bb)->probability.invert (); l2_dom_bb = entry_bb; } set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb); if (!broken_loop) { class loop *loop = alloc_loop (); loop->header = l1_bb; loop->latch = cont_bb; add_loop (loop, l1_bb->loop_father); loop->safelen = safelen_int; if (simduid) { loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid); cfun->has_simduid_loops = true; } /* If not -fno-tree-loop-vectorize, hint that we want to vectorize the loop. */ if ((flag_tree_loop_vectorize || !global_options_set.x_flag_tree_loop_vectorize) && flag_tree_loop_optimize && loop->safelen > 1) { loop->force_vectorize = true; if (simdlen && tree_fits_uhwi_p (OMP_CLAUSE_SIMDLEN_EXPR (simdlen))) { unsigned HOST_WIDE_INT v = tree_to_uhwi (OMP_CLAUSE_SIMDLEN_EXPR (simdlen)); if (v < INT_MAX && v <= (unsigned HOST_WIDE_INT) loop->safelen) loop->simdlen = v; } cfun->has_force_vectorize_loops = true; } else if (dont_vectorize) loop->dont_vectorize = true; } else if (simduid) cfun->has_simduid_loops = true; } /* Taskloop construct is represented after gimplification with two GIMPLE_OMP_FOR constructs with GIMPLE_OMP_TASK sandwiched in between them. This routine expands the outer GIMPLE_OMP_FOR, which should just compute all the needed loop temporaries for GIMPLE_OMP_TASK. */ static void expand_omp_taskloop_for_outer (struct omp_region *region, struct omp_for_data *fd, gimple *inner_stmt) { tree type, bias = NULL_TREE; basic_block entry_bb, cont_bb, exit_bb; gimple_stmt_iterator gsi; gassign *assign_stmt; tree *counts = NULL; int i; gcc_assert (inner_stmt); gcc_assert (region->cont); gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_TASK && gimple_omp_task_taskloop_p (inner_stmt)); type = TREE_TYPE (fd->loop.v); /* See if we need to bias by LLONG_MIN. 
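   The GOMP_taskloop_ull entry point hands out iteration ranges as
   unsigned long long; adding LLONG_MIN (i.e. 2^63) to signed bounds
   maps the signed range monotonically onto the unsigned one, so the
   runtime's unsigned comparisons still order the bounds correctly.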
*/ if (fd->iter_type == long_long_unsigned_type_node && TREE_CODE (type) == INTEGER_TYPE && !TYPE_UNSIGNED (type)) { tree n1, n2; if (fd->loop.cond_code == LT_EXPR) { n1 = fd->loop.n1; n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step); } else { n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step); n2 = fd->loop.n1; } if (TREE_CODE (n1) != INTEGER_CST || TREE_CODE (n2) != INTEGER_CST || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0))) bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type)); } entry_bb = region->entry; cont_bb = region->cont; gcc_assert (EDGE_COUNT (entry_bb->succs) == 2); gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest); exit_bb = region->exit; gsi = gsi_last_nondebug_bb (entry_bb); gimple *for_stmt = gsi_stmt (gsi); gcc_assert (gimple_code (for_stmt) == GIMPLE_OMP_FOR); if (fd->collapse > 1) { int first_zero_iter = -1, dummy = -1; basic_block zero_iter_bb = NULL, dummy_bb = NULL, l2_dom_bb = NULL; counts = XALLOCAVEC (tree, fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, zero_iter_bb, first_zero_iter, dummy_bb, dummy, l2_dom_bb); if (zero_iter_bb) { /* Some counts[i] vars might be uninitialized if some loop has zero iterations. But the body shouldn't be executed in that case, so just avoid uninit warnings. */ for (i = first_zero_iter; i < fd->collapse; i++) if (SSA_VAR_P (counts[i])) TREE_NO_WARNING (counts[i]) = 1; gsi_prev (&gsi); edge e = split_block (entry_bb, gsi_stmt (gsi)); entry_bb = e->dest; make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU); gsi = gsi_last_bb (entry_bb); set_immediate_dominator (CDI_DOMINATORS, entry_bb, get_immediate_dominator (CDI_DOMINATORS, zero_iter_bb)); } } tree t0, t1; t1 = fd->loop.n2; t0 = fd->loop.n1; if (POINTER_TYPE_P (TREE_TYPE (t0)) && TYPE_PRECISION (TREE_TYPE (t0)) != TYPE_PRECISION (fd->iter_type)) { /* Avoid casting pointers to integer of a different size. */ tree itype = signed_type_for (type); t1 = fold_convert (fd->iter_type, fold_convert (itype, t1)); t0 = fold_convert (fd->iter_type, fold_convert (itype, t0)); } else { t1 = fold_convert (fd->iter_type, t1); t0 = fold_convert (fd->iter_type, t0); } if (bias) { t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias); t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias); } tree innerc = omp_find_clause (gimple_omp_task_clauses (inner_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); tree startvar = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); tree endvar = OMP_CLAUSE_DECL (innerc); if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST) { gcc_assert (innerc); for (i = 1; i < fd->collapse; i++) { innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); } innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); if (innerc) { /* If needed (inner taskloop has lastprivate clause), propagate down the total number of iterations. 
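   The extra _looptemp_ clause created during lowering receives
   fd->loop.n2 here, so the inner construct can tell which iteration
   is the last one.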
*/ tree t = force_gimple_operand_gsi (&gsi, fd->loop.n2, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } } t0 = force_gimple_operand_gsi (&gsi, t0, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (startvar, t0); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); t1 = force_gimple_operand_gsi (&gsi, t1, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (endvar, t1); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (fd->collapse > 1) expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar); /* Remove the GIMPLE_OMP_FOR statement. */ gsi = gsi_for_stmt (for_stmt); gsi_remove (&gsi, true); gsi = gsi_last_nondebug_bb (cont_bb); gsi_remove (&gsi, true); gsi = gsi_last_nondebug_bb (exit_bb); gsi_remove (&gsi, true); FALLTHRU_EDGE (entry_bb)->probability = profile_probability::always (); remove_edge (BRANCH_EDGE (entry_bb)); FALLTHRU_EDGE (cont_bb)->probability = profile_probability::always (); remove_edge (BRANCH_EDGE (cont_bb)); set_immediate_dominator (CDI_DOMINATORS, exit_bb, cont_bb); set_immediate_dominator (CDI_DOMINATORS, region->entry, recompute_dominator (CDI_DOMINATORS, region->entry)); } /* Taskloop construct is represented after gimplification with two GIMPLE_OMP_FOR constructs with GIMPLE_OMP_TASK sandwiched in between them. This routine expands the inner GIMPLE_OMP_FOR. GOMP_taskloop{,_ull} function arranges for each task to be given just a single range of iterations. */ static void expand_omp_taskloop_for_inner (struct omp_region *region, struct omp_for_data *fd, gimple *inner_stmt) { tree e, t, type, itype, vmain, vback, bias = NULL_TREE; basic_block entry_bb, exit_bb, body_bb, cont_bb, collapse_bb = NULL; basic_block fin_bb; gimple_stmt_iterator gsi; edge ep; bool broken_loop = region->cont == NULL; tree *counts = NULL; tree n1, n2, step; itype = type = TREE_TYPE (fd->loop.v); if (POINTER_TYPE_P (type)) itype = signed_type_for (type); /* See if we need to bias by LLONG_MIN. */ if (fd->iter_type == long_long_unsigned_type_node && TREE_CODE (type) == INTEGER_TYPE && !TYPE_UNSIGNED (type)) { tree n1, n2; if (fd->loop.cond_code == LT_EXPR) { n1 = fd->loop.n1; n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step); } else { n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step); n2 = fd->loop.n1; } if (TREE_CODE (n1) != INTEGER_CST || TREE_CODE (n2) != INTEGER_CST || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0))) bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type)); } entry_bb = region->entry; cont_bb = region->cont; gcc_assert (EDGE_COUNT (entry_bb->succs) == 2); fin_bb = BRANCH_EDGE (entry_bb)->dest; gcc_assert (broken_loop || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest)); body_bb = FALLTHRU_EDGE (entry_bb)->dest; if (!broken_loop) { gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb); gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); } exit_bb = region->exit; /* Iteration space partitioning goes in ENTRY_BB. 
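   No partitioning arithmetic is actually done here: GOMP_taskloop has
   already handed this task a single [N1, N2) subrange through the two
   _looptemp_ clauses.  Note that re-adding BIAS below is what undoes
   the outer biasing, since adding 2^63 is its own inverse modulo 2^64.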
*/ gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); if (fd->collapse > 1) { int first_zero_iter = -1, dummy = -1; basic_block l2_dom_bb = NULL, dummy_bb = NULL; counts = XALLOCAVEC (tree, fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, fin_bb, first_zero_iter, dummy_bb, dummy, l2_dom_bb); t = NULL_TREE; } else t = integer_one_node; step = fd->loop.step; tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n1 = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n2 = OMP_CLAUSE_DECL (innerc); if (bias) { n1 = fold_build2 (PLUS_EXPR, fd->iter_type, n1, bias); n2 = fold_build2 (PLUS_EXPR, fd->iter_type, n2, bias); } n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1), true, NULL_TREE, true, GSI_SAME_STMT); n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2), true, NULL_TREE, true, GSI_SAME_STMT); step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step), true, NULL_TREE, true, GSI_SAME_STMT); tree startvar = fd->loop.v; tree endvar = NULL_TREE; if (gimple_omp_for_combined_p (fd->for_stmt)) { tree clauses = gimple_omp_for_clauses (inner_stmt); tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); startvar = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); endvar = OMP_CLAUSE_DECL (innerc); } t = fold_convert (TREE_TYPE (startvar), n1); t = force_gimple_operand_gsi (&gsi, t, DECL_P (startvar) && TREE_ADDRESSABLE (startvar), NULL_TREE, false, GSI_CONTINUE_LINKING); gimple *assign_stmt = gimple_build_assign (startvar, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); t = fold_convert (TREE_TYPE (startvar), n2); e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (endvar) { assign_stmt = gimple_build_assign (endvar, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e))) assign_stmt = gimple_build_assign (fd->loop.v, e); else assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } if (fd->collapse > 1) expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar); if (!broken_loop) { /* The code controlling the sequential loop replaces the GIMPLE_OMP_CONTINUE. */ gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE); vmain = gimple_omp_continue_control_use (cont_stmt); vback = gimple_omp_continue_control_def (cont_stmt); if (!gimple_omp_for_combined_p (fd->for_stmt)) { if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (vmain, step); else t = fold_build2 (PLUS_EXPR, type, vmain, step); t = force_gimple_operand_gsi (&gsi, t, DECL_P (vback) && TREE_ADDRESSABLE (vback), NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (vback, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); t = build2 (fd->loop.cond_code, boolean_type_node, DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); } /* Remove the GIMPLE_OMP_CONTINUE statement. 
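   (For the non-combined case, the increment and back-edge test built
   above take over its role.)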
*/
      gsi_remove (&gsi, true);

      if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
	collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
    }

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi = gsi_for_stmt (fd->for_stmt);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_RETURN statement.  */
  gsi = gsi_last_nondebug_bb (exit_bb);
  gsi_remove (&gsi, true);

  FALLTHRU_EDGE (entry_bb)->probability = profile_probability::always ();
  if (!broken_loop)
    remove_edge (BRANCH_EDGE (entry_bb));
  else
    {
      remove_edge_and_dominated_blocks (BRANCH_EDGE (entry_bb));
      region->outer->cont = NULL;
    }

  /* Connect all the blocks.  */
  if (!broken_loop)
    {
      ep = find_edge (cont_bb, body_bb);
      if (gimple_omp_for_combined_p (fd->for_stmt))
	{
	  remove_edge (ep);
	  ep = NULL;
	}
      else if (fd->collapse > 1)
	{
	  remove_edge (ep);
	  ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
	}
      else
	ep->flags = EDGE_TRUE_VALUE;
      find_edge (cont_bb, fin_bb)->flags
	= ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
    }

  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
  if (!broken_loop)
    set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			     recompute_dominator (CDI_DOMINATORS, fin_bb));

  if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
    {
      class loop *loop = alloc_loop ();
      loop->header = body_bb;
      if (collapse_bb == NULL)
	loop->latch = cont_bb;
      add_loop (loop, body_bb->loop_father);
    }
}

/* A subroutine of expand_omp_for.  Generate code for an OpenACC
   partitioned loop.  The lowering here is abstracted, in that the
   loop parameters are passed through internal functions, which are
   further lowered by oacc_device_lower, once we get to the target
   compiler.  The loop is of the form:

   for (V = B; V LTGT E; V += S) {BODY}

   where LTGT is < or >.  We may have a specified chunking size, CHUNKING
   (constant 0 for no chunking) and we will have a GWV partitioning
   mask, specifying dimensions over which the loop is to be
   partitioned (see note below).  We generate code that looks like
   (this ignores tiling):

   <entry_bb> [incoming FALL->body, BRANCH->exit]
     typedef signedintify (typeof (V)) T;  // underlying signed integral type
     T range = E - B;
     T chunk_no = 0;
     T DIR = LTGT == '<' ? +1 : -1;
     T chunk_max = GOACC_LOOP_CHUNK (dir, range, S, CHUNK_SIZE, GWV);
     T step = GOACC_LOOP_STEP (dir, range, S, CHUNK_SIZE, GWV);

   <head_bb> [created by splitting end of entry_bb]
     T offset = GOACC_LOOP_OFFSET (dir, range, S, CHUNK_SIZE, GWV, chunk_no);
     T bound = GOACC_LOOP_BOUND (dir, range, S, CHUNK_SIZE, GWV, offset);
     if (!(offset LTGT bound)) goto bottom_bb;

   <body_bb> [incoming]
     V = B + offset;
     {BODY}

   <cont_bb> [incoming, may == body_bb FALL->exit_bb, BRANCH->body_bb]
     offset += step;
     if (offset LTGT bound) goto body_bb; [*]

   <bottom_bb> [created by splitting start of exit_bb] insert BRANCH->head_bb
     chunk_no++;
     if (chunk < chunk_max) goto head_bb;

   <exit_bb> [incoming]
     V = B + ((range -/+ 1) / S +/- 1) * S [*]

   [*] Needed if V live at end of loop.
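
   GWV is a bitmask of GOMP_DIM_GANG, GOMP_DIM_WORKER and
   GOMP_DIM_VECTOR bits; e.g. the SSA parallelizer below passes
   GOMP_DIM_MASK (GOMP_DIM_GANG) for pure gang partitioning.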
*/ static void expand_oacc_for (struct omp_region *region, struct omp_for_data *fd) { bool is_oacc_kernels_parallelized = (lookup_attribute ("oacc kernels parallelized", DECL_ATTRIBUTES (current_function_decl)) != NULL); { bool is_oacc_kernels = (lookup_attribute ("oacc kernels", DECL_ATTRIBUTES (current_function_decl)) != NULL); if (is_oacc_kernels_parallelized) gcc_checking_assert (is_oacc_kernels); } gcc_assert (gimple_in_ssa_p (cfun) == is_oacc_kernels_parallelized); /* In the following, some of the 'gimple_in_ssa_p (cfun)' conditionals are for SSA specifics, and some are for 'parloops' OpenACC 'kernels'-parallelized specifics. */ tree v = fd->loop.v; enum tree_code cond_code = fd->loop.cond_code; enum tree_code plus_code = PLUS_EXPR; tree chunk_size = integer_minus_one_node; tree gwv = integer_zero_node; tree iter_type = TREE_TYPE (v); tree diff_type = iter_type; tree plus_type = iter_type; struct oacc_collapse *counts = NULL; gcc_checking_assert (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP); gcc_assert (!gimple_omp_for_combined_into_p (fd->for_stmt)); gcc_assert (cond_code == LT_EXPR || cond_code == GT_EXPR); if (POINTER_TYPE_P (iter_type)) { plus_code = POINTER_PLUS_EXPR; plus_type = sizetype; } if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type)) diff_type = signed_type_for (diff_type); if (TYPE_PRECISION (diff_type) < TYPE_PRECISION (integer_type_node)) diff_type = integer_type_node; basic_block entry_bb = region->entry; /* BB ending in OMP_FOR */ basic_block exit_bb = region->exit; /* BB ending in OMP_RETURN */ basic_block cont_bb = region->cont; /* BB ending in OMP_CONTINUE */ basic_block bottom_bb = NULL; /* entry_bb has two successors; the branch edge is to the exit block, fallthrough edge to body. */ gcc_assert (EDGE_COUNT (entry_bb->succs) == 2 && BRANCH_EDGE (entry_bb)->dest == exit_bb); /* If cont_bb non-NULL, it has 2 successors. The branch successor is body_bb, or to a block whose only successor is the body_bb. Its fallthrough successor is the final block (same as the branch successor of the entry_bb). */ if (cont_bb) { basic_block body_bb = FALLTHRU_EDGE (entry_bb)->dest; basic_block bed = BRANCH_EDGE (cont_bb)->dest; gcc_assert (FALLTHRU_EDGE (cont_bb)->dest == exit_bb); gcc_assert (bed == body_bb || single_succ_edge (bed)->dest == body_bb); } else gcc_assert (!gimple_in_ssa_p (cfun)); /* The exit block only has entry_bb and cont_bb as predecessors. */ gcc_assert (EDGE_COUNT (exit_bb->preds) == 1 + (cont_bb != NULL)); tree chunk_no; tree chunk_max = NULL_TREE; tree bound, offset; tree step = create_tmp_var (diff_type, ".step"); bool up = cond_code == LT_EXPR; tree dir = build_int_cst (diff_type, up ? +1 : -1); bool chunking = !gimple_in_ssa_p (cfun); bool negating; /* Tiling vars. */ tree tile_size = NULL_TREE; tree element_s = NULL_TREE; tree e_bound = NULL_TREE, e_offset = NULL_TREE, e_step = NULL_TREE; basic_block elem_body_bb = NULL; basic_block elem_cont_bb = NULL; /* SSA instances. */ tree offset_incr = NULL_TREE; tree offset_init = NULL_TREE; gimple_stmt_iterator gsi; gassign *ass; gcall *call; gimple *stmt; tree expr; location_t loc; edge split, be, fte; /* Split the end of entry_bb to create head_bb. */ split = split_block (entry_bb, last_stmt (entry_bb)); basic_block head_bb = split->dest; entry_bb = split->src; /* Chunk setup goes at end of entry_bb, replacing the omp_for. 
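In outline, the emitted setup is (a sketch, assuming chunking is active): chunk_no = 0; chunk_max = GOACC_LOOP (CHUNKS, dir, range, s, chunk_size, gwv); step = GOACC_LOOP (STEP, dir, range, s, chunk_size, gwv); all through the IFN_GOACC_LOOP internal function built below.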
*/ gsi = gsi_last_nondebug_bb (entry_bb); gomp_for *for_stmt = as_a <gomp_for *> (gsi_stmt (gsi)); loc = gimple_location (for_stmt); if (gimple_in_ssa_p (cfun)) { offset_init = gimple_omp_for_index (for_stmt, 0); gcc_assert (integer_zerop (fd->loop.n1)); /* The SSA parallelizer does gang parallelism. */ gwv = build_int_cst (integer_type_node, GOMP_DIM_MASK (GOMP_DIM_GANG)); } if (fd->collapse > 1 || fd->tiling) { gcc_assert (!gimple_in_ssa_p (cfun) && up); counts = XALLOCAVEC (struct oacc_collapse, fd->collapse); tree total = expand_oacc_collapse_init (fd, &gsi, counts, TREE_TYPE (fd->loop.n2), loc); if (SSA_VAR_P (fd->loop.n2)) { total = force_gimple_operand_gsi (&gsi, total, false, NULL_TREE, true, GSI_SAME_STMT); ass = gimple_build_assign (fd->loop.n2, total); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); } } tree b = fd->loop.n1; tree e = fd->loop.n2; tree s = fd->loop.step; b = force_gimple_operand_gsi (&gsi, b, true, NULL_TREE, true, GSI_SAME_STMT); e = force_gimple_operand_gsi (&gsi, e, true, NULL_TREE, true, GSI_SAME_STMT); /* Convert the step, avoiding possible unsigned->signed overflow. */ negating = !up && TYPE_UNSIGNED (TREE_TYPE (s)); if (negating) s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s); s = fold_convert (diff_type, s); if (negating) s = fold_build1 (NEGATE_EXPR, diff_type, s); s = force_gimple_operand_gsi (&gsi, s, true, NULL_TREE, true, GSI_SAME_STMT); if (!chunking) chunk_size = integer_zero_node; expr = fold_convert (diff_type, chunk_size); chunk_size = force_gimple_operand_gsi (&gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT); if (fd->tiling) { /* Determine the tile size and element step, modify the outer loop step size. */ tile_size = create_tmp_var (diff_type, ".tile_size"); expr = build_int_cst (diff_type, 1); for (int ix = 0; ix < fd->collapse; ix++) expr = fold_build2 (MULT_EXPR, diff_type, counts[ix].tile, expr); expr = force_gimple_operand_gsi (&gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT); ass = gimple_build_assign (tile_size, expr); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); element_s = create_tmp_var (diff_type, ".element_s"); ass = gimple_build_assign (element_s, s); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); expr = fold_build2 (MULT_EXPR, diff_type, s, tile_size); s = force_gimple_operand_gsi (&gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT); } /* Determine the range, avoiding possible unsigned->signed overflow. */ negating = !up && TYPE_UNSIGNED (iter_type); expr = fold_build2 (MINUS_EXPR, plus_type, fold_convert (plus_type, negating ? b : e), fold_convert (plus_type, negating ? 
e : b)); expr = fold_convert (diff_type, expr); if (negating) expr = fold_build1 (NEGATE_EXPR, diff_type, expr); tree range = force_gimple_operand_gsi (&gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT); chunk_no = build_int_cst (diff_type, 0); if (chunking) { gcc_assert (!gimple_in_ssa_p (cfun)); expr = chunk_no; chunk_max = create_tmp_var (diff_type, ".chunk_max"); chunk_no = create_tmp_var (diff_type, ".chunk_no"); ass = gimple_build_assign (chunk_no, expr); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); call = gimple_build_call_internal (IFN_GOACC_LOOP, 6, build_int_cst (integer_type_node, IFN_GOACC_LOOP_CHUNKS), dir, range, s, chunk_size, gwv); gimple_call_set_lhs (call, chunk_max); gimple_set_location (call, loc); gsi_insert_before (&gsi, call, GSI_SAME_STMT); } else chunk_size = chunk_no; call = gimple_build_call_internal (IFN_GOACC_LOOP, 6, build_int_cst (integer_type_node, IFN_GOACC_LOOP_STEP), dir, range, s, chunk_size, gwv); gimple_call_set_lhs (call, step); gimple_set_location (call, loc); gsi_insert_before (&gsi, call, GSI_SAME_STMT); /* Remove the GIMPLE_OMP_FOR. */ gsi_remove (&gsi, true); /* Fixup edges from head_bb. */ be = BRANCH_EDGE (head_bb); fte = FALLTHRU_EDGE (head_bb); be->flags |= EDGE_FALSE_VALUE; fte->flags ^= EDGE_FALLTHRU | EDGE_TRUE_VALUE; basic_block body_bb = fte->dest; if (gimple_in_ssa_p (cfun)) { gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); offset = gimple_omp_continue_control_use (cont_stmt); offset_incr = gimple_omp_continue_control_def (cont_stmt); } else { offset = create_tmp_var (diff_type, ".offset"); offset_init = offset_incr = offset; } bound = create_tmp_var (TREE_TYPE (offset), ".bound"); /* Loop offset & bound go into head_bb. */ gsi = gsi_start_bb (head_bb); call = gimple_build_call_internal (IFN_GOACC_LOOP, 7, build_int_cst (integer_type_node, IFN_GOACC_LOOP_OFFSET), dir, range, s, chunk_size, gwv, chunk_no); gimple_call_set_lhs (call, offset_init); gimple_set_location (call, loc); gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING); call = gimple_build_call_internal (IFN_GOACC_LOOP, 7, build_int_cst (integer_type_node, IFN_GOACC_LOOP_BOUND), dir, range, s, chunk_size, gwv, offset_init); gimple_call_set_lhs (call, bound); gimple_set_location (call, loc); gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING); expr = build2 (cond_code, boolean_type_node, offset_init, bound); gsi_insert_after (&gsi, gimple_build_cond_empty (expr), GSI_CONTINUE_LINKING); /* V assignment goes into body_bb. */ if (!gimple_in_ssa_p (cfun)) { gsi = gsi_start_bb (body_bb); expr = build2 (plus_code, iter_type, b, fold_convert (plus_type, offset)); expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE, true, GSI_SAME_STMT); ass = gimple_build_assign (v, expr); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); if (fd->collapse > 1 || fd->tiling) expand_oacc_collapse_vars (fd, false, &gsi, counts, v); if (fd->tiling) { /* Determine the range of the element loop -- usually simply the tile_size, but could be smaller if the final iteration of the outer loop is a partial tile. */ tree e_range = create_tmp_var (diff_type, ".e_range"); expr = build2 (MIN_EXPR, diff_type, build2 (MINUS_EXPR, diff_type, bound, offset), build2 (MULT_EXPR, diff_type, tile_size, element_s)); expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE, true, GSI_SAME_STMT); ass = gimple_build_assign (e_range, expr); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); /* Determine bound, offset & step of inner loop. 
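The element loop reuses the same IFN_GOACC_LOOP interface, but with a GWV mask of -1 and a chunk of 0, which marks the calls below as element-loop instances.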
*/ e_bound = create_tmp_var (diff_type, ".e_bound"); e_offset = create_tmp_var (diff_type, ".e_offset"); e_step = create_tmp_var (diff_type, ".e_step"); /* Mark these as element loops. */ tree t, e_gwv = integer_minus_one_node; tree chunk = build_int_cst (diff_type, 0); /* Never chunked. */ t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_OFFSET); call = gimple_build_call_internal (IFN_GOACC_LOOP, 7, t, dir, e_range, element_s, chunk, e_gwv, chunk); gimple_call_set_lhs (call, e_offset); gimple_set_location (call, loc); gsi_insert_before (&gsi, call, GSI_SAME_STMT); t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_BOUND); call = gimple_build_call_internal (IFN_GOACC_LOOP, 7, t, dir, e_range, element_s, chunk, e_gwv, e_offset); gimple_call_set_lhs (call, e_bound); gimple_set_location (call, loc); gsi_insert_before (&gsi, call, GSI_SAME_STMT); t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_STEP); call = gimple_build_call_internal (IFN_GOACC_LOOP, 6, t, dir, e_range, element_s, chunk, e_gwv); gimple_call_set_lhs (call, e_step); gimple_set_location (call, loc); gsi_insert_before (&gsi, call, GSI_SAME_STMT); /* Add test and split block. */ expr = build2 (cond_code, boolean_type_node, e_offset, e_bound); stmt = gimple_build_cond_empty (expr); gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); split = split_block (body_bb, stmt); elem_body_bb = split->dest; if (cont_bb == body_bb) cont_bb = elem_body_bb; body_bb = split->src; split->flags ^= EDGE_FALLTHRU | EDGE_TRUE_VALUE; /* Add a dummy exit for the tiled block when cont_bb is missing. */ if (cont_bb == NULL) { edge e = make_edge (body_bb, exit_bb, EDGE_FALSE_VALUE); e->probability = profile_probability::even (); split->probability = profile_probability::even (); } /* Initialize the user's loop vars. */ gsi = gsi_start_bb (elem_body_bb); expand_oacc_collapse_vars (fd, true, &gsi, counts, e_offset); } } /* Loop increment goes into cont_bb. If this is not a loop, we will have spawned threads as if it was, and each one will execute one iteration. The specification is not explicit about whether such constructs are ill-formed or not, and they can occur, especially when noreturn routines are involved. */ if (cont_bb) { gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); loc = gimple_location (cont_stmt); if (fd->tiling) { /* Insert element loop increment and test. */ expr = build2 (PLUS_EXPR, diff_type, e_offset, e_step); expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE, true, GSI_SAME_STMT); ass = gimple_build_assign (e_offset, expr); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); expr = build2 (cond_code, boolean_type_node, e_offset, e_bound); stmt = gimple_build_cond_empty (expr); gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); split = split_block (cont_bb, stmt); elem_cont_bb = split->src; cont_bb = split->dest; split->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE; split->probability = profile_probability::unlikely ().guessed (); edge latch_edge = make_edge (elem_cont_bb, elem_body_bb, EDGE_TRUE_VALUE); latch_edge->probability = profile_probability::likely ().guessed (); edge skip_edge = make_edge (body_bb, cont_bb, EDGE_FALSE_VALUE); skip_edge->probability = profile_probability::unlikely ().guessed (); edge loop_entry_edge = EDGE_SUCC (body_bb, 1 - skip_edge->dest_idx); loop_entry_edge->probability = profile_probability::likely ().guessed (); gsi = gsi_for_stmt (cont_stmt); } /* Increment offset. 
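In outline (a sketch): offset += step; if (offset LTGT bound) goto body_bb; where in SSA form the addition is built with PLUS_CODE so that pointer iteration variables use POINTER_PLUS_EXPR.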
*/ if (gimple_in_ssa_p (cfun)) expr = build2 (plus_code, iter_type, offset, fold_convert (plus_type, step)); else expr = build2 (PLUS_EXPR, diff_type, offset, step); expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE, true, GSI_SAME_STMT); ass = gimple_build_assign (offset_incr, expr); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); expr = build2 (cond_code, boolean_type_node, offset_incr, bound); gsi_insert_before (&gsi, gimple_build_cond_empty (expr), GSI_SAME_STMT); /* Remove the GIMPLE_OMP_CONTINUE. */ gsi_remove (&gsi, true); /* Fixup edges from cont_bb. */ be = BRANCH_EDGE (cont_bb); fte = FALLTHRU_EDGE (cont_bb); be->flags |= EDGE_TRUE_VALUE; fte->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE; if (chunking) { /* Split the beginning of exit_bb to make bottom_bb. We need to insert a nop at the start, because splitting is after a stmt, not before. */ gsi = gsi_start_bb (exit_bb); stmt = gimple_build_nop (); gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); split = split_block (exit_bb, stmt); bottom_bb = split->src; exit_bb = split->dest; gsi = gsi_last_bb (bottom_bb); /* Chunk increment and test goes into bottom_bb. */ expr = build2 (PLUS_EXPR, diff_type, chunk_no, build_int_cst (diff_type, 1)); ass = gimple_build_assign (chunk_no, expr); gsi_insert_after (&gsi, ass, GSI_CONTINUE_LINKING); /* Chunk test at end of bottom_bb. */ expr = build2 (LT_EXPR, boolean_type_node, chunk_no, chunk_max); gsi_insert_after (&gsi, gimple_build_cond_empty (expr), GSI_CONTINUE_LINKING); /* Fixup edges from bottom_bb. */ split->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE; split->probability = profile_probability::unlikely ().guessed (); edge latch_edge = make_edge (bottom_bb, head_bb, EDGE_TRUE_VALUE); latch_edge->probability = profile_probability::likely ().guessed (); } } gsi = gsi_last_nondebug_bb (exit_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN); loc = gimple_location (gsi_stmt (gsi)); if (!gimple_in_ssa_p (cfun)) { /* Insert the final value of V, in case it is live. This is the value for the only thread that survives past the join. */ expr = fold_build2 (MINUS_EXPR, diff_type, range, dir); expr = fold_build2 (PLUS_EXPR, diff_type, expr, s); expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s); expr = fold_build2 (MULT_EXPR, diff_type, expr, s); expr = build2 (plus_code, iter_type, b, fold_convert (plus_type, expr)); expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE, true, GSI_SAME_STMT); ass = gimple_build_assign (v, expr); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); } /* Remove the OMP_RETURN. */ gsi_remove (&gsi, true); if (cont_bb) { /* We now have one, two or three nested loops. Update the loop structures. */ class loop *parent = entry_bb->loop_father; class loop *body = body_bb->loop_father; if (chunking) { class loop *chunk_loop = alloc_loop (); chunk_loop->header = head_bb; chunk_loop->latch = bottom_bb; add_loop (chunk_loop, parent); parent = chunk_loop; } else if (parent != body) { gcc_assert (body->header == body_bb); gcc_assert (body->latch == cont_bb || single_pred (body->latch) == cont_bb); parent = NULL; } if (parent) { class loop *body_loop = alloc_loop (); body_loop->header = body_bb; body_loop->latch = cont_bb; add_loop (body_loop, parent); if (fd->tiling) { /* Insert tiling's element loop. */ class loop *inner_loop = alloc_loop (); inner_loop->header = elem_body_bb; inner_loop->latch = elem_cont_bb; add_loop (inner_loop, body_loop); } } } } /* Expand the OMP loop defined by REGION. 
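The kind and schedule of the loop pick the expander: simd loops go to expand_omp_simd, OpenACC loops to expand_oacc_for, taskloops to the taskloop inner/outer expanders, unordered static schedules to expand_omp_for_static_nochunk or expand_omp_for_static_chunk, and everything else to expand_omp_for_generic together with the matching GOMP_loop_*_start and GOMP_loop_*_next libgomp entry points.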
*/ static void expand_omp_for (struct omp_region *region, gimple *inner_stmt) { struct omp_for_data fd; struct omp_for_data_loop *loops; loops = (struct omp_for_data_loop *) alloca (gimple_omp_for_collapse (last_stmt (region->entry)) * sizeof (struct omp_for_data_loop)); omp_extract_for_data (as_a <gomp_for *> (last_stmt (region->entry)), &fd, loops); region->sched_kind = fd.sched_kind; region->sched_modifiers = fd.sched_modifiers; region->has_lastprivate_conditional = fd.lastprivate_conditional != 0; gcc_assert (EDGE_COUNT (region->entry->succs) == 2); BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL; FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL; if (region->cont) { gcc_assert (EDGE_COUNT (region->cont->succs) == 2); BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL; FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL; } else /* If there isn't a continue then this is a degenerate case where the introduction of abnormal edges during lowering will prevent original loops from being detected. Fix that up. */ loops_state_set (LOOPS_NEED_FIXUP); if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_SIMD) expand_omp_simd (region, &fd); else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP) { gcc_assert (!inner_stmt); expand_oacc_for (region, &fd); } else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_TASKLOOP) { if (gimple_omp_for_combined_into_p (fd.for_stmt)) expand_omp_taskloop_for_inner (region, &fd, inner_stmt); else expand_omp_taskloop_for_outer (region, &fd, inner_stmt); } else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC && !fd.have_ordered) { if (fd.chunk_size == NULL) expand_omp_for_static_nochunk (region, &fd, inner_stmt); else expand_omp_for_static_chunk (region, &fd, inner_stmt); } else { int fn_index, start_ix, next_ix; unsigned HOST_WIDE_INT sched = 0; tree sched_arg = NULL_TREE; gcc_assert (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_FOR); if (fd.chunk_size == NULL && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC) fd.chunk_size = integer_zero_node; switch (fd.sched_kind) { case OMP_CLAUSE_SCHEDULE_RUNTIME: if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_NONMONOTONIC) != 0 && fd.lastprivate_conditional == 0) { gcc_assert (!fd.have_ordered); fn_index = 6; sched = 4; } else if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_MONOTONIC) == 0 && !fd.have_ordered && fd.lastprivate_conditional == 0) fn_index = 7; else { fn_index = 3; sched = (HOST_WIDE_INT_1U << 31); } break; case OMP_CLAUSE_SCHEDULE_DYNAMIC: case OMP_CLAUSE_SCHEDULE_GUIDED: if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_MONOTONIC) == 0 && !fd.have_ordered && fd.lastprivate_conditional == 0) { fn_index = 3 + fd.sched_kind; sched = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_GUIDED) + 2; break; } fn_index = fd.sched_kind; sched = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_GUIDED) + 2; sched += (HOST_WIDE_INT_1U << 31); break; case OMP_CLAUSE_SCHEDULE_STATIC: gcc_assert (fd.have_ordered); fn_index = 0; sched = (HOST_WIDE_INT_1U << 31) + 1; break; default: gcc_unreachable (); } if (!fd.ordered) fn_index += fd.have_ordered * 8; if (fd.ordered) start_ix = ((int)BUILT_IN_GOMP_LOOP_DOACROSS_STATIC_START) + fn_index; else start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index; next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index; if (fd.have_reductemp || fd.have_pointer_condtemp) { if (fd.ordered) start_ix = (int)BUILT_IN_GOMP_LOOP_DOACROSS_START; else if (fd.have_ordered) start_ix = (int)BUILT_IN_GOMP_LOOP_ORDERED_START; else start_ix = (int)BUILT_IN_GOMP_LOOP_START; sched_arg =
build_int_cstu (long_integer_type_node, sched); if (!fd.chunk_size) fd.chunk_size = integer_zero_node; } if (fd.iter_type == long_long_unsigned_type_node) { start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START - (int)BUILT_IN_GOMP_LOOP_STATIC_START); next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT); } expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix, (enum built_in_function) next_ix, sched_arg, inner_stmt); } if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_only_virtuals); } /* Expand code for an OpenMP sections directive. In pseudo code, we generate v = GOMP_sections_start (n); L0: switch (v) { case 0: goto L2; case 1: section 1; goto L1; case 2: ... case n: ... default: abort (); } L1: v = GOMP_sections_next (); goto L0; L2: reduction; If this is a combined parallel sections, replace the call to GOMP_sections_start with call to GOMP_sections_next. */ static void expand_omp_sections (struct omp_region *region) { tree t, u, vin = NULL, vmain, vnext, l2; unsigned len; basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb; gimple_stmt_iterator si, switch_si; gomp_sections *sections_stmt; gimple *stmt; gomp_continue *cont; edge_iterator ei; edge e; struct omp_region *inner; unsigned i, casei; bool exit_reachable = region->cont != NULL; gcc_assert (region->exit != NULL); entry_bb = region->entry; l0_bb = single_succ (entry_bb); l1_bb = region->cont; l2_bb = region->exit; if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb) l2 = gimple_block_label (l2_bb); else { /* This can happen if there are reductions. */ len = EDGE_COUNT (l0_bb->succs); gcc_assert (len > 0); e = EDGE_SUCC (l0_bb, len - 1); si = gsi_last_nondebug_bb (e->dest); l2 = NULL_TREE; if (gsi_end_p (si) || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION) l2 = gimple_block_label (e->dest); else FOR_EACH_EDGE (e, ei, l0_bb->succs) { si = gsi_last_nondebug_bb (e->dest); if (gsi_end_p (si) || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION) { l2 = gimple_block_label (e->dest); break; } } } if (exit_reachable) default_bb = create_empty_bb (l1_bb->prev_bb); else default_bb = create_empty_bb (l0_bb); /* We will build a switch() with enough cases for all the GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work and a default case to abort if something goes wrong. */ len = EDGE_COUNT (l0_bb->succs); /* Use vec::quick_push on label_vec throughout, since we know the size in advance. */ auto_vec<tree> label_vec (len); /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the GIMPLE_OMP_SECTIONS statement. 
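For instance (hypothetical, with two sections and no reductions): v = GOMP_sections_start (2); after which the switch dispatches on v, cases 1 and 2 running the section bodies and case 0 branching to L2.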
*/ si = gsi_last_nondebug_bb (entry_bb); sections_stmt = as_a <gomp_sections *> (gsi_stmt (si)); gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS); vin = gimple_omp_sections_control (sections_stmt); tree clauses = gimple_omp_sections_clauses (sections_stmt); tree reductmp = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_); tree condtmp = omp_find_clause (clauses, OMP_CLAUSE__CONDTEMP_); tree cond_var = NULL_TREE; if (reductmp || condtmp) { tree reductions = null_pointer_node, mem = null_pointer_node; tree memv = NULL_TREE, condtemp = NULL_TREE; gimple_stmt_iterator gsi = gsi_none (); gimple *g = NULL; if (reductmp) { reductions = OMP_CLAUSE_DECL (reductmp); gcc_assert (TREE_CODE (reductions) == SSA_NAME); g = SSA_NAME_DEF_STMT (reductions); reductions = gimple_assign_rhs1 (g); OMP_CLAUSE_DECL (reductmp) = reductions; gsi = gsi_for_stmt (g); } else gsi = si; if (condtmp) { condtemp = OMP_CLAUSE_DECL (condtmp); tree c = omp_find_clause (OMP_CLAUSE_CHAIN (condtmp), OMP_CLAUSE__CONDTEMP_); cond_var = OMP_CLAUSE_DECL (c); tree type = TREE_TYPE (condtemp); memv = create_tmp_var (type); TREE_ADDRESSABLE (memv) = 1; unsigned cnt = 0; for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE && OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)) ++cnt; unsigned HOST_WIDE_INT sz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type))) * cnt; expand_omp_build_assign (&gsi, memv, build_int_cst (type, sz), false); mem = build_fold_addr_expr (memv); } t = build_int_cst (unsigned_type_node, len - 1); u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS2_START); stmt = gimple_build_call (u, 3, t, reductions, mem); gimple_call_set_lhs (stmt, vin); gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); if (condtmp) { expand_omp_build_assign (&gsi, condtemp, memv, false); tree t = build2 (PLUS_EXPR, TREE_TYPE (cond_var), vin, build_one_cst (TREE_TYPE (cond_var))); expand_omp_build_assign (&gsi, cond_var, t, false); } if (reductmp) { gsi_remove (&gsi, true); release_ssa_name (gimple_assign_lhs (g)); } } else if (!is_combined_parallel (region)) { /* If we are not inside a combined parallel+sections region, call GOMP_sections_start. */ t = build_int_cst (unsigned_type_node, len - 1); u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START); stmt = gimple_build_call (u, 1, t); } else { /* Otherwise, call GOMP_sections_next. */ u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT); stmt = gimple_build_call (u, 0); } if (!reductmp && !condtmp) { gimple_call_set_lhs (stmt, vin); gsi_insert_after (&si, stmt, GSI_SAME_STMT); } gsi_remove (&si, true); /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in L0_BB. */ switch_si = gsi_last_nondebug_bb (l0_bb); gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH); if (exit_reachable) { cont = as_a <gomp_continue *> (last_stmt (l1_bb)); gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE); vmain = gimple_omp_continue_control_use (cont); vnext = gimple_omp_continue_control_def (cont); } else { vmain = vin; vnext = NULL_TREE; } t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2); label_vec.quick_push (t); i = 1; /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */ for (inner = region->inner, casei = 1; inner; inner = inner->next, i++, casei++) { basic_block s_entry_bb, s_exit_bb; /* Skip optional reduction region. 
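Reduction regions appear among the inner regions as GIMPLE_OMP_ATOMIC_LOAD regions; they get no case label of their own, which is why the counters are decremented below before continuing.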
*/ if (inner->type == GIMPLE_OMP_ATOMIC_LOAD) { --i; --casei; continue; } s_entry_bb = inner->entry; s_exit_bb = inner->exit; t = gimple_block_label (s_entry_bb); u = build_int_cst (unsigned_type_node, casei); u = build_case_label (u, NULL, t); label_vec.quick_push (u); si = gsi_last_nondebug_bb (s_entry_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION); gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si))); gsi_remove (&si, true); single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU; if (s_exit_bb == NULL) continue; si = gsi_last_nondebug_bb (s_exit_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN); gsi_remove (&si, true); single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU; } /* Error handling code goes in DEFAULT_BB. */ t = gimple_block_label (default_bb); u = build_case_label (NULL, NULL, t); make_edge (l0_bb, default_bb, 0); add_bb_to_loop (default_bb, current_loops->tree_root); stmt = gimple_build_switch (vmain, u, label_vec); gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT); gsi_remove (&switch_si, true); si = gsi_start_bb (default_bb); stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0); gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING); if (exit_reachable) { tree bfn_decl; /* Code to get the next section goes in L1_BB. */ si = gsi_last_nondebug_bb (l1_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE); bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT); stmt = gimple_build_call (bfn_decl, 0); gimple_call_set_lhs (stmt, vnext); gsi_insert_before (&si, stmt, GSI_SAME_STMT); if (cond_var) { tree t = build2 (PLUS_EXPR, TREE_TYPE (cond_var), vnext, build_one_cst (TREE_TYPE (cond_var))); expand_omp_build_assign (&si, cond_var, t, false); } gsi_remove (&si, true); single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU; } /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */ si = gsi_last_nondebug_bb (l2_bb); if (gimple_omp_return_nowait_p (gsi_stmt (si))) t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT); else if (gimple_omp_return_lhs (gsi_stmt (si))) t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL); else t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END); stmt = gimple_build_call (t, 0); if (gimple_omp_return_lhs (gsi_stmt (si))) gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si))); gsi_insert_after (&si, stmt, GSI_SAME_STMT); gsi_remove (&si, true); set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb); } /* Expand code for an OpenMP single directive. We've already expanded much of the code, here we simply place the GOMP_barrier call. */ static void expand_omp_single (struct omp_region *region) { basic_block entry_bb, exit_bb; gimple_stmt_iterator si; entry_bb = region->entry; exit_bb = region->exit; si = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE); gsi_remove (&si, true); single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; si = gsi_last_nondebug_bb (exit_bb); if (!gimple_omp_return_nowait_p (gsi_stmt (si))) { tree t = gimple_omp_return_lhs (gsi_stmt (si)); gsi_insert_after (&si, omp_build_barrier (t), GSI_SAME_STMT); } gsi_remove (&si, true); single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU; } /* Generic expansion for OpenMP synchronization directives: master, ordered and critical. All we need to do here is remove the entry and exit markers for REGION. 
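(The one exception, handled below, is a host teams construct, which is instead expanded like a parallel/task region via expand_omp_taskreg.)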
*/ static void expand_omp_synch (struct omp_region *region) { basic_block entry_bb, exit_bb; gimple_stmt_iterator si; entry_bb = region->entry; exit_bb = region->exit; si = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS); if (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS && gimple_omp_teams_host (as_a <gomp_teams *> (gsi_stmt (si)))) { expand_omp_taskreg (region); return; } gsi_remove (&si, true); single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; if (exit_bb) { si = gsi_last_nondebug_bb (exit_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN); gsi_remove (&si, true); single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU; } } /* Translate enum omp_memory_order to enum memmodel. The two enums are using different numbers so that OMP_MEMORY_ORDER_UNSPECIFIED is 0. */ static enum memmodel omp_memory_order_to_memmodel (enum omp_memory_order mo) { switch (mo) { case OMP_MEMORY_ORDER_RELAXED: return MEMMODEL_RELAXED; case OMP_MEMORY_ORDER_ACQUIRE: return MEMMODEL_ACQUIRE; case OMP_MEMORY_ORDER_RELEASE: return MEMMODEL_RELEASE; case OMP_MEMORY_ORDER_ACQ_REL: return MEMMODEL_ACQ_REL; case OMP_MEMORY_ORDER_SEQ_CST: return MEMMODEL_SEQ_CST; default: gcc_unreachable (); } } /* A subroutine of expand_omp_atomic. Attempt to implement the atomic operation as a normal volatile load. */ static bool expand_omp_atomic_load (basic_block load_bb, tree addr, tree loaded_val, int index) { enum built_in_function tmpbase; gimple_stmt_iterator gsi; basic_block store_bb; location_t loc; gimple *stmt; tree decl, call, type, itype; gsi = gsi_last_nondebug_bb (load_bb); stmt = gsi_stmt (gsi); gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD); loc = gimple_location (stmt); /* ??? If the target does not implement atomic_load_optab[mode], and mode is smaller than word size, then expand_atomic_load assumes that the load is atomic. We could avoid the builtin entirely in this case. */ tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1); decl = builtin_decl_explicit (tmpbase); if (decl == NULL_TREE) return false; type = TREE_TYPE (loaded_val); itype = TREE_TYPE (TREE_TYPE (decl)); enum omp_memory_order omo = gimple_omp_atomic_memory_order (stmt); tree mo = build_int_cst (NULL, omp_memory_order_to_memmodel (omo)); call = build_call_expr_loc (loc, decl, 2, addr, mo); if (!useless_type_conversion_p (type, itype)) call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call); call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call); force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT); gsi_remove (&gsi, true); store_bb = single_succ (load_bb); gsi = gsi_last_nondebug_bb (store_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE); gsi_remove (&gsi, true); if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_no_phi); return true; } /* A subroutine of expand_omp_atomic. Attempt to implement the atomic operation as a normal volatile store. 
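For example (a hypothetical 8-byte case): a plain atomic write becomes a call to __atomic_store_8 (addr, stored_val, mo), and if the old value is consumed, an __atomic_exchange_8 call is emitted instead; both are the INDEX-selected members of the BUILT_IN_ATOMIC_STORE_N and BUILT_IN_ATOMIC_EXCHANGE_N families used below.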
*/ static bool expand_omp_atomic_store (basic_block load_bb, tree addr, tree loaded_val, tree stored_val, int index) { enum built_in_function tmpbase; gimple_stmt_iterator gsi; basic_block store_bb = single_succ (load_bb); location_t loc; gimple *stmt; tree decl, call, type, itype; machine_mode imode; bool exchange; gsi = gsi_last_nondebug_bb (load_bb); stmt = gsi_stmt (gsi); gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD); /* If the load value is needed, then this isn't a store but an exchange. */ exchange = gimple_omp_atomic_need_value_p (stmt); gsi = gsi_last_nondebug_bb (store_bb); stmt = gsi_stmt (gsi); gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE); loc = gimple_location (stmt); /* ??? If the target does not implement atomic_store_optab[mode], and mode is smaller than word size, then expand_atomic_store assumes that the store is atomic. We could avoid the builtin entirely in this case. */ tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N); tmpbase = (enum built_in_function) ((int) tmpbase + index + 1); decl = builtin_decl_explicit (tmpbase); if (decl == NULL_TREE) return false; type = TREE_TYPE (stored_val); /* Dig out the type of the function's second argument. */ itype = TREE_TYPE (decl); itype = TYPE_ARG_TYPES (itype); itype = TREE_CHAIN (itype); itype = TREE_VALUE (itype); imode = TYPE_MODE (itype); if (exchange && !can_atomic_exchange_p (imode, true)) return false; if (!useless_type_conversion_p (itype, type)) stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val); enum omp_memory_order omo = gimple_omp_atomic_memory_order (stmt); tree mo = build_int_cst (NULL, omp_memory_order_to_memmodel (omo)); call = build_call_expr_loc (loc, decl, 3, addr, stored_val, mo); if (exchange) { if (!useless_type_conversion_p (type, itype)) call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call); call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call); } force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT); gsi_remove (&gsi, true); /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */ gsi = gsi_last_nondebug_bb (load_bb); gsi_remove (&gsi, true); if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_no_phi); return true; } /* A subroutine of expand_omp_atomic. Attempt to implement the atomic operation as a __atomic_fetch_op builtin. INDEX is log2 of the size of the data type, and thus usable to find the index of the builtin decl. Returns false if the expression is not of the proper form. */ static bool expand_omp_atomic_fetch_op (basic_block load_bb, tree addr, tree loaded_val, tree stored_val, int index) { enum built_in_function oldbase, newbase, tmpbase; tree decl, itype, call; tree lhs, rhs; basic_block store_bb = single_succ (load_bb); gimple_stmt_iterator gsi; gimple *stmt; location_t loc; enum tree_code code; bool need_old, need_new; machine_mode imode; /* We expect to find the following sequences: load_bb: GIMPLE_OMP_ATOMIC_LOAD (tmp, mem) store_bb: val = tmp OP something; (or: something OP tmp) GIMPLE_OMP_STORE (val) ???FIXME: Allow a more flexible sequence. Perhaps use data flow to pick the statements. 
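For example (a sketch, for a 4-byte int): x = x + 1 under '#pragma omp atomic' matches the PLUS_EXPR case below and becomes __atomic_fetch_add_4 (&x, 1, mo); when the construct needs the updated value instead, the __atomic_add_fetch_4 variant is selected.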
*/ gsi = gsi_after_labels (store_bb); stmt = gsi_stmt (gsi); if (is_gimple_debug (stmt)) { gsi_next_nondebug (&gsi); if (gsi_end_p (gsi)) return false; stmt = gsi_stmt (gsi); } loc = gimple_location (stmt); if (!is_gimple_assign (stmt)) return false; gsi_next_nondebug (&gsi); if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE) return false; need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi)); need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb)); enum omp_memory_order omo = gimple_omp_atomic_memory_order (last_stmt (load_bb)); enum memmodel mo = omp_memory_order_to_memmodel (omo); gcc_checking_assert (!need_old || !need_new); if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0)) return false; /* Check for one of the supported fetch-op operations. */ code = gimple_assign_rhs_code (stmt); switch (code) { case PLUS_EXPR: case POINTER_PLUS_EXPR: oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N; newbase = BUILT_IN_ATOMIC_ADD_FETCH_N; break; case MINUS_EXPR: oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N; newbase = BUILT_IN_ATOMIC_SUB_FETCH_N; break; case BIT_AND_EXPR: oldbase = BUILT_IN_ATOMIC_FETCH_AND_N; newbase = BUILT_IN_ATOMIC_AND_FETCH_N; break; case BIT_IOR_EXPR: oldbase = BUILT_IN_ATOMIC_FETCH_OR_N; newbase = BUILT_IN_ATOMIC_OR_FETCH_N; break; case BIT_XOR_EXPR: oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N; newbase = BUILT_IN_ATOMIC_XOR_FETCH_N; break; default: return false; } /* Make sure the expression is of the proper form. */ if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0)) rhs = gimple_assign_rhs2 (stmt); else if (commutative_tree_code (gimple_assign_rhs_code (stmt)) && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0)) rhs = gimple_assign_rhs1 (stmt); else return false; tmpbase = ((enum built_in_function) ((need_new ? newbase : oldbase) + index + 1)); decl = builtin_decl_explicit (tmpbase); if (decl == NULL_TREE) return false; itype = TREE_TYPE (TREE_TYPE (decl)); imode = TYPE_MODE (itype); /* We could test all of the various optabs involved, but the fact of the matter is that (with the exception of i486 vs i586 and xadd) all targets that support any atomic operation optab also implement compare-and-swap. Let optabs.c take care of expanding any compare-and-swap loop. */ if (!can_compare_and_swap_p (imode, true) || !can_atomic_load_p (imode)) return false; gsi = gsi_last_nondebug_bb (load_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD); /* OpenMP does not imply any barrier-like semantics on its atomic ops. It only requires that the operation happen atomically, so the memory model requested by the directive (RELAXED by default) is sufficient. */ call = build_call_expr_loc (loc, decl, 3, addr, fold_convert_loc (loc, itype, rhs), build_int_cst (NULL, mo)); if (need_old || need_new) { lhs = need_old ? loaded_val : stored_val; call = fold_convert_loc (loc, TREE_TYPE (lhs), call); call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call); } else call = fold_convert_loc (loc, void_type_node, call); force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT); gsi_remove (&gsi, true); gsi = gsi_last_nondebug_bb (store_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE); gsi_remove (&gsi, true); gsi = gsi_last_nondebug_bb (store_bb); stmt = gsi_stmt (gsi); gsi_remove (&gsi, true); if (gimple_in_ssa_p (cfun)) { release_defs (stmt); update_ssa (TODO_update_ssa_no_phi); } return true; } /* A subroutine of expand_omp_atomic.
Implement the atomic operation as: oldval = *addr; repeat: newval = rhs; // with oldval replacing *addr in rhs oldval = __sync_val_compare_and_swap (addr, oldval, newval); if (oldval != newval) goto repeat; INDEX is log2 of the size of the data type, and thus usable to find the index of the builtin decl. */ static bool expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb, tree addr, tree loaded_val, tree stored_val, int index) { tree loadedi, storedi, initial, new_storedi, old_vali; tree type, itype, cmpxchg, iaddr, atype; gimple_stmt_iterator si; basic_block loop_header = single_succ (load_bb); gimple *phi, *stmt; edge e; enum built_in_function fncode; /* ??? We need a non-pointer interface to __atomic_compare_exchange in order to use the RELAXED memory model effectively. */ fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N + index + 1); cmpxchg = builtin_decl_explicit (fncode); if (cmpxchg == NULL_TREE) return false; type = TYPE_MAIN_VARIANT (TREE_TYPE (loaded_val)); atype = type; itype = TREE_TYPE (TREE_TYPE (cmpxchg)); if (!can_compare_and_swap_p (TYPE_MODE (itype), true) || !can_atomic_load_p (TYPE_MODE (itype))) return false; /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */ si = gsi_last_nondebug_bb (load_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD); /* For floating-point values, we'll need to view-convert them to integers so that we can perform the atomic compare and swap. Simplify the following code by always setting up the "i"ntegral variables. */ if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type)) { tree iaddr_val; iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode, true)); atype = itype; iaddr_val = force_gimple_operand_gsi (&si, fold_convert (TREE_TYPE (iaddr), addr), false, NULL_TREE, true, GSI_SAME_STMT); stmt = gimple_build_assign (iaddr, iaddr_val); gsi_insert_before (&si, stmt, GSI_SAME_STMT); loadedi = create_tmp_var (itype); if (gimple_in_ssa_p (cfun)) loadedi = make_ssa_name (loadedi); } else { iaddr = addr; loadedi = loaded_val; } fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1); tree loaddecl = builtin_decl_explicit (fncode); if (loaddecl) initial = fold_convert (atype, build_call_expr (loaddecl, 2, iaddr, build_int_cst (NULL_TREE, MEMMODEL_RELAXED))); else { tree off = build_int_cst (build_pointer_type_for_mode (atype, ptr_mode, true), 0); initial = build2 (MEM_REF, atype, iaddr, off); } initial = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true, GSI_SAME_STMT); /* Move the value to the LOADEDI temporary. 
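(For floating-point types the loop operates on LOADEDI, an integer-typed copy of the value, moving between the two representations with VIEW_CONVERT_EXPR so that the compare-and-swap compares bit patterns; as noted further down, that also behaves correctly for NaNs and -0.0.)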
*/ if (gimple_in_ssa_p (cfun)) { gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header))); phi = create_phi_node (loadedi, loop_header); SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)), initial); } else gsi_insert_before (&si, gimple_build_assign (loadedi, initial), GSI_SAME_STMT); if (loadedi != loaded_val) { gimple_stmt_iterator gsi2; tree x; x = build1 (VIEW_CONVERT_EXPR, type, loadedi); gsi2 = gsi_start_bb (loop_header); if (gimple_in_ssa_p (cfun)) { gassign *stmt; x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE, true, GSI_SAME_STMT); stmt = gimple_build_assign (loaded_val, x); gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT); } else { x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x); force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE, true, GSI_SAME_STMT); } } gsi_remove (&si, true); si = gsi_last_nondebug_bb (store_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE); if (iaddr == addr) storedi = stored_val; else storedi = force_gimple_operand_gsi (&si, build1 (VIEW_CONVERT_EXPR, itype, stored_val), true, NULL_TREE, true, GSI_SAME_STMT); /* Build the compare&swap statement. */ new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi); new_storedi = force_gimple_operand_gsi (&si, fold_convert (TREE_TYPE (loadedi), new_storedi), true, NULL_TREE, true, GSI_SAME_STMT); if (gimple_in_ssa_p (cfun)) old_vali = loadedi; else { old_vali = create_tmp_var (TREE_TYPE (loadedi)); stmt = gimple_build_assign (old_vali, loadedi); gsi_insert_before (&si, stmt, GSI_SAME_STMT); stmt = gimple_build_assign (loadedi, new_storedi); gsi_insert_before (&si, stmt, GSI_SAME_STMT); } /* Note that we always perform the comparison as an integer, even for floating point. This allows the atomic operation to properly succeed even with NaNs and -0.0. */ tree ne = build2 (NE_EXPR, boolean_type_node, new_storedi, old_vali); stmt = gimple_build_cond_empty (ne); gsi_insert_before (&si, stmt, GSI_SAME_STMT); /* Update cfg. */ e = single_succ_edge (store_bb); e->flags &= ~EDGE_FALLTHRU; e->flags |= EDGE_FALSE_VALUE; /* Expect no looping. */ e->probability = profile_probability::guessed_always (); e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE); e->probability = profile_probability::guessed_never (); /* Copy the new value to loadedi (we already did that before the condition if we are not in SSA). */ if (gimple_in_ssa_p (cfun)) { phi = gimple_seq_first_stmt (phi_nodes (loop_header)); SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi); } /* Remove GIMPLE_OMP_ATOMIC_STORE. */ gsi_remove (&si, true); class loop *loop = alloc_loop (); loop->header = loop_header; loop->latch = store_bb; add_loop (loop, loop_header->loop_father); if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_no_phi); return true; } /* A subroutine of expand_omp_atomic. Implement the atomic operation as: GOMP_atomic_start (); *addr = rhs; GOMP_atomic_end (); The result is not globally atomic, but works so long as all parallel references are within #pragma omp atomic directives. According to responses received from omp@openmp.org, appears to be within spec. Which makes sense, since that's how several other compilers handle this situation as well. LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're expanding. STORED_VAL is the operand of the matching GIMPLE_OMP_ATOMIC_STORE. 
We replace GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with loaded_val = *addr; and replace GIMPLE_OMP_ATOMIC_STORE (stored_val) with *addr = stored_val; */ static bool expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb, tree addr, tree loaded_val, tree stored_val) { gimple_stmt_iterator si; gassign *stmt; tree t; si = gsi_last_nondebug_bb (load_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD); t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START); t = build_call_expr (t, 0); force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT); tree mem = build_simple_mem_ref (addr); TREE_TYPE (mem) = TREE_TYPE (loaded_val); TREE_OPERAND (mem, 1) = fold_convert (build_pointer_type_for_mode (TREE_TYPE (mem), ptr_mode, true), TREE_OPERAND (mem, 1)); stmt = gimple_build_assign (loaded_val, mem); gsi_insert_before (&si, stmt, GSI_SAME_STMT); gsi_remove (&si, true); si = gsi_last_nondebug_bb (store_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE); stmt = gimple_build_assign (unshare_expr (mem), stored_val); gsi_insert_before (&si, stmt, GSI_SAME_STMT); t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END); t = build_call_expr (t, 0); force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT); gsi_remove (&si, true); if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_no_phi); return true; } /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand using expand_omp_atomic_fetch_op. If that fails, we try to call expand_omp_atomic_pipeline, and if that fails too, the ultimate fallback is wrapping the operation in a mutex (expand_omp_atomic_mutex). REGION is the atomic region built by build_omp_regions_1(). */ static void expand_omp_atomic (struct omp_region *region) { basic_block load_bb = region->entry, store_bb = region->exit; gomp_atomic_load *load = as_a <gomp_atomic_load *> (last_stmt (load_bb)); gomp_atomic_store *store = as_a <gomp_atomic_store *> (last_stmt (store_bb)); tree loaded_val = gimple_omp_atomic_load_lhs (load); tree addr = gimple_omp_atomic_load_rhs (load); tree stored_val = gimple_omp_atomic_store_val (store); tree type = TYPE_MAIN_VARIANT (TREE_TYPE (loaded_val)); HOST_WIDE_INT index; /* Make sure the type is one of the supported sizes. */ index = tree_to_uhwi (TYPE_SIZE_UNIT (type)); index = exact_log2 (index); if (index >= 0 && index <= 4) { unsigned int align = TYPE_ALIGN_UNIT (type); /* __sync builtins require strict data alignment. */ if (exact_log2 (align) >= index) { /* Atomic load. */ scalar_mode smode; if (loaded_val == stored_val && (is_int_mode (TYPE_MODE (type), &smode) || is_float_mode (TYPE_MODE (type), &smode)) && GET_MODE_BITSIZE (smode) <= BITS_PER_WORD && expand_omp_atomic_load (load_bb, addr, loaded_val, index)) return; /* Atomic store. */ if ((is_int_mode (TYPE_MODE (type), &smode) || is_float_mode (TYPE_MODE (type), &smode)) && GET_MODE_BITSIZE (smode) <= BITS_PER_WORD && store_bb == single_succ (load_bb) && first_stmt (store_bb) == store && expand_omp_atomic_store (load_bb, addr, loaded_val, stored_val, index)) return; /* When possible, use specialized atomic update functions. */ if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)) && store_bb == single_succ (load_bb) && expand_omp_atomic_fetch_op (load_bb, addr, loaded_val, stored_val, index)) return; /* If we don't have specialized __sync builtins, try to implement as a compare and swap loop.
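To recap the gating above with concrete (hypothetical) numbers: a 4-byte int gives index == 2, and exact_log2 of its 4-byte alignment is also 2, so the __atomic load/store/fetch-op paths are tried first; we only reach this compare-and-swap loop when none of them applied, and if it cannot be used either, the mutex fallback below is the last resort.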
*/ if (expand_omp_atomic_pipeline (load_bb, store_bb, addr, loaded_val, stored_val, index)) return; } } /* The ultimate fallback is wrapping the operation in a mutex. */ expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val); } /* Mark the loops inside the kernels region starting at REGION_ENTRY and ending at REGION_EXIT. */ static void mark_loops_in_oacc_kernels_region (basic_block region_entry, basic_block region_exit) { class loop *outer = region_entry->loop_father; gcc_assert (region_exit == NULL || outer == region_exit->loop_father); /* Don't parallelize the kernels region if it contains more than one outer loop. */ unsigned int nr_outer_loops = 0; class loop *single_outer = NULL; for (class loop *loop = outer->inner; loop != NULL; loop = loop->next) { gcc_assert (loop_outer (loop) == outer); if (!dominated_by_p (CDI_DOMINATORS, loop->header, region_entry)) continue; if (region_exit != NULL && dominated_by_p (CDI_DOMINATORS, loop->header, region_exit)) continue; nr_outer_loops++; single_outer = loop; } if (nr_outer_loops != 1) return; for (class loop *loop = single_outer->inner; loop != NULL; loop = loop->inner) if (loop->next) return; /* Mark the loops in the region. */ for (class loop *loop = single_outer; loop != NULL; loop = loop->inner) loop->in_oacc_kernels_region = true; } /* Types used to pass grid and workgroup sizes to kernel invocation. */ struct GTY(()) grid_launch_attributes_trees { tree kernel_dim_array_type; tree kernel_lattrs_dimnum_decl; tree kernel_lattrs_grid_decl; tree kernel_lattrs_group_decl; tree kernel_launch_attributes_type; }; static GTY(()) struct grid_launch_attributes_trees *grid_attr_trees; /* Create types used to pass kernel launch attributes to target. */ static void grid_create_kernel_launch_attr_types (void) { if (grid_attr_trees) return; grid_attr_trees = ggc_alloc <grid_launch_attributes_trees> (); tree dim_arr_index_type = build_index_type (build_int_cst (integer_type_node, 2)); grid_attr_trees->kernel_dim_array_type = build_array_type (uint32_type_node, dim_arr_index_type); grid_attr_trees->kernel_launch_attributes_type = make_node (RECORD_TYPE); grid_attr_trees->kernel_lattrs_dimnum_decl = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("ndim"), uint32_type_node); DECL_CHAIN (grid_attr_trees->kernel_lattrs_dimnum_decl) = NULL_TREE; grid_attr_trees->kernel_lattrs_grid_decl = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("grid_size"), grid_attr_trees->kernel_dim_array_type); DECL_CHAIN (grid_attr_trees->kernel_lattrs_grid_decl) = grid_attr_trees->kernel_lattrs_dimnum_decl; grid_attr_trees->kernel_lattrs_group_decl = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("group_size"), grid_attr_trees->kernel_dim_array_type); DECL_CHAIN (grid_attr_trees->kernel_lattrs_group_decl) = grid_attr_trees->kernel_lattrs_grid_decl; finish_builtin_struct (grid_attr_trees->kernel_launch_attributes_type, "__gomp_kernel_launch_attributes", grid_attr_trees->kernel_lattrs_group_decl, NULL_TREE); } /* Insert before the current statement in GSI a store of VALUE to INDEX of array (of type kernel_dim_array_type) FLD_DECL of RANGE_VAR. VALUE must be of type uint32_type_node.
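In effect (a sketch): RANGE_VAR.FLD_DECL[INDEX] = VALUE;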
*/ static void grid_insert_store_range_dim (gimple_stmt_iterator *gsi, tree range_var, tree fld_decl, int index, tree value) { tree ref = build4 (ARRAY_REF, uint32_type_node, build3 (COMPONENT_REF, grid_attr_trees->kernel_dim_array_type, range_var, fld_decl, NULL_TREE), build_int_cst (integer_type_node, index), NULL_TREE, NULL_TREE); gsi_insert_before (gsi, gimple_build_assign (ref, value), GSI_SAME_STMT); } /* Return a tree representation of a pointer to a structure with grid and work-group size information. Statements filling that information will be inserted before GSI; TGT_STMT is the target statement which has the necessary information in it. */ static tree grid_get_kernel_launch_attributes (gimple_stmt_iterator *gsi, gomp_target *tgt_stmt) { grid_create_kernel_launch_attr_types (); tree lattrs = create_tmp_var (grid_attr_trees->kernel_launch_attributes_type, "__kernel_launch_attrs"); unsigned max_dim = 0; for (tree clause = gimple_omp_target_clauses (tgt_stmt); clause; clause = OMP_CLAUSE_CHAIN (clause)) { if (OMP_CLAUSE_CODE (clause) != OMP_CLAUSE__GRIDDIM_) continue; unsigned dim = OMP_CLAUSE__GRIDDIM__DIMENSION (clause); max_dim = MAX (dim, max_dim); grid_insert_store_range_dim (gsi, lattrs, grid_attr_trees->kernel_lattrs_grid_decl, dim, OMP_CLAUSE__GRIDDIM__SIZE (clause)); grid_insert_store_range_dim (gsi, lattrs, grid_attr_trees->kernel_lattrs_group_decl, dim, OMP_CLAUSE__GRIDDIM__GROUP (clause)); } tree dimref = build3 (COMPONENT_REF, uint32_type_node, lattrs, grid_attr_trees->kernel_lattrs_dimnum_decl, NULL_TREE); gcc_checking_assert (max_dim <= 2); tree dimensions = build_int_cstu (uint32_type_node, max_dim + 1); gsi_insert_before (gsi, gimple_build_assign (dimref, dimensions), GSI_SAME_STMT); TREE_ADDRESSABLE (lattrs) = 1; return build_fold_addr_expr (lattrs); } /* Build target argument identifier from the DEVICE identifier, value identifier ID and whether the element also has a SUBSEQUENT_PARAM. */ static tree get_target_argument_identifier_1 (int device, bool subsequent_param, int id) { tree t = build_int_cst (integer_type_node, device); if (subsequent_param) t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t, build_int_cst (integer_type_node, GOMP_TARGET_ARG_SUBSEQUENT_PARAM)); t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t, build_int_cst (integer_type_node, id)); return t; } /* Like above but return it in a type that can be directly stored as an element of the argument array. */ static tree get_target_argument_identifier (int device, bool subsequent_param, int id) { tree t = get_target_argument_identifier_1 (device, subsequent_param, id); return fold_convert (ptr_type_node, t); } /* Return a target argument consisting of DEVICE identifier, value identifier ID, and the actual VALUE. */ static tree get_target_argument_value (gimple_stmt_iterator *gsi, int device, int id, tree value) { tree t = fold_build2 (LSHIFT_EXPR, integer_type_node, fold_convert (integer_type_node, value), build_int_cst (unsigned_type_node, GOMP_TARGET_ARG_VALUE_SHIFT)); t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t, get_target_argument_identifier_1 (device, false, id)); t = fold_convert (ptr_type_node, t); return force_gimple_operand_gsi (gsi, t, true, NULL, true, GSI_SAME_STMT); } /* If VALUE is an integer constant greater than -2^15 and smaller than 2^15, push one argument to ARGS with the DEVICE, ID and VALUE all embedded in it, otherwise push an identifier (with DEVICE and ID) and the VALUE in two arguments.
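For example (hypothetical values, only to illustrate the encoding): a constant num_teams of 4 fits into 16 bits and is pushed as a single pointer-sized argument holding DEVICE | ID | (4 << GOMP_TARGET_ARG_VALUE_SHIFT); a run-time value is instead pushed as an identifier with GOMP_TARGET_ARG_SUBSEQUENT_PARAM set, followed by the value itself in the next slot.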
*/ static void push_target_argument_according_to_value (gimple_stmt_iterator *gsi, int device, int id, tree value, vec <tree> *args) { if (tree_fits_shwi_p (value) && tree_to_shwi (value) > -(1 << 15) && tree_to_shwi (value) < (1 << 15)) args->quick_push (get_target_argument_value (gsi, device, id, value)); else { args->quick_push (get_target_argument_identifier (device, true, id)); value = fold_convert (ptr_type_node, value); value = force_gimple_operand_gsi (gsi, value, true, NULL, true, GSI_SAME_STMT); args->quick_push (value); } } /* Create an array of arguments that is then passed to GOMP_target. */ static tree get_target_arguments (gimple_stmt_iterator *gsi, gomp_target *tgt_stmt) { auto_vec <tree, 6> args; tree clauses = gimple_omp_target_clauses (tgt_stmt); tree t, c = omp_find_clause (clauses, OMP_CLAUSE_NUM_TEAMS); if (c) t = OMP_CLAUSE_NUM_TEAMS_EXPR (c); else t = integer_minus_one_node; push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL, GOMP_TARGET_ARG_NUM_TEAMS, t, &args); c = omp_find_clause (clauses, OMP_CLAUSE_THREAD_LIMIT); if (c) t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c); else t = integer_minus_one_node; push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL, GOMP_TARGET_ARG_THREAD_LIMIT, t, &args); /* Add HSA-specific grid sizes, if available. */ if (omp_find_clause (gimple_omp_target_clauses (tgt_stmt), OMP_CLAUSE__GRIDDIM_)) { int id = GOMP_TARGET_ARG_HSA_KERNEL_ATTRIBUTES; t = get_target_argument_identifier (GOMP_DEVICE_HSA, true, id); args.quick_push (t); args.quick_push (grid_get_kernel_launch_attributes (gsi, tgt_stmt)); } /* Produce more, perhaps device specific, arguments here. */ tree argarray = create_tmp_var (build_array_type_nelts (ptr_type_node, args.length () + 1), ".omp_target_args"); for (unsigned i = 0; i < args.length (); i++) { tree ref = build4 (ARRAY_REF, ptr_type_node, argarray, build_int_cst (integer_type_node, i), NULL_TREE, NULL_TREE); gsi_insert_before (gsi, gimple_build_assign (ref, args[i]), GSI_SAME_STMT); } tree ref = build4 (ARRAY_REF, ptr_type_node, argarray, build_int_cst (integer_type_node, args.length ()), NULL_TREE, NULL_TREE); gsi_insert_before (gsi, gimple_build_assign (ref, null_pointer_node), GSI_SAME_STMT); TREE_ADDRESSABLE (argarray) = 1; return build_fold_addr_expr (argarray); } /* Expand the GIMPLE_OMP_TARGET starting at REGION. 
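(The .omp_target_args array built above by get_target_arguments is NULL-terminated and is what the GOMP_target launch eventually receives.)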
*/ static void expand_omp_target (struct omp_region *region) { basic_block entry_bb, exit_bb, new_bb; struct function *child_cfun; tree child_fn, block, t; gimple_stmt_iterator gsi; gomp_target *entry_stmt; gimple *stmt; edge e; bool offloaded; int target_kind; entry_stmt = as_a <gomp_target *> (last_stmt (region->entry)); target_kind = gimple_omp_target_kind (entry_stmt); new_bb = region->entry; offloaded = is_gimple_omp_offloaded (entry_stmt); switch (target_kind) { case GF_OMP_TARGET_KIND_REGION: case GF_OMP_TARGET_KIND_UPDATE: case GF_OMP_TARGET_KIND_ENTER_DATA: case GF_OMP_TARGET_KIND_EXIT_DATA: case GF_OMP_TARGET_KIND_OACC_PARALLEL: case GF_OMP_TARGET_KIND_OACC_KERNELS: case GF_OMP_TARGET_KIND_OACC_SERIAL: case GF_OMP_TARGET_KIND_OACC_UPDATE: case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: case GF_OMP_TARGET_KIND_OACC_DECLARE: case GF_OMP_TARGET_KIND_DATA: case GF_OMP_TARGET_KIND_OACC_DATA: case GF_OMP_TARGET_KIND_OACC_HOST_DATA: break; default: gcc_unreachable (); } child_fn = NULL_TREE; child_cfun = NULL; if (offloaded) { child_fn = gimple_omp_target_child_fn (entry_stmt); child_cfun = DECL_STRUCT_FUNCTION (child_fn); } /* Supported by expand_omp_taskreg, but not here. */ if (child_cfun != NULL) gcc_checking_assert (!child_cfun->cfg); gcc_checking_assert (!gimple_in_ssa_p (cfun)); entry_bb = region->entry; exit_bb = region->exit; if (target_kind == GF_OMP_TARGET_KIND_OACC_KERNELS) mark_loops_in_oacc_kernels_region (region->entry, region->exit); /* Going on, all OpenACC compute constructs are mapped to 'BUILT_IN_GOACC_PARALLEL', and get their compute regions outlined. To distinguish between them, we attach attributes. */ switch (target_kind) { case GF_OMP_TARGET_KIND_OACC_PARALLEL: DECL_ATTRIBUTES (child_fn) = tree_cons (get_identifier ("oacc parallel"), NULL_TREE, DECL_ATTRIBUTES (child_fn)); break; case GF_OMP_TARGET_KIND_OACC_KERNELS: DECL_ATTRIBUTES (child_fn) = tree_cons (get_identifier ("oacc kernels"), NULL_TREE, DECL_ATTRIBUTES (child_fn)); break; case GF_OMP_TARGET_KIND_OACC_SERIAL: DECL_ATTRIBUTES (child_fn) = tree_cons (get_identifier ("oacc serial"), NULL_TREE, DECL_ATTRIBUTES (child_fn)); break; default: /* Make sure we don't miss any. */ gcc_checking_assert (!(is_gimple_omp_oacc (entry_stmt) && is_gimple_omp_offloaded (entry_stmt))); break; } if (offloaded) { unsigned srcidx, dstidx, num; /* If the offloading region needs data sent from the parent function, then the very first statement (except possible tree profile counter updates) of the offloading body is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since &.OMP_DATA_O is passed as an argument to the child function, we need to replace it with the argument as seen by the child function. In most cases, this will end up being the identity assignment .OMP_DATA_I = .OMP_DATA_I. However, if the offloading body had a function call that has been inlined, the original PARM_DECL .OMP_DATA_I may have been converted into a different local variable. In which case, we need to keep the assignment. 
*/ tree data_arg = gimple_omp_target_data_arg (entry_stmt); if (data_arg) { basic_block entry_succ_bb = single_succ (entry_bb); gimple_stmt_iterator gsi; tree arg; gimple *tgtcopy_stmt = NULL; tree sender = TREE_VEC_ELT (data_arg, 0); for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi)) { gcc_assert (!gsi_end_p (gsi)); stmt = gsi_stmt (gsi); if (gimple_code (stmt) != GIMPLE_ASSIGN) continue; if (gimple_num_ops (stmt) == 2) { tree arg = gimple_assign_rhs1 (stmt); /* We're ignoring the subcode because we're effectively doing a STRIP_NOPS. */ if (TREE_CODE (arg) == ADDR_EXPR && TREE_OPERAND (arg, 0) == sender) { tgtcopy_stmt = stmt; break; } } } gcc_assert (tgtcopy_stmt != NULL); arg = DECL_ARGUMENTS (child_fn); gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg); gsi_remove (&gsi, true); } /* Declare local variables needed in CHILD_CFUN. */ block = DECL_INITIAL (child_fn); BLOCK_VARS (block) = vec2chain (child_cfun->local_decls); /* The gimplifier could record temporaries in the offloading block rather than in containing function's local_decls chain, which would mean cgraph missed finalizing them. Do it now. */ for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t)) if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t)) varpool_node::finalize_decl (t); DECL_SAVED_TREE (child_fn) = NULL; /* We'll create a CFG for child_fn, so no gimple body is needed. */ gimple_set_body (child_fn, NULL); TREE_USED (block) = 1; /* Reset DECL_CONTEXT on function arguments. */ for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t)) DECL_CONTEXT (t) = child_fn; /* Split ENTRY_BB at GIMPLE_*, so that it can be moved to the child function. */ gsi = gsi_last_nondebug_bb (entry_bb); stmt = gsi_stmt (gsi); gcc_assert (stmt && gimple_code (stmt) == gimple_code (entry_stmt)); e = split_block (entry_bb, stmt); gsi_remove (&gsi, true); entry_bb = e->dest; single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */ if (exit_bb) { gsi = gsi_last_nondebug_bb (exit_bb); gcc_assert (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN); stmt = gimple_build_return (NULL); gsi_insert_after (&gsi, stmt, GSI_SAME_STMT); gsi_remove (&gsi, true); } /* Move the offloading region into CHILD_CFUN. */ block = gimple_block (entry_stmt); new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block); if (exit_bb) single_succ_edge (new_bb)->flags = EDGE_FALLTHRU; /* When the OMP expansion process cannot guarantee an up-to-date loop tree arrange for the child function to fixup loops. */ if (loops_state_satisfies_p (LOOPS_NEED_FIXUP)) child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP; /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */ num = vec_safe_length (child_cfun->local_decls); for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++) { t = (*child_cfun->local_decls)[srcidx]; if (DECL_CONTEXT (t) == cfun->decl) continue; if (srcidx != dstidx) (*child_cfun->local_decls)[dstidx] = t; dstidx++; } if (dstidx != num) vec_safe_truncate (child_cfun->local_decls, dstidx); /* Inform the callgraph about the new function. */ child_cfun->curr_properties = cfun->curr_properties; child_cfun->has_simduid_loops |= cfun->has_simduid_loops; child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops; cgraph_node *node = cgraph_node::get_create (child_fn); node->parallelized_function = 1; cgraph_node::add_new_function (child_fn, true); /* Add the new function to the offload table. 
*/ if (ENABLE_OFFLOADING) { if (in_lto_p) DECL_PRESERVE_P (child_fn) = 1; vec_safe_push (offload_funcs, child_fn); } bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl) && !DECL_ASSEMBLER_NAME_SET_P (child_fn); /* Fix the callgraph edges for child_cfun. Those for cfun will be fixed in a following pass. */ push_cfun (child_cfun); if (need_asm) assign_assembler_name_if_needed (child_fn); cgraph_edge::rebuild_edges (); /* Some EH regions might become dead, see PR34608. If pass_cleanup_cfg isn't the first pass to happen with the new child, these dead EH edges might cause problems. Clean them up now. */ if (flag_exceptions) { basic_block bb; bool changed = false; FOR_EACH_BB_FN (bb, cfun) changed |= gimple_purge_dead_eh_edges (bb); if (changed) cleanup_tree_cfg (); } if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP)) verify_loop_structure (); pop_cfun (); if (dump_file && !gimple_in_ssa_p (cfun)) { omp_any_child_fn_dumped = true; dump_function_header (dump_file, child_fn, dump_flags); dump_function_to_file (child_fn, dump_file, dump_flags); } adjust_context_and_scope (region, gimple_block (entry_stmt), child_fn); } /* Emit a library call to launch the offloading region, or do data transfers. */ tree t1, t2, t3, t4, depend, c, clauses; enum built_in_function start_ix; unsigned int flags_i = 0; switch (gimple_omp_target_kind (entry_stmt)) { case GF_OMP_TARGET_KIND_REGION: start_ix = BUILT_IN_GOMP_TARGET; break; case GF_OMP_TARGET_KIND_DATA: start_ix = BUILT_IN_GOMP_TARGET_DATA; break; case GF_OMP_TARGET_KIND_UPDATE: start_ix = BUILT_IN_GOMP_TARGET_UPDATE; break; case GF_OMP_TARGET_KIND_ENTER_DATA: start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA; break; case GF_OMP_TARGET_KIND_EXIT_DATA: start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA; flags_i |= GOMP_TARGET_FLAG_EXIT_DATA; break; case GF_OMP_TARGET_KIND_OACC_PARALLEL: case GF_OMP_TARGET_KIND_OACC_KERNELS: case GF_OMP_TARGET_KIND_OACC_SERIAL: start_ix = BUILT_IN_GOACC_PARALLEL; break; case GF_OMP_TARGET_KIND_OACC_DATA: case GF_OMP_TARGET_KIND_OACC_HOST_DATA: start_ix = BUILT_IN_GOACC_DATA_START; break; case GF_OMP_TARGET_KIND_OACC_UPDATE: start_ix = BUILT_IN_GOACC_UPDATE; break; case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: start_ix = BUILT_IN_GOACC_ENTER_EXIT_DATA; break; case GF_OMP_TARGET_KIND_OACC_DECLARE: start_ix = BUILT_IN_GOACC_DECLARE; break; default: gcc_unreachable (); } clauses = gimple_omp_target_clauses (entry_stmt); tree device = NULL_TREE; location_t device_loc = UNKNOWN_LOCATION; tree goacc_flags = NULL_TREE; if (is_gimple_omp_oacc (entry_stmt)) { /* By default, no GOACC_FLAGs are set. */ goacc_flags = integer_zero_node; } else { c = omp_find_clause (clauses, OMP_CLAUSE_DEVICE); if (c) { device = OMP_CLAUSE_DEVICE_ID (c); device_loc = OMP_CLAUSE_LOCATION (c); } else { /* By default, the value of DEVICE is GOMP_DEVICE_ICV (let runtime library choose). */ device = build_int_cst (integer_type_node, GOMP_DEVICE_ICV); device_loc = gimple_location (entry_stmt); } c = omp_find_clause (clauses, OMP_CLAUSE_NOWAIT); if (c) flags_i |= GOMP_TARGET_FLAG_NOWAIT; } /* By default, there is no conditional. */ tree cond = NULL_TREE; c = omp_find_clause (clauses, OMP_CLAUSE_IF); if (c) cond = OMP_CLAUSE_IF_EXPR (c); /* If we found the clause 'if (cond)', build: OpenACC: goacc_flags = (cond ? goacc_flags : flags | GOACC_FLAG_HOST_FALLBACK) OpenMP: device = (cond ? 
device : GOMP_DEVICE_HOST_FALLBACK) */ if (cond) { tree *tp; if (is_gimple_omp_oacc (entry_stmt)) tp = &goacc_flags; else { /* Ensure 'device' is of the correct type. */ device = fold_convert_loc (device_loc, integer_type_node, device); tp = &device; } cond = gimple_boolify (cond); basic_block cond_bb, then_bb, else_bb; edge e; tree tmp_var; tmp_var = create_tmp_var (TREE_TYPE (*tp)); if (offloaded) e = split_block_after_labels (new_bb); else { gsi = gsi_last_nondebug_bb (new_bb); gsi_prev (&gsi); e = split_block (new_bb, gsi_stmt (gsi)); } cond_bb = e->src; new_bb = e->dest; remove_edge (e); then_bb = create_empty_bb (cond_bb); else_bb = create_empty_bb (then_bb); set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb); set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb); stmt = gimple_build_cond_empty (cond); gsi = gsi_last_bb (cond_bb); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); gsi = gsi_start_bb (then_bb); stmt = gimple_build_assign (tmp_var, *tp); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); gsi = gsi_start_bb (else_bb); if (is_gimple_omp_oacc (entry_stmt)) stmt = gimple_build_assign (tmp_var, BIT_IOR_EXPR, *tp, build_int_cst (integer_type_node, GOACC_FLAG_HOST_FALLBACK)); else stmt = gimple_build_assign (tmp_var, build_int_cst (integer_type_node, GOMP_DEVICE_HOST_FALLBACK)); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE); make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE); add_bb_to_loop (then_bb, cond_bb->loop_father); add_bb_to_loop (else_bb, cond_bb->loop_father); make_edge (then_bb, new_bb, EDGE_FALLTHRU); make_edge (else_bb, new_bb, EDGE_FALLTHRU); *tp = tmp_var; gsi = gsi_last_nondebug_bb (new_bb); } else { gsi = gsi_last_nondebug_bb (new_bb); if (device != NULL_TREE) device = force_gimple_operand_gsi (&gsi, device, true, NULL_TREE, true, GSI_SAME_STMT); } t = gimple_omp_target_data_arg (entry_stmt); if (t == NULL) { t1 = size_zero_node; t2 = build_zero_cst (ptr_type_node); t3 = t2; t4 = t2; } else { t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1)))); t1 = size_binop (PLUS_EXPR, t1, size_int (1)); t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0)); t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1)); t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2)); } gimple *g; bool tagging = false; /* The maximum number used by any start_ix, without varargs. 
*/ auto_vec<tree, 11> args; if (is_gimple_omp_oacc (entry_stmt)) { tree goacc_flags_m = fold_build1 (GOACC_FLAGS_MARSHAL_OP, TREE_TYPE (goacc_flags), goacc_flags); goacc_flags_m = force_gimple_operand_gsi (&gsi, goacc_flags_m, true, NULL_TREE, true, GSI_SAME_STMT); args.quick_push (goacc_flags_m); } else args.quick_push (device); if (offloaded) args.quick_push (build_fold_addr_expr (child_fn)); args.quick_push (t1); args.quick_push (t2); args.quick_push (t3); args.quick_push (t4); switch (start_ix) { case BUILT_IN_GOACC_DATA_START: case BUILT_IN_GOACC_DECLARE: case BUILT_IN_GOMP_TARGET_DATA: break; case BUILT_IN_GOMP_TARGET: case BUILT_IN_GOMP_TARGET_UPDATE: case BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA: args.quick_push (build_int_cst (unsigned_type_node, flags_i)); c = omp_find_clause (clauses, OMP_CLAUSE_DEPEND); if (c) depend = OMP_CLAUSE_DECL (c); else depend = build_int_cst (ptr_type_node, 0); args.quick_push (depend); if (start_ix == BUILT_IN_GOMP_TARGET) args.quick_push (get_target_arguments (&gsi, entry_stmt)); break; case BUILT_IN_GOACC_PARALLEL: if (lookup_attribute ("oacc serial", DECL_ATTRIBUTES (child_fn)) != NULL) { tree dims = NULL_TREE; unsigned int ix; /* For serial constructs we set all dimensions to 1. */ for (ix = GOMP_DIM_MAX; ix--;) dims = tree_cons (NULL_TREE, integer_one_node, dims); oacc_replace_fn_attrib (child_fn, dims); } else oacc_set_fn_attrib (child_fn, clauses, &args); tagging = true; /* FALLTHRU */ case BUILT_IN_GOACC_ENTER_EXIT_DATA: case BUILT_IN_GOACC_UPDATE: { tree t_async = NULL_TREE; /* If present, use the value specified by the respective clause, making sure that is of the correct type. */ c = omp_find_clause (clauses, OMP_CLAUSE_ASYNC); if (c) t_async = fold_convert_loc (OMP_CLAUSE_LOCATION (c), integer_type_node, OMP_CLAUSE_ASYNC_EXPR (c)); else if (!tagging) /* Default values for t_async. */ t_async = fold_convert_loc (gimple_location (entry_stmt), integer_type_node, build_int_cst (integer_type_node, GOMP_ASYNC_SYNC)); if (tagging && t_async) { unsigned HOST_WIDE_INT i_async = GOMP_LAUNCH_OP_MAX; if (TREE_CODE (t_async) == INTEGER_CST) { /* See if we can pack the async arg in to the tag's operand. */ i_async = TREE_INT_CST_LOW (t_async); if (i_async < GOMP_LAUNCH_OP_MAX) t_async = NULL_TREE; else i_async = GOMP_LAUNCH_OP_MAX; } args.safe_push (oacc_launch_pack (GOMP_LAUNCH_ASYNC, NULL_TREE, i_async)); } if (t_async) args.safe_push (force_gimple_operand_gsi (&gsi, t_async, true, NULL_TREE, true, GSI_SAME_STMT)); /* Save the argument index, and ... */ unsigned t_wait_idx = args.length (); unsigned num_waits = 0; c = omp_find_clause (clauses, OMP_CLAUSE_WAIT); if (!tagging || c) /* ... push a placeholder. */ args.safe_push (integer_zero_node); for (; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WAIT) { tree arg = fold_convert_loc (OMP_CLAUSE_LOCATION (c), integer_type_node, OMP_CLAUSE_WAIT_EXPR (c)); arg = force_gimple_operand_gsi (&gsi, arg, true, NULL_TREE, true, GSI_SAME_STMT); args.safe_push (arg); num_waits++; } if (!tagging || num_waits) { tree len; /* Now that we know the number, update the placeholder. */ if (tagging) len = oacc_launch_pack (GOMP_LAUNCH_WAIT, NULL_TREE, num_waits); else len = build_int_cst (integer_type_node, num_waits); len = fold_convert_loc (gimple_location (entry_stmt), unsigned_type_node, len); args[t_wait_idx] = len; } } break; default: gcc_unreachable (); } if (tagging) /* Push terminal marker - zero. 
args.safe_push (oacc_launch_pack (0, NULL_TREE, 0));

  g = gimple_build_call_vec (builtin_decl_explicit (start_ix), args);
  gimple_set_location (g, gimple_location (entry_stmt));
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
  if (!offloaded)
    {
      g = gsi_stmt (gsi);
      gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
      gsi_remove (&gsi, true);
    }
}

/* Expand KFOR loop as a HSA gridified kernel, i.e. as a body only with
   iteration variable derived from the thread number.  INTRA_GROUP means this
   is an expansion of a loop iterating over work-items within a separate
   iteration over groups.  */

static void
grid_expand_omp_for_loop (struct omp_region *kfor, bool intra_group)
{
  gimple_stmt_iterator gsi;
  gomp_for *for_stmt = as_a <gomp_for *> (last_stmt (kfor->entry));
  gcc_checking_assert (gimple_omp_for_kind (for_stmt)
                       == GF_OMP_FOR_KIND_GRID_LOOP);
  size_t collapse = gimple_omp_for_collapse (for_stmt);
  struct omp_for_data_loop *loops
    = XALLOCAVEC (struct omp_for_data_loop,
                  gimple_omp_for_collapse (for_stmt));
  struct omp_for_data fd;

  remove_edge (BRANCH_EDGE (kfor->entry));
  basic_block body_bb = FALLTHRU_EDGE (kfor->entry)->dest;

  gcc_assert (kfor->cont);
  omp_extract_for_data (for_stmt, &fd, loops);

  gsi = gsi_start_bb (body_bb);

  for (size_t dim = 0; dim < collapse; dim++)
    {
      tree type, itype;
      itype = type = TREE_TYPE (fd.loops[dim].v);
      if (POINTER_TYPE_P (type))
        itype = signed_type_for (type);

      tree n1 = fd.loops[dim].n1;
      tree step = fd.loops[dim].step;
      n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
                                     true, NULL_TREE, true, GSI_SAME_STMT);
      step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
                                       true, NULL_TREE, true, GSI_SAME_STMT);
      tree threadid;
      if (gimple_omp_for_grid_group_iter (for_stmt))
        {
          gcc_checking_assert (!intra_group);
          threadid = build_call_expr (builtin_decl_explicit
                                      (BUILT_IN_HSA_WORKGROUPID),
                                      1, build_int_cstu (unsigned_type_node,
                                                         dim));
        }
      else if (intra_group)
        threadid = build_call_expr (builtin_decl_explicit
                                    (BUILT_IN_HSA_WORKITEMID),
                                    1, build_int_cstu (unsigned_type_node,
                                                       dim));
      else
        threadid = build_call_expr (builtin_decl_explicit
                                    (BUILT_IN_HSA_WORKITEMABSID),
                                    1, build_int_cstu (unsigned_type_node,
                                                       dim));
      threadid = fold_convert (itype, threadid);
      threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
                                           true, GSI_SAME_STMT);

      tree startvar = fd.loops[dim].v;
      tree t = fold_build2 (MULT_EXPR, itype, threadid, step);
      if (POINTER_TYPE_P (type))
        t = fold_build_pointer_plus (n1, t);
      else
        t = fold_build2 (PLUS_EXPR, type, t, n1);
      t = fold_convert (type, t);
      t = force_gimple_operand_gsi (&gsi, t,
                                    DECL_P (startvar)
                                    && TREE_ADDRESSABLE (startvar),
                                    NULL_TREE, true, GSI_SAME_STMT);
      gassign *assign_stmt = gimple_build_assign (startvar, t);
      gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
    }

  /* Remove the omp for statement.  */
  gsi = gsi_last_nondebug_bb (kfor->entry);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_CONTINUE statement.  */
  gsi = gsi_last_nondebug_bb (kfor->cont);
  gcc_assert (!gsi_end_p (gsi)
              && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_CONTINUE);
  gsi_remove (&gsi, true);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, if necessary.  */
  gsi = gsi_last_nondebug_bb (kfor->exit);
  gcc_assert (!gsi_end_p (gsi)
              && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  if (intra_group)
    gsi_insert_before (&gsi, omp_build_barrier (NULL_TREE), GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Fixup the much simpler CFG.
*/ remove_edge (find_edge (kfor->cont, body_bb)); if (kfor->cont != body_bb) set_immediate_dominator (CDI_DOMINATORS, kfor->cont, body_bb); set_immediate_dominator (CDI_DOMINATORS, kfor->exit, kfor->cont); } /* Structure passed to grid_remap_kernel_arg_accesses so that it can remap argument_decls. */ struct grid_arg_decl_map { tree old_arg; tree new_arg; }; /* Invoked through walk_gimple_op, will remap all PARM_DECLs to the ones pertaining to kernel function. */ static tree grid_remap_kernel_arg_accesses (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = (struct walk_stmt_info *) data; struct grid_arg_decl_map *adm = (struct grid_arg_decl_map *) wi->info; tree t = *tp; if (t == adm->old_arg) *tp = adm->new_arg; *walk_subtrees = !TYPE_P (t) && !DECL_P (t); return NULL_TREE; } /* If TARGET region contains a kernel body for loop, remove its region from the TARGET and expand it in HSA gridified kernel fashion. */ static void grid_expand_target_grid_body (struct omp_region *target) { if (!hsa_gen_requested_p ()) return; gomp_target *tgt_stmt = as_a <gomp_target *> (last_stmt (target->entry)); struct omp_region **pp; for (pp = &target->inner; *pp; pp = &(*pp)->next) if ((*pp)->type == GIMPLE_OMP_GRID_BODY) break; struct omp_region *gpukernel = *pp; tree orig_child_fndecl = gimple_omp_target_child_fn (tgt_stmt); if (!gpukernel) { /* HSA cannot handle OACC stuff. */ if (gimple_omp_target_kind (tgt_stmt) != GF_OMP_TARGET_KIND_REGION) return; gcc_checking_assert (orig_child_fndecl); gcc_assert (!omp_find_clause (gimple_omp_target_clauses (tgt_stmt), OMP_CLAUSE__GRIDDIM_)); cgraph_node *n = cgraph_node::get (orig_child_fndecl); hsa_register_kernel (n); return; } gcc_assert (omp_find_clause (gimple_omp_target_clauses (tgt_stmt), OMP_CLAUSE__GRIDDIM_)); tree inside_block = gimple_block (first_stmt (single_succ (gpukernel->entry))); *pp = gpukernel->next; for (pp = &gpukernel->inner; *pp; pp = &(*pp)->next) if ((*pp)->type == GIMPLE_OMP_FOR) break; struct omp_region *kfor = *pp; gcc_assert (kfor); gomp_for *for_stmt = as_a <gomp_for *> (last_stmt (kfor->entry)); gcc_assert (gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_GRID_LOOP); *pp = kfor->next; if (kfor->inner) { if (gimple_omp_for_grid_group_iter (for_stmt)) { struct omp_region **next_pp; for (pp = &kfor->inner; *pp; pp = next_pp) { next_pp = &(*pp)->next; if ((*pp)->type != GIMPLE_OMP_FOR) continue; gomp_for *inner = as_a <gomp_for *> (last_stmt ((*pp)->entry)); gcc_assert (gimple_omp_for_kind (inner) == GF_OMP_FOR_KIND_GRID_LOOP); grid_expand_omp_for_loop (*pp, true); *pp = (*pp)->next; next_pp = pp; } } expand_omp (kfor->inner); } if (gpukernel->inner) expand_omp (gpukernel->inner); tree kern_fndecl = copy_node (orig_child_fndecl); DECL_NAME (kern_fndecl) = clone_function_name_numbered (kern_fndecl, "kernel"); SET_DECL_ASSEMBLER_NAME (kern_fndecl, DECL_NAME (kern_fndecl)); tree tgtblock = gimple_block (tgt_stmt); tree fniniblock = make_node (BLOCK); BLOCK_ABSTRACT_ORIGIN (fniniblock) = BLOCK_ORIGIN (tgtblock); BLOCK_SOURCE_LOCATION (fniniblock) = BLOCK_SOURCE_LOCATION (tgtblock); BLOCK_SOURCE_END_LOCATION (fniniblock) = BLOCK_SOURCE_END_LOCATION (tgtblock); BLOCK_SUPERCONTEXT (fniniblock) = kern_fndecl; DECL_INITIAL (kern_fndecl) = fniniblock; push_struct_function (kern_fndecl); cfun->function_end_locus = gimple_location (tgt_stmt); init_tree_ssa (cfun); pop_cfun (); tree old_parm_decl = DECL_ARGUMENTS (kern_fndecl); gcc_assert (!DECL_CHAIN (old_parm_decl)); tree new_parm_decl = copy_node (DECL_ARGUMENTS (kern_fndecl)); 
DECL_CONTEXT (new_parm_decl) = kern_fndecl; DECL_ARGUMENTS (kern_fndecl) = new_parm_decl; gcc_assert (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (kern_fndecl)))); DECL_RESULT (kern_fndecl) = copy_node (DECL_RESULT (kern_fndecl)); DECL_CONTEXT (DECL_RESULT (kern_fndecl)) = kern_fndecl; struct function *kern_cfun = DECL_STRUCT_FUNCTION (kern_fndecl); kern_cfun->curr_properties = cfun->curr_properties; grid_expand_omp_for_loop (kfor, false); /* Remove the omp for statement. */ gimple_stmt_iterator gsi = gsi_last_nondebug_bb (gpukernel->entry); gsi_remove (&gsi, true); /* Replace the GIMPLE_OMP_RETURN at the end of the kernel region with a real return. */ gsi = gsi_last_nondebug_bb (gpukernel->exit); gcc_assert (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN); gimple *ret_stmt = gimple_build_return (NULL); gsi_insert_after (&gsi, ret_stmt, GSI_SAME_STMT); gsi_remove (&gsi, true); /* Statements in the first BB in the target construct have been produced by target lowering and must be copied inside the GPUKERNEL, with the two exceptions of the first OMP statement and the OMP_DATA assignment statement. */ gsi = gsi_start_bb (single_succ (gpukernel->entry)); tree data_arg = gimple_omp_target_data_arg (tgt_stmt); tree sender = data_arg ? TREE_VEC_ELT (data_arg, 0) : NULL; for (gimple_stmt_iterator tsi = gsi_start_bb (single_succ (target->entry)); !gsi_end_p (tsi); gsi_next (&tsi)) { gimple *stmt = gsi_stmt (tsi); if (is_gimple_omp (stmt)) break; if (sender && is_gimple_assign (stmt) && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR && TREE_OPERAND (gimple_assign_rhs1 (stmt), 0) == sender) continue; gimple *copy = gimple_copy (stmt); gsi_insert_before (&gsi, copy, GSI_SAME_STMT); gimple_set_block (copy, fniniblock); } move_sese_region_to_fn (kern_cfun, single_succ (gpukernel->entry), gpukernel->exit, inside_block); cgraph_node *kcn = cgraph_node::get_create (kern_fndecl); kcn->mark_force_output (); cgraph_node *orig_child = cgraph_node::get (orig_child_fndecl); hsa_register_kernel (kcn, orig_child); cgraph_node::add_new_function (kern_fndecl, true); push_cfun (kern_cfun); cgraph_edge::rebuild_edges (); /* Re-map any mention of the PARM_DECL of the original function to the PARM_DECL of the new one. TODO: It would be great if lowering produced references into the GPU kernel decl straight away and we did not have to do this. */ struct grid_arg_decl_map adm; adm.old_arg = old_parm_decl; adm.new_arg = new_parm_decl; basic_block bb; FOR_EACH_BB_FN (bb, kern_cfun) { for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); struct walk_stmt_info wi; memset (&wi, 0, sizeof (wi)); wi.info = &adm; walk_gimple_op (stmt, grid_remap_kernel_arg_accesses, &wi); } } pop_cfun (); return; } /* Expand the parallel region tree rooted at REGION. Expansion proceeds in depth-first order. Innermost regions are expanded first. This way, parallel regions that require a new function to be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any internal dependencies in their body. */ static void expand_omp (struct omp_region *region) { omp_any_child_fn_dumped = false; while (region) { location_t saved_location; gimple *inner_stmt = NULL; /* First, determine whether this is a combined parallel+workshare region. 
*/ if (region->type == GIMPLE_OMP_PARALLEL) determine_parallel_type (region); else if (region->type == GIMPLE_OMP_TARGET) grid_expand_target_grid_body (region); if (region->type == GIMPLE_OMP_FOR && gimple_omp_for_combined_p (last_stmt (region->entry))) inner_stmt = last_stmt (region->inner->entry); if (region->inner) expand_omp (region->inner); saved_location = input_location; if (gimple_has_location (last_stmt (region->entry))) input_location = gimple_location (last_stmt (region->entry)); switch (region->type) { case GIMPLE_OMP_PARALLEL: case GIMPLE_OMP_TASK: expand_omp_taskreg (region); break; case GIMPLE_OMP_FOR: expand_omp_for (region, inner_stmt); break; case GIMPLE_OMP_SECTIONS: expand_omp_sections (region); break; case GIMPLE_OMP_SECTION: /* Individual omp sections are handled together with their parent GIMPLE_OMP_SECTIONS region. */ break; case GIMPLE_OMP_SINGLE: expand_omp_single (region); break; case GIMPLE_OMP_ORDERED: { gomp_ordered *ord_stmt = as_a <gomp_ordered *> (last_stmt (region->entry)); if (omp_find_clause (gimple_omp_ordered_clauses (ord_stmt), OMP_CLAUSE_DEPEND)) { /* We'll expand these when expanding corresponding worksharing region with ordered(n) clause. */ gcc_assert (region->outer && region->outer->type == GIMPLE_OMP_FOR); region->ord_stmt = ord_stmt; break; } } /* FALLTHRU */ case GIMPLE_OMP_MASTER: case GIMPLE_OMP_TASKGROUP: case GIMPLE_OMP_CRITICAL: case GIMPLE_OMP_TEAMS: expand_omp_synch (region); break; case GIMPLE_OMP_ATOMIC_LOAD: expand_omp_atomic (region); break; case GIMPLE_OMP_TARGET: expand_omp_target (region); break; default: gcc_unreachable (); } input_location = saved_location; region = region->next; } if (omp_any_child_fn_dumped) { if (dump_file) dump_function_header (dump_file, current_function_decl, dump_flags); omp_any_child_fn_dumped = false; } } /* Helper for build_omp_regions. Scan the dominator tree starting at block BB. PARENT is the region that contains BB. If SINGLE_TREE is true, the function ends once a single tree is built (otherwise, whole forest of OMP constructs may be built). */ static void build_omp_regions_1 (basic_block bb, struct omp_region *parent, bool single_tree) { gimple_stmt_iterator gsi; gimple *stmt; basic_block son; gsi = gsi_last_nondebug_bb (bb); if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi))) { struct omp_region *region; enum gimple_code code; stmt = gsi_stmt (gsi); code = gimple_code (stmt); if (code == GIMPLE_OMP_RETURN) { /* STMT is the return point out of region PARENT. Mark it as the exit point and make PARENT the immediately enclosing region. */ gcc_assert (parent); region = parent; region->exit = bb; parent = parent->outer; } else if (code == GIMPLE_OMP_ATOMIC_STORE) { /* GIMPLE_OMP_ATOMIC_STORE is analogous to GIMPLE_OMP_RETURN, but matches with GIMPLE_OMP_ATOMIC_LOAD. */ gcc_assert (parent); gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD); region = parent; region->exit = bb; parent = parent->outer; } else if (code == GIMPLE_OMP_CONTINUE) { gcc_assert (parent); parent->cont = bb; } else if (code == GIMPLE_OMP_SECTIONS_SWITCH) { /* GIMPLE_OMP_SECTIONS_SWITCH is part of GIMPLE_OMP_SECTIONS, and we do nothing for it. */ } else { region = new_omp_region (bb, code, parent); /* Otherwise... 
*/ if (code == GIMPLE_OMP_TARGET) { switch (gimple_omp_target_kind (stmt)) { case GF_OMP_TARGET_KIND_REGION: case GF_OMP_TARGET_KIND_OACC_PARALLEL: case GF_OMP_TARGET_KIND_OACC_KERNELS: case GF_OMP_TARGET_KIND_OACC_SERIAL: break; case GF_OMP_TARGET_KIND_UPDATE: case GF_OMP_TARGET_KIND_ENTER_DATA: case GF_OMP_TARGET_KIND_EXIT_DATA: case GF_OMP_TARGET_KIND_DATA: case GF_OMP_TARGET_KIND_OACC_DATA: case GF_OMP_TARGET_KIND_OACC_HOST_DATA: case GF_OMP_TARGET_KIND_OACC_UPDATE: case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: case GF_OMP_TARGET_KIND_OACC_DECLARE: /* ..., other than for those stand-alone directives... */ region = NULL; break; default: gcc_unreachable (); } } else if (code == GIMPLE_OMP_ORDERED && omp_find_clause (gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt)), OMP_CLAUSE_DEPEND)) /* #pragma omp ordered depend is also just a stand-alone directive. */ region = NULL; else if (code == GIMPLE_OMP_TASK && gimple_omp_task_taskwait_p (stmt)) /* #pragma omp taskwait depend(...) is a stand-alone directive. */ region = NULL; /* ..., this directive becomes the parent for a new region. */ if (region) parent = region; } } if (single_tree && !parent) return; for (son = first_dom_son (CDI_DOMINATORS, bb); son; son = next_dom_son (CDI_DOMINATORS, son)) build_omp_regions_1 (son, parent, single_tree); } /* Builds the tree of OMP regions rooted at ROOT, storing it to root_omp_region. */ static void build_omp_regions_root (basic_block root) { gcc_assert (root_omp_region == NULL); build_omp_regions_1 (root, NULL, true); gcc_assert (root_omp_region != NULL); } /* Expands omp construct (and its subconstructs) starting in HEAD. */ void omp_expand_local (basic_block head) { build_omp_regions_root (head); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "\nOMP region tree\n\n"); dump_omp_region (dump_file, root_omp_region, 0); fprintf (dump_file, "\n"); } remove_exit_barriers (root_omp_region); expand_omp (root_omp_region); omp_free_regions (); } /* Scan the CFG and build a tree of OMP regions. Return the root of the OMP region tree. */ static void build_omp_regions (void) { gcc_assert (root_omp_region == NULL); calculate_dominance_info (CDI_DOMINATORS); build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false); } /* Main entry point for expanding OMP-GIMPLE into runtime calls. */ static unsigned int execute_expand_omp (void) { build_omp_regions (); if (!root_omp_region) return 0; if (dump_file) { fprintf (dump_file, "\nOMP region tree\n\n"); dump_omp_region (dump_file, root_omp_region, 0); fprintf (dump_file, "\n"); } remove_exit_barriers (root_omp_region); expand_omp (root_omp_region); if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP)) verify_loop_structure (); cleanup_tree_cfg (); omp_free_regions (); return 0; } /* OMP expansion -- the default pass, run before creation of SSA form. 
*/ namespace { const pass_data pass_data_expand_omp = { GIMPLE_PASS, /* type */ "ompexp", /* name */ OPTGROUP_OMP, /* optinfo_flags */ TV_NONE, /* tv_id */ PROP_gimple_any, /* properties_required */ PROP_gimple_eomp, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0, /* todo_flags_finish */ }; class pass_expand_omp : public gimple_opt_pass { public: pass_expand_omp (gcc::context *ctxt) : gimple_opt_pass (pass_data_expand_omp, ctxt) {} /* opt_pass methods: */ virtual unsigned int execute (function *) { bool gate = ((flag_openacc != 0 || flag_openmp != 0 || flag_openmp_simd != 0) && !seen_error ()); /* This pass always runs, to provide PROP_gimple_eomp. But often, there is nothing to do. */ if (!gate) return 0; return execute_expand_omp (); } }; // class pass_expand_omp } // anon namespace gimple_opt_pass * make_pass_expand_omp (gcc::context *ctxt) { return new pass_expand_omp (ctxt); } namespace { const pass_data pass_data_expand_omp_ssa = { GIMPLE_PASS, /* type */ "ompexpssa", /* name */ OPTGROUP_OMP, /* optinfo_flags */ TV_NONE, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ PROP_gimple_eomp, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_cleanup_cfg | TODO_rebuild_alias, /* todo_flags_finish */ }; class pass_expand_omp_ssa : public gimple_opt_pass { public: pass_expand_omp_ssa (gcc::context *ctxt) : gimple_opt_pass (pass_data_expand_omp_ssa, ctxt) {} /* opt_pass methods: */ virtual bool gate (function *fun) { return !(fun->curr_properties & PROP_gimple_eomp); } virtual unsigned int execute (function *) { return execute_expand_omp (); } opt_pass * clone () { return new pass_expand_omp_ssa (m_ctxt); } }; // class pass_expand_omp_ssa } // anon namespace gimple_opt_pass * make_pass_expand_omp_ssa (gcc::context *ctxt) { return new pass_expand_omp_ssa (ctxt); } /* Called from tree-cfg.c::make_edges to create cfg edges for all relevant GIMPLE_* codes. 
*/ bool omp_make_gimple_edges (basic_block bb, struct omp_region **region, int *region_idx) { gimple *last = last_stmt (bb); enum gimple_code code = gimple_code (last); struct omp_region *cur_region = *region; bool fallthru = false; switch (code) { case GIMPLE_OMP_PARALLEL: case GIMPLE_OMP_FOR: case GIMPLE_OMP_SINGLE: case GIMPLE_OMP_TEAMS: case GIMPLE_OMP_MASTER: case GIMPLE_OMP_TASKGROUP: case GIMPLE_OMP_CRITICAL: case GIMPLE_OMP_SECTION: case GIMPLE_OMP_GRID_BODY: cur_region = new_omp_region (bb, code, cur_region); fallthru = true; break; case GIMPLE_OMP_TASK: cur_region = new_omp_region (bb, code, cur_region); fallthru = true; if (gimple_omp_task_taskwait_p (last)) cur_region = cur_region->outer; break; case GIMPLE_OMP_ORDERED: cur_region = new_omp_region (bb, code, cur_region); fallthru = true; if (omp_find_clause (gimple_omp_ordered_clauses (as_a <gomp_ordered *> (last)), OMP_CLAUSE_DEPEND)) cur_region = cur_region->outer; break; case GIMPLE_OMP_TARGET: cur_region = new_omp_region (bb, code, cur_region); fallthru = true; switch (gimple_omp_target_kind (last)) { case GF_OMP_TARGET_KIND_REGION: case GF_OMP_TARGET_KIND_OACC_PARALLEL: case GF_OMP_TARGET_KIND_OACC_KERNELS: case GF_OMP_TARGET_KIND_OACC_SERIAL: break; case GF_OMP_TARGET_KIND_UPDATE: case GF_OMP_TARGET_KIND_ENTER_DATA: case GF_OMP_TARGET_KIND_EXIT_DATA: case GF_OMP_TARGET_KIND_DATA: case GF_OMP_TARGET_KIND_OACC_DATA: case GF_OMP_TARGET_KIND_OACC_HOST_DATA: case GF_OMP_TARGET_KIND_OACC_UPDATE: case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: case GF_OMP_TARGET_KIND_OACC_DECLARE: cur_region = cur_region->outer; break; default: gcc_unreachable (); } break; case GIMPLE_OMP_SECTIONS: cur_region = new_omp_region (bb, code, cur_region); fallthru = true; break; case GIMPLE_OMP_SECTIONS_SWITCH: fallthru = false; break; case GIMPLE_OMP_ATOMIC_LOAD: case GIMPLE_OMP_ATOMIC_STORE: fallthru = true; break; case GIMPLE_OMP_RETURN: /* In the case of a GIMPLE_OMP_SECTION, the edge will go somewhere other than the next block. This will be created later. */ cur_region->exit = bb; if (cur_region->type == GIMPLE_OMP_TASK) /* Add an edge corresponding to not scheduling the task immediately. */ make_edge (cur_region->entry, bb, EDGE_ABNORMAL); fallthru = cur_region->type != GIMPLE_OMP_SECTION; cur_region = cur_region->outer; break; case GIMPLE_OMP_CONTINUE: cur_region->cont = bb; switch (cur_region->type) { case GIMPLE_OMP_FOR: /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE succs edges as abnormal to prevent splitting them. */ single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL; /* Make the loopback edge. */ make_edge (bb, single_succ (cur_region->entry), EDGE_ABNORMAL); /* Create an edge from GIMPLE_OMP_FOR to exit, which corresponds to the case that the body of the loop is not executed at all. */ make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL); make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL); fallthru = false; break; case GIMPLE_OMP_SECTIONS: /* Wire up the edges into and out of the nested sections. */ { basic_block switch_bb = single_succ (cur_region->entry); struct omp_region *i; for (i = cur_region->inner; i ; i = i->next) { gcc_assert (i->type == GIMPLE_OMP_SECTION); make_edge (switch_bb, i->entry, 0); make_edge (i->exit, bb, EDGE_FALLTHRU); } /* Make the loopback edge to the block with GIMPLE_OMP_SECTIONS_SWITCH. */ make_edge (bb, switch_bb, 0); /* Make the edge from the switch to exit. 
*/ make_edge (switch_bb, bb->next_bb, 0); fallthru = false; } break; case GIMPLE_OMP_TASK: fallthru = true; break; default: gcc_unreachable (); } break; default: gcc_unreachable (); } if (*region != cur_region) { *region = cur_region; if (cur_region) *region_idx = cur_region->entry->index; else *region_idx = 0; } return fallthru; } #include "gt-omp-expand.h"
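/* Illustrative sketch (not part of omp-expand.c): the bit layout produced by
   get_target_argument_value and push_target_argument_according_to_value
   above.  A value that fits into the signed 16-bit range is shifted up by
   GOMP_TARGET_ARG_VALUE_SHIFT and OR-ed with the device and (pre-shifted)
   identifier bits; anything larger is announced with
   GOMP_TARGET_ARG_SUBSEQUENT_PARAM and travels as a separate array element.
   The SKETCH_* constants are meant to mirror gomp-constants.h; the
   encode_target_arg helper, the driver and the sample numbers are
   hypothetical.  */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_ARG_SUBSEQUENT_PARAM (1 << 7) /* cf. GOMP_TARGET_ARG_SUBSEQUENT_PARAM */
#define SKETCH_ARG_VALUE_SHIFT 16            /* cf. GOMP_TARGET_ARG_VALUE_SHIFT */

static intptr_t
encode_target_arg (int device, int id, long value)
{
  if (value > -(1L << 15) && value < (1L << 15))
    /* Device, identifier and value all fit into one pointer-sized word.  */
    return ((intptr_t) value * ((intptr_t) 1 << SKETCH_ARG_VALUE_SHIFT))
           | device | id;
  /* Otherwise only mark the identifier; the value would follow as an
     element of its own in the argument array.  */
  return (intptr_t) (device | SKETCH_ARG_SUBSEQUENT_PARAM | id);
}

int
main (void)
{
  /* Device 0 ("all devices"), identifier 1 << 8, value 8 -- sample numbers
     only.  Prints 0x80100.  */
  printf ("%#lx\n", (unsigned long) encode_target_arg (0, 1 << 8, 8));
  return 0;
}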
test.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

int main(void)
{
    int inode = 1, nnode = 1;
#ifdef _OPENMP
    nnode = omp_get_num_procs();
    omp_set_num_threads(nnode);
#endif
    printf ("Found %d CPUs. Using all of them!\n", nnode);
    /* inode is private, so inside the region it is unassigned until the
       omp_get_thread_num() call below; without OpenMP the pragma is
       ignored and inode keeps its initial value of 1.  */
#pragma omp parallel private(inode)
    {
#ifdef _OPENMP
        inode = omp_get_thread_num();
#endif
        printf ("Hello world from node %d!\n", inode);
    }
    return 0;
}
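/* Usage note (illustrative): the _OPENMP guards let this file build both
   with and without OpenMP support, e.g. with GCC:

       gcc -fopenmp -o hello test.c && ./hello

   Without -fopenmp the pragma is ignored and the program prints a single
   "Hello world from node 1!" line.  */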
vc5.c
#define A(a, x, y, z) (a[(z) * ny * nx + (y) * nx + x]) static void inner(const float *restrict const f, float *restrict const fp, const int nx, const int ny, const int nz, const int nxi, const float *restrict const model_padded2_dt2, const float dt, const float *restrict const sources, const int *restrict const sources_x, const int *restrict const sources_y, const int *restrict const sources_z, const int num_sources, const int source_len, const float *restrict const fd_coeff, const int step) { int x; int y; int z; int i; int sx; int sy; int sz; float f_xx; #pragma omp parallel for default(none) private(y, x, f_xx, i) for (z = 8; z < nz - 8; z++) { for (y = 8; y < ny - 8; y++) { for (x = 8; x < nxi + 8; x++) { f_xx = 3 * fd_coeff[0] * A(f, x, y, z); for (i = 1; i < 9; i++) { f_xx += fd_coeff[i] * (A(f, x + i, y, z) + A(f, x - i, y, z) + A(f, x, y + i, z) + A(f, x, y - i, z) + A(f, x, y, z + i) + A(f, x, y, z - i)); } A(fp, x, y, z) = A(model_padded2_dt2, x, y, z) * f_xx + 2 * A(f, x, y, z) - A(fp, x, y, z); } } } for (i = 0; i < num_sources; i++) { sx = sources_x[i] + 8; sy = sources_y[i] + 8; sz = sources_z[i] + 8; A(fp, sx, sy, sz) += A(model_padded2_dt2, sx, sy, sz) * sources[i * source_len + step] * dt; } } void step(float *restrict f, float *restrict fp, const int nx, const int ny, const int nz, const int nxi, const float *restrict const model_padded2_dt2, const float dx, const float dt, const float *restrict const sources, const int *restrict const sources_x, const int *restrict const sources_y, const int *restrict const sources_z, const int num_sources, const int source_len, const int num_steps) { int step; float *tmp; float fd_coeff[9] = { -924708642.0f / 302702400 / (dx * dx), 538137600.0f / 302702400 / (dx * dx), -94174080.0f / 302702400 / (dx * dx), 22830080.0f / 302702400 / (dx * dx), -5350800.0f / 302702400 / (dx * dx), 1053696.0f / 302702400 / (dx * dx), -156800.0f / 302702400 / (dx * dx), 15360.0f / 302702400 / (dx * dx), -735.0f / 302702400 / (dx * dx) }; for (step = 0; step < num_steps; step++) { inner(f, fp, nx, ny, nz, nxi, model_padded2_dt2, dt, sources, sources_x, sources_y, sources_z, num_sources, source_len, fd_coeff, step); tmp = f; f = fp; fp = tmp; } }
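/* Minimal driver sketch for step() above -- not part of vc5.c; every size
   and value below is made up for illustration.  The stencil reaches 8 cells
   in each direction, so the grid carries 8 padding cells per side
   (nx = nxi + 16 for interior width nxi) and source coordinates are given
   relative to the interior.  The model array's name suggests it holds
   velocity-squared times dt-squared, already padded.  Build with something
   like "gcc -fopenmp driver.c vc5.c" (command illustrative).  */
#include <stdlib.h>

void step(float *restrict f, float *restrict fp, const int nx, const int ny,
          const int nz, const int nxi,
          const float *restrict const model_padded2_dt2, const float dx,
          const float dt, const float *restrict const sources,
          const int *restrict const sources_x,
          const int *restrict const sources_y,
          const int *restrict const sources_z, const int num_sources,
          const int source_len, const int num_steps);

int main(void)
{
    const int nxi = 16, nx = nxi + 16, ny = 32, nz = 32;
    const int n = nx * ny * nz;
    const float dx = 5.0f, dt = 0.001f;
    float *f = calloc(n, sizeof(float));
    float *fp = calloc(n, sizeof(float));
    float *model = malloc(n * sizeof(float));
    if (f == NULL || fp == NULL || model == NULL)
        return 1;
    for (int i = 0; i < n; i++)
        model[i] = 1500.0f * 1500.0f * dt * dt; /* constant-velocity medium */
    /* One source in the middle of the interior, one time sample, one step. */
    float src[1] = { 1.0f };
    int sx[1] = { nxi / 2 }, sy[1] = { ny / 2 - 8 }, sz[1] = { nz / 2 - 8 };
    step(f, fp, nx, ny, nz, nxi, model, dx, dt, src, sx, sy, sz, 1, 1, 1);
    free(f);
    free(fp);
    free(model);
    return 0;
}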
averaging.c
/* This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license. Github repository: https://github.com/OpenNWP/GAME */ /* This file contains functions that perform averagings. */ #include <stdio.h> #include <geos95.h> #include "../game_types.h" int remap_verpri2horpri_vector(Vector_field vector_field, int layer_index, int h_index, double *component, Grid *grid) { /* reconstructs the vertical vector component *component at edge h_index in layer layer_index */ *component // layer above = grid -> inner_product_weights[8*(layer_index*NO_OF_SCALARS_H + grid -> from_index[h_index]) + 6] *vector_field[layer_index*NO_OF_VECTORS_PER_LAYER + grid -> from_index[h_index]]; *component += grid -> inner_product_weights[8*(layer_index*NO_OF_SCALARS_H + grid -> to_index[h_index]) + 6] *vector_field[layer_index*NO_OF_VECTORS_PER_LAYER + grid -> to_index[h_index]]; // layer below if (layer_index < NO_OF_LAYERS - 1) { *component += grid -> inner_product_weights[8*(layer_index*NO_OF_SCALARS_H + grid -> from_index[h_index]) + 7] *vector_field[(layer_index + 1)*NO_OF_VECTORS_PER_LAYER + grid -> from_index[h_index]]; *component += grid -> inner_product_weights[8*(layer_index*NO_OF_SCALARS_H + grid -> to_index[h_index]) + 7] *vector_field[(layer_index + 1)*NO_OF_VECTORS_PER_LAYER + grid -> to_index[h_index]]; } // horizontal average *component = 0.5*(*component); return 0; } int vertical_contravariant_corr(Vector_field vector_field, int layer_index, int h_index, Grid *grid, double *result) { /* calculates (the vertical contravariant component - the vertical covariant component) of a vector field out of the horizontal contravariant components */ // Attention: adjacent_signs_h appears twice, thus does not need to be taken into account. 
*result = 0; int scalar_index, vector_index; int no_of_edges = 6; if (h_index < NO_OF_PENTAGONS) { no_of_edges = 5; } if (layer_index >= NO_OF_LAYERS - grid -> no_of_oro_layers) { if (layer_index == NO_OF_LAYERS - grid -> no_of_oro_layers) { for (int i = 0; i < no_of_edges; ++i) { scalar_index = layer_index*NO_OF_SCALARS_H + h_index; vector_index = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + grid -> adjacent_vector_indices_h[6*h_index + i]; *result += -0.5 *grid -> inner_product_weights[8*scalar_index + i] *grid -> slope[vector_index] *vector_field[vector_index]; } } else { for (int i = 0; i < no_of_edges; ++i) { scalar_index = (layer_index - 1)*NO_OF_SCALARS_H + h_index; vector_index = NO_OF_SCALARS_H + (layer_index - 1)*NO_OF_VECTORS_PER_LAYER + grid -> adjacent_vector_indices_h[6*h_index + i]; *result += -0.5 *grid -> inner_product_weights[8*scalar_index + i] *grid -> slope[vector_index] *vector_field[vector_index]; } for (int i = 0; i < no_of_edges; ++i) { scalar_index = layer_index*NO_OF_SCALARS_H + h_index; vector_index = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + grid -> adjacent_vector_indices_h[6*h_index + i]; *result += -0.5 *grid -> inner_product_weights[8*scalar_index + i] *grid -> slope[vector_index] *vector_field[vector_index]; } } } return 0; } int horizontal_covariant(Vector_field vector_field, int layer_index, int h_index, Grid *grid, double *result) { /* calculates the horizontal covariant component of a vector field out of the horizontal contravariant and the vertical covariant components */ int vector_index = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index; *result = vector_field[vector_index]; if (layer_index >= NO_OF_LAYERS - grid -> no_of_oro_layers) { double vertical_component = 0; remap_verpri2horpri_vector(vector_field, layer_index, h_index, &vertical_component, grid); *result += grid -> slope[vector_index]*vertical_component; } return 0; } int vector_field_hor_cov_to_con(Vector_field cov_to_con_field, Grid *grid) { /* This function transforms the covariant horizontal measure numbers of a vector field to contravariant measure numbers. */ int layer_index, h_index, vector_index; double vertical_gradient; // loop over all horizontal vector points in the orography layers #pragma omp parallel for private(layer_index, h_index, vertical_gradient, vector_index) for (int i = 0; i < grid -> no_of_oro_layers*NO_OF_VECTORS_H; ++i) { layer_index = i/NO_OF_VECTORS_H; h_index = i - layer_index*NO_OF_VECTORS_H; remap_verpri2horpri_vector(cov_to_con_field, layer_index + (NO_OF_LAYERS - grid -> no_of_oro_layers), h_index, &vertical_gradient, grid); vector_index = NO_OF_SCALARS_H + (NO_OF_LAYERS - grid -> no_of_oro_layers + layer_index)*NO_OF_VECTORS_PER_LAYER + h_index; cov_to_con_field[vector_index] += -grid -> slope[vector_index]*vertical_gradient; } return 0; } int tangential_wind(Vector_field in_field, int layer_index, int h_index, double *component, Grid *grid) { /* This function computes the tangential component *component of the vector field in_field at edge h_index in layer layer_index using the TRSK weights. 
    */
    // initializing the result with zero
    *component = 0;
    // loop over the maximum of ten edges
    for (int i = 0; i < 10; ++i)
    {
        *component += grid -> trsk_weights[10*h_index + i]
        *in_field[NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + grid -> trsk_indices[10*h_index + i]];
    }
    return 0;
}

int calc_uv_at_edge(Vector_field in_field, Vector_field out_field_u, Vector_field out_field_v, Grid *grid)
{
    /*
    This function diagnoses eastward and northward components of a vector field at edges.
    */
    int layer_index, h_index;
    // orthogonal and tangential component at edge, respectively
    double wind_0, wind_1;
    #pragma omp parallel for private(layer_index, h_index, wind_0, wind_1)
    for (int i = 0; i < NO_OF_H_VECTORS; ++i)
    {
        layer_index = i/NO_OF_VECTORS_H;
        h_index = i - layer_index*NO_OF_VECTORS_H;
        wind_0 = in_field[NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index];
        // finding the tangential component
        tangential_wind(in_field, layer_index, h_index, &wind_1, grid);
        // turning the Cartesian coordinate system to obtain u and v
        passive_turn(wind_0, wind_1, -grid -> direction[h_index],
        &out_field_u[NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index],
        &out_field_v[NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index]);
    }
    return 0;
}

int curl_field_to_cells(Curl_field in_field, Scalar_field out_field, Grid *grid)
{
    /*
    This function averages a curl field from edges to cell centers.
    */
    int layer_index, h_index, no_of_edges;
    #pragma omp parallel for private (layer_index, h_index, no_of_edges)
    for (int i = 0; i < NO_OF_SCALARS; ++i)
    {
        layer_index = i/NO_OF_SCALARS_H;
        h_index = i - layer_index*NO_OF_SCALARS_H;
        // initializing the result with zero
        out_field[i] = 0;
        // determining the number of edges of the cell at hand
        no_of_edges = 6;
        if (h_index < NO_OF_PENTAGONS)
        {
            no_of_edges = 5;
        }
        // loop over all edges of the respective cell
        for (int j = 0; j < no_of_edges; ++j)
        {
            out_field[i] += 0.5
            *grid -> inner_product_weights[8*i + j]
            *in_field[NO_OF_VECTORS_H + layer_index*2*NO_OF_VECTORS_H + grid -> adjacent_vector_indices_h[6*h_index + j]];
        }
    }
    return 0;
}

int edges_to_cells(Vector_field in_field, Scalar_field out_field, Grid *grid)
{
    /*
    This function averages a vector field from edges to cell centers.
    */
    int layer_index, h_index, no_of_edges;
    #pragma omp parallel for private (layer_index, h_index, no_of_edges)
    for (int i = 0; i < NO_OF_SCALARS; ++i)
    {
        layer_index = i/NO_OF_SCALARS_H;
        h_index = i - layer_index*NO_OF_SCALARS_H;
        // initializing the result with zero
        out_field[i] = 0;
        // determining the number of edges of the cell at hand
        no_of_edges = 6;
        if (h_index < NO_OF_PENTAGONS)
        {
            no_of_edges = 5;
        }
        // loop over all cell edges
        for (int j = 0; j < no_of_edges; ++j)
        {
            out_field[i] += 0.5
            *grid -> inner_product_weights[8*i + j]
            *in_field[NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + grid -> adjacent_vector_indices_h[6*h_index + j]];
        }
    }
    return 0;
}
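/* Illustrative sketch (not part of averaging.c): the div/mod pattern every
   parallel loop above uses to split a flat point index into a layer index
   and a horizontal index.  TOY_SCALARS_H is a made-up stand-in for
   NO_OF_SCALARS_H.  */
#include <stdio.h>

#define TOY_SCALARS_H 5

int main(void)
{
    for (int i = 0; i < 3*TOY_SCALARS_H; ++i)
    {
        int layer_index = i/TOY_SCALARS_H;
        int h_index = i - layer_index*TOY_SCALARS_H;
        printf("i = %2d -> layer_index = %d, h_index = %d\n",
               i, layer_index, h_index);
    }
    return 0;
}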
place_report_mpi_omp.c
#define _GNU_SOURCE #include <stdio.h> #include <unistd.h> #include <string.h> #include <sched.h> #include <mpi.h> #include <omp.h> /* Heavily modified from xthi.c code */ /* xthi.c code is used in examples for hybrid MPI/OpenMP affinity from a few HPC sites */ /* xthi.c originally borrowed some of this code from util-linux-2.13-pre7/schedutils/taskset.c */ static char *cpuset_to_cstr(cpu_set_t *mask, char *str) { char *ptr = str; int i, j, entry_made = 0; for (i = 0; i < CPU_SETSIZE; i++) { if (CPU_ISSET(i, mask)) { int run = 0; entry_made = 1; for (j = i + 1; j < CPU_SETSIZE; j++) { if (CPU_ISSET(j, mask)) run++; else break; } if (!run) sprintf(ptr, "%d,", i); else if (run == 1) { sprintf(ptr, "%d,%d,", i, i + 1); i++; } else { sprintf(ptr, "%d-%d,", i, i + run); i += run; } while (*ptr != 0) ptr++; } } ptr -= entry_made; *ptr = 0; return(str); } void place_report_mpi_omp(void) { int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); int socket_global[144]; char clbuf_global[144][7 * CPU_SETSIZE]; #pragma omp parallel { if (omp_get_thread_num() == 0 && rank == 0){ printf("Running with %d thread(s)\n",omp_get_num_threads()); int bind_policy = omp_get_proc_bind(); switch (bind_policy) { case omp_proc_bind_false: printf(" proc_bind is false\n"); break; case omp_proc_bind_true: printf(" proc_bind is true\n"); break; case omp_proc_bind_master: printf(" proc_bind is master\n"); break; case omp_proc_bind_close: printf(" proc_bind is close\n"); break; case omp_proc_bind_spread: printf(" proc_bind is spread\n"); } printf(" proc_num_places is %d\n",omp_get_num_places()); } int thread = omp_get_thread_num(); cpu_set_t coremask; char clbuf[7 * CPU_SETSIZE], hnbuf[64]; memset(clbuf, 0, sizeof(clbuf)); memset(hnbuf, 0, sizeof(hnbuf)); gethostname(hnbuf, sizeof(hnbuf)); sched_getaffinity(0, sizeof(coremask), &coremask); cpuset_to_cstr(&coremask, clbuf); strcpy(clbuf_global[thread],clbuf); socket_global[omp_get_thread_num()] = omp_get_place_num(); #pragma omp barrier #pragma omp master for (int i=0; i<omp_get_num_threads(); i++){ printf("Hello from rank %02d, thread %02d, on %s. (core affinity = %2s) OpenMP socket is %2d\n", rank, i, hnbuf, clbuf_global[i], socket_global[i]); } } }
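/* Hypothetical driver (not part of place_report_mpi_omp.c): the reporting
   function assumes MPI is already initialized, so a caller looks roughly
   like this.  Build with something like
   "mpicc -fopenmp main.c place_report_mpi_omp.c" (command illustrative).  */
#include <mpi.h>

void place_report_mpi_omp(void);

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    place_report_mpi_omp();  /* every rank reports its thread placement */
    MPI_Finalize();
    return 0;
}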
omp_single_nowait.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" int my_iterations; #pragma omp threadprivate(my_iterations) int test_omp_single_nowait() { int nr_iterations; int total_iterations = 0; int i; nr_iterations = 0; my_iterations = 0; #pragma omp parallel private(i) { for (i = 0; i < LOOPCOUNT; i++) { #pragma omp single nowait { #pragma omp atomic nr_iterations++; } } } #pragma omp parallel private(i) { my_iterations = 0; for (i = 0; i < LOOPCOUNT; i++) { #pragma omp single nowait { my_iterations++; } } #pragma omp critical { total_iterations += my_iterations; } } return ((nr_iterations == LOOPCOUNT) && (total_iterations == LOOPCOUNT)); } /* end of check_single_nowait*/ int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_single_nowait()) { num_failed++; } } return num_failed; }
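/* Note on the invariants checked above: each thread executes the loop
   LOOPCOUNT times, and every team-wide encounter of the single construct is
   executed by exactly one thread, so both the shared counter and the sum of
   the threadprivate counters should reach exactly LOOPCOUNT.  The nowait
   clause only removes the implicit barrier at the end of the single region;
   it does not change how many times the single body runs.  */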
a.32.1.c
/* { dg-do compile } */ /* { dg-require-effective-target tls } */ #include <stdlib.h> float *work; int size; float tol; void build (void); #pragma omp threadprivate(work,size,tol) void a32 (float t, int n) { tol = t; size = n; #pragma omp parallel copyin(tol,size) { build (); } } void build () { int i; work = (float *) malloc (sizeof (float) * size); for (i = 0; i < size; ++i) work[i] = tol; }
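/* Sketch of the copyin semantics exercised above (the driver value is
   hypothetical): on entry to the parallel region, the primary thread's
   threadprivate copies of tol and size -- set in a32() -- are copied into
   every other thread's copies, so each thread's build() allocates its own
   work array of the same size and fills it with the same tolerance.

       a32 (0.5f, 1024);   // every thread builds a 1024-element work array
*/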
omp3.c
#include <stdio.h>
#include <omp.h>

int main()
{
    int i;
    /* omp_set_num_threads() needs a thread count; one thread per available
       processor is a reasonable (assumed) choice here. */
    omp_set_num_threads(omp_get_num_procs());
#pragma omp parallel for
    for (i = 0; i <= 15; i++) {
        if (omp_get_thread_num() == 0) {
            printf("%d\n", omp_get_num_procs());
            printf("%d\n", omp_get_num_threads());
        }
    }
    return 0;
}
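/* Build note (illustrative): compile with OpenMP enabled, e.g.
   "gcc -fopenmp omp3.c".  Only thread 0 prints, so the processor count and
   the team size appear once per loop iteration assigned to thread 0.  */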
feature.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF EEEEE AAA TTTTT U U RRRR EEEEE % % F E A A T U U R R E % % FFF EEE AAAAA T U U RRRR EEE % % F E A A T U U R R E % % F EEEEE A A T UUU R R EEEEE % % % % % % MagickCore Image Feature Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/feature.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/image-private.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/morphology-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/timer.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a n n y E d g e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of % edges in images. % % The format of the CannyEdgeImage method is: % % Image *CannyEdgeImage(const Image *image,const double radius, % const double sigma,const double lower_percent, % const double upper_percent,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o radius: the radius of the gaussian smoothing filter. % % o sigma: the sigma of the gaussian smoothing filter. % % o lower_percent: percentage of edge pixels in the lower threshold. % % o upper_percent: percentage of edge pixels in the upper threshold. % % o exception: return any errors or warnings in this structure. % */ typedef struct _CannyInfo { double magnitude, intensity; int orientation; ssize_t x, y; } CannyInfo; static inline MagickBooleanType IsAuthenticPixel(const Image *image, const ssize_t x,const ssize_t y) { if ((x < 0) || (x >= (ssize_t) image->columns)) return(MagickFalse); if ((y < 0) || (y >= (ssize_t) image->rows)) return(MagickFalse); return(MagickTrue); } static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view, MatrixInfo *canny_cache,const ssize_t x,const ssize_t y, const double lower_threshold,ExceptionInfo *exception) { CannyInfo edge, pixel; MagickBooleanType status; Quantum *q; ssize_t i; q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception); if (q == (Quantum *) NULL) return(MagickFalse); *q=QuantumRange; status=SyncCacheViewAuthenticPixels(edge_view,exception); if (status == MagickFalse) return(MagickFalse); if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse) return(MagickFalse); edge.x=x; edge.y=y; if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse) return(MagickFalse); for (i=1; i != 0; ) { ssize_t v; i--; status=GetMatrixElement(canny_cache,i,0,&edge); if (status == MagickFalse) return(MagickFalse); for (v=(-1); v <= 1; v++) { ssize_t u; for (u=(-1); u <= 1; u++) { if ((u == 0) && (v == 0)) continue; if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse) continue; /* Not an edge if gradient value is below the lower threshold. */ q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1, exception); if (q == (Quantum *) NULL) return(MagickFalse); status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel); if (status == MagickFalse) return(MagickFalse); if ((GetPixelIntensity(edge_image,q) == 0.0) && (pixel.intensity >= lower_threshold)) { *q=QuantumRange; status=SyncCacheViewAuthenticPixels(edge_view,exception); if (status == MagickFalse) return(MagickFalse); edge.x+=u; edge.y+=v; status=SetMatrixElement(canny_cache,i,0,&edge); if (status == MagickFalse) return(MagickFalse); i++; } } } } return(MagickTrue); } MagickExport Image *CannyEdgeImage(const Image *image,const double radius, const double sigma,const double lower_percent,const double upper_percent, ExceptionInfo *exception) { #define CannyEdgeImageTag "CannyEdge/Image" CacheView *edge_view; CannyInfo element; char geometry[MagickPathExtent]; double lower_threshold, max, min, upper_threshold; Image *edge_image; KernelInfo *kernel_info; MagickBooleanType status; MagickOffsetType progress; MatrixInfo *canny_cache; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Filter out noise. 
*/ (void) FormatLocaleString(geometry,MagickPathExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); edge_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); if (edge_image == (Image *) NULL) return((Image *) NULL); if (TransformImageColorspace(edge_image,GRAYColorspace,exception) == MagickFalse) { edge_image=DestroyImage(edge_image); return((Image *) NULL); } (void) SetImageAlphaChannel(edge_image,OffAlphaChannel,exception); /* Find the intensity gradient of the image. */ canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows, sizeof(CannyInfo),exception); if (canny_cache == (MatrixInfo *) NULL) { edge_image=DestroyImage(edge_image); return((Image *) NULL); } status=MagickTrue; edge_view=AcquireVirtualCacheView(edge_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(edge_image,edge_image,edge_image->rows,1) #endif for (y=0; y < (ssize_t) edge_image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) edge_image->columns; x++) { CannyInfo pixel; double dx, dy; const Quantum *magick_restrict kernel_pixels; ssize_t v; static double Gx[2][2] = { { -1.0, +1.0 }, { -1.0, +1.0 } }, Gy[2][2] = { { +1.0, +1.0 }, { -1.0, -1.0 } }; (void) memset(&pixel,0,sizeof(pixel)); dx=0.0; dy=0.0; kernel_pixels=p; for (v=0; v < 2; v++) { ssize_t u; for (u=0; u < 2; u++) { double intensity; intensity=GetPixelIntensity(edge_image,kernel_pixels+u); dx+=0.5*Gx[v][u]*intensity; dy+=0.5*Gy[v][u]*intensity; } kernel_pixels+=edge_image->columns+1; } pixel.magnitude=hypot(dx,dy); pixel.orientation=0; if (fabs(dx) > MagickEpsilon) { double slope; slope=dy/dx; if (slope < 0.0) { if (slope < -2.41421356237) pixel.orientation=0; else if (slope < -0.414213562373) pixel.orientation=1; else pixel.orientation=2; } else { if (slope > 2.41421356237) pixel.orientation=0; else if (slope > 0.414213562373) pixel.orientation=3; else pixel.orientation=2; } } if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse) continue; p+=GetPixelChannels(edge_image); } } edge_view=DestroyCacheView(edge_view); /* Non-maxima suppression, remove pixels that are not considered to be part of an edge. */ progress=0; (void) GetMatrixElement(canny_cache,0,0,&element); max=element.intensity; min=element.intensity; edge_view=AcquireAuthenticCacheView(edge_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(edge_image,edge_image,edge_image->rows,1) #endif for (y=0; y < (ssize_t) edge_image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) edge_image->columns; x++) { CannyInfo alpha_pixel, beta_pixel, pixel; (void) GetMatrixElement(canny_cache,x,y,&pixel); switch (pixel.orientation) { case 0: default: { /* 0 degrees, north and south. 
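            A pixel survives non-maxima suppression only if its gradient
            magnitude is at least that of both neighbors sampled along its
            orientation; for a 0-degree orientation those are the pixels
            directly above and below, (x,y-1) and (x,y+1).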
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      *q=0;
      q+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold.
  */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold.
  */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      const Quantum
        *magick_restrict p;

      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const Quantum *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
          exception);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CannyEdgeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   I m a g e   F e a t u r e s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageFeatures() returns features for each channel in the image in
%  each of four directions (horizontal, vertical, left and right diagonals)
%  for the specified distance.  The features include the angular second
%  moment, contrast, correlation, sum of squares: variance, inverse difference
%  moment, sum average, sum variance, sum entropy, entropy, difference
%  variance, difference entropy, information measures of correlation 1,
%  information measures of correlation 2, and maximum correlation coefficient.
%  You can access the red channel contrast, for example, like this:
%
%      channel_features=GetImageFeatures(image,1,exception);
%      contrast=channel_features[RedPixelChannel].contrast[0];
%
%  Use MagickRelinquishMemory() to free the features buffer.
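%
%  For example, a minimal sketch that averages the red-channel contrast over
%  all four directions (0, 45, 90, and 135 degrees):
%
%      contrast=0.0;
%      for (i=0; i < 4; i++)
%        contrast+=channel_features[RedPixelChannel].contrast[i];
%      contrast/=4.0;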
% % The format of the GetImageFeatures method is: % % ChannelFeatures *GetImageFeatures(const Image *image, % const size_t distance,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o distance: the distance. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } MagickExport ChannelFeatures *GetImageFeatures(const Image *image, const size_t distance,ExceptionInfo *exception) { typedef struct _ChannelStatistics { PixelInfo direction[4]; /* horizontal, vertical, left and right diagonals */ } ChannelStatistics; CacheView *image_view; ChannelFeatures *channel_features; ChannelStatistics **cooccurrence, correlation, *density_x, *density_xy, *density_y, entropy_x, entropy_xy, entropy_xy1, entropy_xy2, entropy_y, mean, **Q, *sum, sum_squares, variance; PixelPacket gray, *grays; MagickBooleanType status; ssize_t i, r; size_t length; unsigned int number_grays; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->columns < (distance+1)) || (image->rows < (distance+1))) return((ChannelFeatures *) NULL); length=MaxPixelChannels+1UL; channel_features=(ChannelFeatures *) AcquireQuantumMemory(length, sizeof(*channel_features)); if (channel_features == (ChannelFeatures *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(channel_features,0,length* sizeof(*channel_features)); /* Form grays. */ grays=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays)); if (grays == (PixelPacket *) NULL) { channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } for (i=0; i <= (ssize_t) MaxMap; i++) { grays[i].red=(~0U); grays[i].green=(~0U); grays[i].blue=(~0U); grays[i].alpha=(~0U); grays[i].black=(~0U); } status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (r=0; r < (ssize_t) image->rows; r++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,r,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { grays[ScaleQuantumToMap(GetPixelRed(image,p))].red= ScaleQuantumToMap(GetPixelRed(image,p)); grays[ScaleQuantumToMap(GetPixelGreen(image,p))].green= ScaleQuantumToMap(GetPixelGreen(image,p)); grays[ScaleQuantumToMap(GetPixelBlue(image,p))].blue= ScaleQuantumToMap(GetPixelBlue(image,p)); if (image->colorspace == CMYKColorspace) grays[ScaleQuantumToMap(GetPixelBlack(image,p))].black= ScaleQuantumToMap(GetPixelBlack(image,p)); if (image->alpha_trait != UndefinedPixelTrait) grays[ScaleQuantumToMap(GetPixelAlpha(image,p))].alpha= ScaleQuantumToMap(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); 
return(channel_features); } (void) memset(&gray,0,sizeof(gray)); for (i=0; i <= (ssize_t) MaxMap; i++) { if (grays[i].red != ~0U) grays[gray.red++].red=grays[i].red; if (grays[i].green != ~0U) grays[gray.green++].green=grays[i].green; if (grays[i].blue != ~0U) grays[gray.blue++].blue=grays[i].blue; if (image->colorspace == CMYKColorspace) if (grays[i].black != ~0U) grays[gray.black++].black=grays[i].black; if (image->alpha_trait != UndefinedPixelTrait) if (grays[i].alpha != ~0U) grays[gray.alpha++].alpha=grays[i].alpha; } /* Allocate spatial dependence matrix. */ number_grays=gray.red; if (gray.green > number_grays) number_grays=gray.green; if (gray.blue > number_grays) number_grays=gray.blue; if (image->colorspace == CMYKColorspace) if (gray.black > number_grays) number_grays=gray.black; if (image->alpha_trait != UndefinedPixelTrait) if (gray.alpha > number_grays) number_grays=gray.alpha; cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays, sizeof(*cooccurrence)); density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_x)); density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_xy)); density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_y)); Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q)); sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum)); if ((cooccurrence == (ChannelStatistics **) NULL) || (density_x == (ChannelStatistics *) NULL) || (density_xy == (ChannelStatistics *) NULL) || (density_y == (ChannelStatistics *) NULL) || (Q == (ChannelStatistics **) NULL) || (sum == (ChannelStatistics *) NULL)) { if (Q != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); } if (sum != (ChannelStatistics *) NULL) sum=(ChannelStatistics *) RelinquishMagickMemory(sum); if (density_y != (ChannelStatistics *) NULL) density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); if (density_xy != (ChannelStatistics *) NULL) density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); if (density_x != (ChannelStatistics *) NULL) density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); if (cooccurrence != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory( cooccurrence); } grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } (void) memset(&correlation,0,sizeof(correlation)); (void) memset(density_x,0,2*(number_grays+1)*sizeof(*density_x)); (void) memset(density_xy,0,2*(number_grays+1)*sizeof(*density_xy)); (void) memset(density_y,0,2*(number_grays+1)*sizeof(*density_y)); (void) memset(&mean,0,sizeof(mean)); (void) memset(sum,0,number_grays*sizeof(*sum)); (void) memset(&sum_squares,0,sizeof(sum_squares)); (void) memset(density_xy,0,2*number_grays*sizeof(*density_xy)); (void) memset(&entropy_x,0,sizeof(entropy_x)); (void) memset(&entropy_xy,0,sizeof(entropy_xy)); (void) memset(&entropy_xy1,0,sizeof(entropy_xy1)); (void) memset(&entropy_xy2,0,sizeof(entropy_xy2)); (void) memset(&entropy_y,0,sizeof(entropy_y)); 
  (void) memset(&variance,0,sizeof(variance));
  for (i=0; i < (ssize_t) number_grays; i++)
  {
    cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,
      sizeof(**cooccurrence));
    Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q));
    if ((cooccurrence[i] == (ChannelStatistics *) NULL) ||
        (Q[i] == (ChannelStatistics *) NULL))
      break;
    (void) memset(cooccurrence[i],0,number_grays*
      sizeof(**cooccurrence));
    (void) memset(Q[i],0,number_grays*sizeof(**Q));
  }
  if (i < (ssize_t) number_grays)
    {
      for (i--; i >= 0; i--)
      {
        if (Q[i] != (ChannelStatistics *) NULL)
          Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
        if (cooccurrence[i] != (ChannelStatistics *) NULL)
          cooccurrence[i]=(ChannelStatistics *)
            RelinquishMagickMemory(cooccurrence[i]);
      }
      Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
      cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
      sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
      density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
      density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
      density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
      grays=(PixelPacket *) RelinquishMagickMemory(grays);
      channel_features=(ChannelFeatures *) RelinquishMagickMemory(
        channel_features);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(channel_features);
    }
  /*
    Initialize spatial dependence matrix.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (r=0; r < (ssize_t) image->rows; r++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    ssize_t
      offset,
      u,
      v;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,r,image->columns+
      2*distance,distance+2,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=distance*GetPixelChannels(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < 4; i++)
      {
        switch (i)
        {
          case 0:
          default:
          {
            /*
              Horizontal adjacency.
            */
            offset=(ssize_t) distance;
            break;
          }
          case 1:
          {
            /*
              Vertical adjacency.
            */
            offset=(ssize_t) (image->columns+2*distance);
            break;
          }
          case 2:
          {
            /*
              Right diagonal adjacency.
            */
            offset=(ssize_t) ((image->columns+2*distance)-distance);
            break;
          }
          case 3:
          {
            /*
              Left diagonal adjacency.
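              The virtual view is the current row padded by `distance' pixels
              on either side, so one row spans image->columns+2*distance
              pixels; this offset selects the neighbor one row down and
              `distance' columns to the right.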
*/ offset=(ssize_t) ((image->columns+2*distance)+distance); break; } } u=0; v=0; while (grays[u].red != ScaleQuantumToMap(GetPixelRed(image,p))) u++; while (grays[v].red != ScaleQuantumToMap(GetPixelRed(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].red++; cooccurrence[v][u].direction[i].red++; u=0; v=0; while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(image,p))) u++; while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].green++; cooccurrence[v][u].direction[i].green++; u=0; v=0; while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(image,p))) u++; while (grays[v].blue != ScaleQuantumToMap(GetPixelBlue(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].blue++; cooccurrence[v][u].direction[i].blue++; if (image->colorspace == CMYKColorspace) { u=0; v=0; while (grays[u].black != ScaleQuantumToMap(GetPixelBlack(image,p))) u++; while (grays[v].black != ScaleQuantumToMap(GetPixelBlack(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].black++; cooccurrence[v][u].direction[i].black++; } if (image->alpha_trait != UndefinedPixelTrait) { u=0; v=0; while (grays[u].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p))) u++; while (grays[v].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].alpha++; cooccurrence[v][u].direction[i].alpha++; } } p+=GetPixelChannels(image); } } grays=(PixelPacket *) RelinquishMagickMemory(grays); image_view=DestroyCacheView(image_view); if (status == MagickFalse) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Normalize spatial dependence matrix. */ for (i=0; i < 4; i++) { double normalize; ssize_t y; switch (i) { case 0: default: { /* Horizontal adjacency. */ normalize=2.0*image->rows*(image->columns-distance); break; } case 1: { /* Vertical adjacency. */ normalize=2.0*(image->rows-distance)*image->columns; break; } case 2: { /* Right diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } case 3: { /* Left diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } } normalize=PerceptibleReciprocal(normalize); for (y=0; y < (ssize_t) number_grays; y++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { cooccurrence[x][y].direction[i].red*=normalize; cooccurrence[x][y].direction[i].green*=normalize; cooccurrence[x][y].direction[i].blue*=normalize; if (image->colorspace == CMYKColorspace) cooccurrence[x][y].direction[i].black*=normalize; if (image->alpha_trait != UndefinedPixelTrait) cooccurrence[x][y].direction[i].alpha*=normalize; } } } /* Compute texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Angular second moment: measure of homogeneity of the image. 
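          This is the sum over all (x,y) of P[x][y]*P[x][y], where P is the
          normalized co-occurrence matrix, so fewer and larger entries (a
          more homogeneous image) yield a larger value.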
*/ channel_features[RedPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].red* cooccurrence[x][y].direction[i].red; channel_features[GreenPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].green* cooccurrence[x][y].direction[i].green; channel_features[BluePixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].blue* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].black* cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].alpha* cooccurrence[x][y].direction[i].alpha; /* Correlation: measure of linear-dependencies in the image. */ sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red; sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green; sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) sum[y].direction[i].black+=cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) sum[y].direction[i].alpha+=cooccurrence[x][y].direction[i].alpha; correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red; correlation.direction[i].green+=x*y* cooccurrence[x][y].direction[i].green; correlation.direction[i].blue+=x*y* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) correlation.direction[i].black+=x*y* cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) correlation.direction[i].alpha+=x*y* cooccurrence[x][y].direction[i].alpha; /* Inverse Difference Moment. */ channel_features[RedPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1); channel_features[GreenPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1); channel_features[BluePixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].black/((y-x)*(y-x)+1); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].alpha/((y-x)*(y-x)+1); /* Sum average. */ density_xy[y+x+2].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[y+x+2].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[y+x+2].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[y+x+2].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_xy[y+x+2].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; /* Entropy. 
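          Entropy is -sum P[x][y]*log10(P[x][y]); MagickLog10() clamps its
          argument away from zero so that empty co-occurrence bins do not
          contribute a -infinity term.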
*/ channel_features[RedPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); channel_features[GreenPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); channel_features[BluePixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].black* MagickLog10(cooccurrence[x][y].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].alpha* MagickLog10(cooccurrence[x][y].direction[i].alpha); /* Information Measures of Correlation. */ density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red; density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green; density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->alpha_trait != UndefinedPixelTrait) density_x[x].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; if (image->colorspace == CMYKColorspace) density_x[x].direction[i].black+= cooccurrence[x][y].direction[i].black; density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red; density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green; density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_y[y].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_y[y].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; } mean.direction[i].red+=y*sum[y].direction[i].red; sum_squares.direction[i].red+=y*y*sum[y].direction[i].red; mean.direction[i].green+=y*sum[y].direction[i].green; sum_squares.direction[i].green+=y*y*sum[y].direction[i].green; mean.direction[i].blue+=y*sum[y].direction[i].blue; sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue; if (image->colorspace == CMYKColorspace) { mean.direction[i].black+=y*sum[y].direction[i].black; sum_squares.direction[i].black+=y*y*sum[y].direction[i].black; } if (image->alpha_trait != UndefinedPixelTrait) { mean.direction[i].alpha+=y*sum[y].direction[i].alpha; sum_squares.direction[i].alpha+=y*y*sum[y].direction[i].alpha; } } /* Correlation: measure of linear-dependencies in the image. 
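      With the symmetric marginals accumulated above, this reduces to
      (sum x*y*P[x][y]-mean*mean)/(sum_squares-mean*mean), i.e. the
      covariance over the variance of the gray-level marginal distribution.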
*/ channel_features[RedPixelChannel].correlation[i]= (correlation.direction[i].red-mean.direction[i].red* mean.direction[i].red)/(sqrt(sum_squares.direction[i].red- (mean.direction[i].red*mean.direction[i].red))*sqrt( sum_squares.direction[i].red-(mean.direction[i].red* mean.direction[i].red))); channel_features[GreenPixelChannel].correlation[i]= (correlation.direction[i].green-mean.direction[i].green* mean.direction[i].green)/(sqrt(sum_squares.direction[i].green- (mean.direction[i].green*mean.direction[i].green))*sqrt( sum_squares.direction[i].green-(mean.direction[i].green* mean.direction[i].green))); channel_features[BluePixelChannel].correlation[i]= (correlation.direction[i].blue-mean.direction[i].blue* mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue- (mean.direction[i].blue*mean.direction[i].blue))*sqrt( sum_squares.direction[i].blue-(mean.direction[i].blue* mean.direction[i].blue))); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].correlation[i]= (correlation.direction[i].black-mean.direction[i].black* mean.direction[i].black)/(sqrt(sum_squares.direction[i].black- (mean.direction[i].black*mean.direction[i].black))*sqrt( sum_squares.direction[i].black-(mean.direction[i].black* mean.direction[i].black))); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].correlation[i]= (correlation.direction[i].alpha-mean.direction[i].alpha* mean.direction[i].alpha)/(sqrt(sum_squares.direction[i].alpha- (mean.direction[i].alpha*mean.direction[i].alpha))*sqrt( sum_squares.direction[i].alpha-(mean.direction[i].alpha* mean.direction[i].alpha))); } /* Compute more texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t x; for (x=2; x < (ssize_t) (2*number_grays); x++) { /* Sum average. */ channel_features[RedPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].red; channel_features[GreenPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].green; channel_features[BluePixelChannel].sum_average[i]+= x*density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].alpha; /* Sum entropy. */ channel_features[RedPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BluePixelChannel].sum_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].black* MagickLog10(density_xy[x].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].alpha* MagickLog10(density_xy[x].direction[i].alpha); /* Sum variance. 
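        Following Haralick's definition, the sum entropy accumulated just
        above stands in for the mean here:
        sum_variance = sum over k of (k-sum_entropy)^2*p_{x+y}(k).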
*/ channel_features[RedPixelChannel].sum_variance[i]+= (x-channel_features[RedPixelChannel].sum_entropy[i])* (x-channel_features[RedPixelChannel].sum_entropy[i])* density_xy[x].direction[i].red; channel_features[GreenPixelChannel].sum_variance[i]+= (x-channel_features[GreenPixelChannel].sum_entropy[i])* (x-channel_features[GreenPixelChannel].sum_entropy[i])* density_xy[x].direction[i].green; channel_features[BluePixelChannel].sum_variance[i]+= (x-channel_features[BluePixelChannel].sum_entropy[i])* (x-channel_features[BluePixelChannel].sum_entropy[i])* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_variance[i]+= (x-channel_features[BlackPixelChannel].sum_entropy[i])* (x-channel_features[BlackPixelChannel].sum_entropy[i])* density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_variance[i]+= (x-channel_features[AlphaPixelChannel].sum_entropy[i])* (x-channel_features[AlphaPixelChannel].sum_entropy[i])* density_xy[x].direction[i].alpha; } } /* Compute more texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Sum of Squares: Variance */ variance.direction[i].red+=(y-mean.direction[i].red+1)* (y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red; variance.direction[i].green+=(y-mean.direction[i].green+1)* (y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green; variance.direction[i].blue+=(y-mean.direction[i].blue+1)* (y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].black+=(y-mean.direction[i].black+1)* (y-mean.direction[i].black+1)*cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) variance.direction[i].alpha+=(y-mean.direction[i].alpha+1)* (y-mean.direction[i].alpha+1)* cooccurrence[x][y].direction[i].alpha; /* Sum average / Difference Variance. */ density_xy[MagickAbsoluteValue(y-x)].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[MagickAbsoluteValue(y-x)].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[MagickAbsoluteValue(y-x)].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_xy[MagickAbsoluteValue(y-x)].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; /* Information Measures of Correlation. 
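          entropy_xy accumulates the joint entropy HXY, entropy_xy1
          accumulates HXY1 = -sum P*log10(px*py), and entropy_xy2 accumulates
          HXY2 = -sum px*py*log10(px*py); the two correlation measures are
          formed from these below.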
*/ entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) entropy_xy.direction[i].black-=cooccurrence[x][y].direction[i].black* MagickLog10(cooccurrence[x][y].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy.direction[i].alpha-= cooccurrence[x][y].direction[i].alpha*MagickLog10( cooccurrence[x][y].direction[i].alpha); entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red* MagickLog10(density_x[x].direction[i].red*density_y[y].direction[i].red)); entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green* MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue* MagickLog10(density_x[x].direction[i].blue*density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy1.direction[i].black-=( cooccurrence[x][y].direction[i].black*MagickLog10( density_x[x].direction[i].black*density_y[y].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy1.direction[i].alpha-=( cooccurrence[x][y].direction[i].alpha*MagickLog10( density_x[x].direction[i].alpha*density_y[y].direction[i].alpha)); entropy_xy2.direction[i].red-=(density_x[x].direction[i].red* density_y[y].direction[i].red*MagickLog10(density_x[x].direction[i].red* density_y[y].direction[i].red)); entropy_xy2.direction[i].green-=(density_x[x].direction[i].green* density_y[y].direction[i].green*MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue* density_y[y].direction[i].blue*MagickLog10(density_x[x].direction[i].blue* density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy2.direction[i].black-=(density_x[x].direction[i].black* density_y[y].direction[i].black*MagickLog10( density_x[x].direction[i].black*density_y[y].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy2.direction[i].alpha-=(density_x[x].direction[i].alpha* density_y[y].direction[i].alpha*MagickLog10( density_x[x].direction[i].alpha*density_y[y].direction[i].alpha)); } } channel_features[RedPixelChannel].variance_sum_of_squares[i]= variance.direction[i].red; channel_features[GreenPixelChannel].variance_sum_of_squares[i]= variance.direction[i].green; channel_features[BluePixelChannel].variance_sum_of_squares[i]= variance.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].variance_sum_of_squares[i]= variance.direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].variance_sum_of_squares[i]= variance.direction[i].alpha; } /* Compute more texture features. */ (void) memset(&variance,0,sizeof(variance)); (void) memset(&sum_squares,0,sizeof(sum_squares)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Difference variance. 
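        The `variance' and `sum_squares' accumulators (reset just above) hold
        the sum and the sum of squares of the difference-density entries
        indexed by |y-x| in the previous pass.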
*/ variance.direction[i].red+=density_xy[x].direction[i].red; variance.direction[i].green+=density_xy[x].direction[i].green; variance.direction[i].blue+=density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].black+=density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) variance.direction[i].alpha+=density_xy[x].direction[i].alpha; sum_squares.direction[i].red+=density_xy[x].direction[i].red* density_xy[x].direction[i].red; sum_squares.direction[i].green+=density_xy[x].direction[i].green* density_xy[x].direction[i].green; sum_squares.direction[i].blue+=density_xy[x].direction[i].blue* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) sum_squares.direction[i].black+=density_xy[x].direction[i].black* density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) sum_squares.direction[i].alpha+=density_xy[x].direction[i].alpha* density_xy[x].direction[i].alpha; /* Difference entropy. */ channel_features[RedPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BluePixelChannel].difference_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].black* MagickLog10(density_xy[x].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].alpha* MagickLog10(density_xy[x].direction[i].alpha); /* Information Measures of Correlation. */ entropy_x.direction[i].red-=(density_x[x].direction[i].red* MagickLog10(density_x[x].direction[i].red)); entropy_x.direction[i].green-=(density_x[x].direction[i].green* MagickLog10(density_x[x].direction[i].green)); entropy_x.direction[i].blue-=(density_x[x].direction[i].blue* MagickLog10(density_x[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_x.direction[i].black-=(density_x[x].direction[i].black* MagickLog10(density_x[x].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_x.direction[i].alpha-=(density_x[x].direction[i].alpha* MagickLog10(density_x[x].direction[i].alpha)); entropy_y.direction[i].red-=(density_y[x].direction[i].red* MagickLog10(density_y[x].direction[i].red)); entropy_y.direction[i].green-=(density_y[x].direction[i].green* MagickLog10(density_y[x].direction[i].green)); entropy_y.direction[i].blue-=(density_y[x].direction[i].blue* MagickLog10(density_y[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_y.direction[i].black-=(density_y[x].direction[i].black* MagickLog10(density_y[x].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_y.direction[i].alpha-=(density_y[x].direction[i].alpha* MagickLog10(density_y[x].direction[i].alpha)); } /* Difference variance. 
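      This evaluates (N*N*sum(P*P)-(sum P)^2)/N^4 with N=number_grays, the
      variance of the difference density computed from the two sums
      accumulated above.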
*/ channel_features[RedPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].red)- (variance.direction[i].red*variance.direction[i].red))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[GreenPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].green)- (variance.direction[i].green*variance.direction[i].green))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[BluePixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].blue)- (variance.direction[i].blue*variance.direction[i].blue))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].black)- (variance.direction[i].black*variance.direction[i].black))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].alpha)- (variance.direction[i].alpha*variance.direction[i].alpha))/ ((double) number_grays*number_grays*number_grays*number_grays); /* Information Measures of Correlation. */ channel_features[RedPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/ (entropy_x.direction[i].red > entropy_y.direction[i].red ? entropy_x.direction[i].red : entropy_y.direction[i].red); channel_features[GreenPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/ (entropy_x.direction[i].green > entropy_y.direction[i].green ? entropy_x.direction[i].green : entropy_y.direction[i].green); channel_features[BluePixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/ (entropy_x.direction[i].blue > entropy_y.direction[i].blue ? entropy_x.direction[i].blue : entropy_y.direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].black-entropy_xy1.direction[i].black)/ (entropy_x.direction[i].black > entropy_y.direction[i].black ? entropy_x.direction[i].black : entropy_y.direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].alpha-entropy_xy1.direction[i].alpha)/ (entropy_x.direction[i].alpha > entropy_y.direction[i].alpha ? 
entropy_x.direction[i].alpha : entropy_y.direction[i].alpha); channel_features[RedPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].red- entropy_xy.direction[i].red))))); channel_features[GreenPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].green- entropy_xy.direction[i].green))))); channel_features[BluePixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].blue- entropy_xy.direction[i].blue))))); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].black- entropy_xy.direction[i].black))))); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].alpha- entropy_xy.direction[i].alpha))))); } /* Compute more texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t z; for (z=0; z < (ssize_t) number_grays; z++) { ssize_t y; ChannelStatistics pixel; (void) memset(&pixel,0,sizeof(pixel)); for (y=0; y < (ssize_t) number_grays; y++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Contrast: amount of local variations present in an image. */ if (((y-x) == z) || ((x-y) == z)) { pixel.direction[i].red+=cooccurrence[x][y].direction[i].red; pixel.direction[i].green+=cooccurrence[x][y].direction[i].green; pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) pixel.direction[i].black+=cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) pixel.direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; } /* Maximum Correlation Coefficient. 
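            Build Q[z][y] = sum over x of P[z][x]*P[y][x]/(px[z]*py[x]); the
            maximum correlation coefficient is defined as the square root of
            the second-largest eigenvalue of Q (the eigensolve itself is not
            implemented below).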
          */
          if ((fabs(density_x[z].direction[i].red) > MagickEpsilon) &&
              (fabs(density_y[x].direction[i].red) > MagickEpsilon))
            Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red*
              cooccurrence[y][x].direction[i].red/
              density_x[z].direction[i].red/density_y[x].direction[i].red;
          if ((fabs(density_x[z].direction[i].green) > MagickEpsilon) &&
              (fabs(density_y[x].direction[i].green) > MagickEpsilon))
            Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green*
              cooccurrence[y][x].direction[i].green/
              density_x[z].direction[i].green/density_y[x].direction[i].green;
          if ((fabs(density_x[z].direction[i].blue) > MagickEpsilon) &&
              (fabs(density_y[x].direction[i].blue) > MagickEpsilon))
            Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue*
              cooccurrence[y][x].direction[i].blue/
              density_x[z].direction[i].blue/density_y[x].direction[i].blue;
          if (image->colorspace == CMYKColorspace)
            if ((fabs(density_x[z].direction[i].black) > MagickEpsilon) &&
                (fabs(density_y[x].direction[i].black) > MagickEpsilon))
              Q[z][y].direction[i].black+=cooccurrence[z][x].direction[i].black*
                cooccurrence[y][x].direction[i].black/
                density_x[z].direction[i].black/density_y[x].direction[i].black;
          if (image->alpha_trait != UndefinedPixelTrait)
            if ((fabs(density_x[z].direction[i].alpha) > MagickEpsilon) &&
                (fabs(density_y[x].direction[i].alpha) > MagickEpsilon))
              Q[z][y].direction[i].alpha+=
                cooccurrence[z][x].direction[i].alpha*
                cooccurrence[y][x].direction[i].alpha/
                density_x[z].direction[i].alpha/
                density_y[x].direction[i].alpha;
        }
      }
      channel_features[RedPixelChannel].contrast[i]+=z*z*
        pixel.direction[i].red;
      channel_features[GreenPixelChannel].contrast[i]+=z*z*
        pixel.direction[i].green;
      channel_features[BluePixelChannel].contrast[i]+=z*z*
        pixel.direction[i].blue;
      if (image->colorspace == CMYKColorspace)
        channel_features[BlackPixelChannel].contrast[i]+=z*z*
          pixel.direction[i].black;
      if (image->alpha_trait != UndefinedPixelTrait)
        channel_features[AlphaPixelChannel].contrast[i]+=z*z*
          pixel.direction[i].alpha;
    }
    /*
      Maximum Correlation Coefficient.
      Future: return second largest eigenvalue of Q.
    */
    channel_features[RedPixelChannel].maximum_correlation_coefficient[i]=
      sqrt((double) -1.0);
    channel_features[GreenPixelChannel].maximum_correlation_coefficient[i]=
      sqrt((double) -1.0);
    channel_features[BluePixelChannel].maximum_correlation_coefficient[i]=
      sqrt((double) -1.0);
    if (image->colorspace == CMYKColorspace)
      channel_features[BlackPixelChannel].maximum_correlation_coefficient[i]=
        sqrt((double) -1.0);
    if (image->alpha_trait != UndefinedPixelTrait)
      channel_features[AlphaPixelChannel].maximum_correlation_coefficient[i]=
        sqrt((double) -1.0);
  }
  /*
    Relinquish resources.
  */
  sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
  for (i=0; i < (ssize_t) number_grays; i++)
    Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
  Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
  density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
  density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
  density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
  for (i=0; i < (ssize_t) number_grays; i++)
    cooccurrence[i]=(ChannelStatistics *)
      RelinquishMagickMemory(cooccurrence[i]);
  cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
  return(channel_features);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   H o u g h L i n e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Use HoughLineImage() in conjunction with any binary edge-extracted image
%  (we recommend Canny) to identify lines in the image.  The algorithm
%  accumulates counts for every white pixel for every possible orientation
%  (for angles from 0 to 179 in 1 degree increments) and distance from the
%  center of the image to the corner (in 1 px increments) and stores the
%  counts in an accumulator matrix of angle vs distance, sized 180 by twice
%  the center-to-corner distance.  Next it searches this space for peaks in
%  counts and converts the locations of the peaks to slope and intercept in
%  the normal x,y input image space.  It then uses the slope/intercepts to
%  find the endpoints, clipped to the bounds of the image.  The lines are
%  then drawn.  The counts are a measure of the length of the lines.
%
%  The format of the HoughLineImage method is:
%
%      Image *HoughLineImage(const Image *image,const size_t width,
%        const size_t height,const size_t threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width, height: find line pairs as local maxima in this neighborhood.
%
%    o threshold: the line count threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define BoundingBox  "viewbox"

  DrawInfo
    *draw_info;

  Image
    *image;

  MagickBooleanType
    status;

  /*
    Open image.
  */
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  image->columns=columns;
  image->rows=rows;
  draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
  draw_info->affine.sx=image->resolution.x == 0.0 ? 1.0 : image->resolution.x/
    DefaultResolution;
  draw_info->affine.sy=image->resolution.y == 0.0 ? 1.0 : image->resolution.y/
    DefaultResolution;
  image->columns=(size_t) (draw_info->affine.sx*image->columns);
  image->rows=(size_t) (draw_info->affine.sy*image->rows);
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  if (SetImageBackgroundColor(image,exception) == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Render drawing.
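    The MVG `line' primitives written by HoughLineImage() are read back from
    the temporary file (or in-memory blob) into draw_info->primitive and
    rasterized with DrawImage().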
  */
  if (GetBlobStreamData(image) == (unsigned char *) NULL)
    draw_info->primitive=FileToString(image->filename,~0UL,exception);
  else
    {
      draw_info->primitive=(char *) AcquireQuantumMemory(1,(size_t)
        GetBlobSize(image)+1);
      if (draw_info->primitive != (char *) NULL)
        {
          (void) memcpy(draw_info->primitive,GetBlobStreamData(image),
            (size_t) GetBlobSize(image));
          draw_info->primitive[GetBlobSize(image)]='\0';
        }
    }
  (void) DrawImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

MagickExport Image *HoughLineImage(const Image *image,const size_t width,
  const size_t height,const size_t threshold,ExceptionInfo *exception)
{
#define HoughLineImageTag  "HoughLine/Image"

  CacheView
    *image_view;

  char
    message[MagickPathExtent],
    path[MagickPathExtent];

  const char
    *artifact;

  double
    hough_height;

  Image
    *lines_image = NULL;

  ImageInfo
    *image_info;

  int
    file;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *accumulator;

  PointInfo
    center;

  ssize_t
    y;

  size_t
    accumulator_height,
    accumulator_width,
    line_count;

  /*
    Create the accumulator.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  accumulator_width=180;
  hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ?
    image->rows : image->columns))/2.0);
  accumulator_height=(size_t) (2.0*hough_height);
  accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height,
    sizeof(double),exception);
  if (accumulator == (MatrixInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (NullMatrix(accumulator) == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Populate the accumulator.
  */
  status=MagickTrue;
  progress=0;
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelIntensity(image,p) > (QuantumRange/2.0))
        {
          ssize_t
            i;

          for (i=0; i < 180; i++)
          {
            double
              count,
              radius;

            radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+
              (((double) y-center.y)*sin(DegreesToRadians((double) i)));
            (void) GetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
            count++;
            (void) SetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
          }
        }
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,HoughLineImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  /*
    Generate line segments from accumulator.
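    Each accumulator cell (theta,rho) whose count meets the line-count
    threshold and is a local maximum within the width x height neighborhood
    is converted back to image space via r = x*cos(theta)+y*sin(theta) and
    written as an MVG `line' primitive, with its count recorded as a comment.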
*/ file=AcquireUniqueFileResource(path); if (file == -1) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } (void) FormatLocaleString(message,MagickPathExtent, "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width, (double) height,(double) threshold); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; (void) FormatLocaleString(message,MagickPathExtent, "viewbox 0 0 %.20g %.20g\n",(double) image->columns,(double) image->rows); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; (void) FormatLocaleString(message,MagickPathExtent, "# x1,y1 x2,y2 # count angle distance\n"); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; line_count=image->columns > image->rows ? image->columns/4 : image->rows/4; if (threshold != 0) line_count=threshold; for (y=0; y < (ssize_t) accumulator_height; y++) { ssize_t x; for (x=0; x < (ssize_t) accumulator_width; x++) { double count; (void) GetMatrixElement(accumulator,x,y,&count); if (count >= (double) line_count) { double maxima; SegmentInfo line; ssize_t v; /* Is point a local maxima? */ maxima=count; for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++) { ssize_t u; for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++) { if ((u != 0) || (v !=0)) { (void) GetMatrixElement(accumulator,x+u,y+v,&count); if (count > maxima) { maxima=count; break; } } } if (u < (ssize_t) (width/2)) break; } (void) GetMatrixElement(accumulator,x,y,&count); if (maxima > count) continue; if ((x >= 45) && (x <= 135)) { /* y = (r-x cos(t))/sin(t) */ line.x1=0.0; line.y1=((double) (y-(accumulator_height/2.0))-((line.x1- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); line.x2=(double) image->columns; line.y2=((double) (y-(accumulator_height/2.0))-((line.x2- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); } else { /* x = (r-y cos(t))/sin(t) */ line.y1=0.0; line.x1=((double) (y-(accumulator_height/2.0))-((line.y1- (image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); line.y2=(double) image->rows; line.x2=((double) (y-(accumulator_height/2.0))-((line.y2- (image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); } (void) FormatLocaleString(message,MagickPathExtent, "line %g,%g %g,%g # %g %g %g\n",line.x1,line.y1,line.x2,line.y2, maxima,(double) x,(double) y); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; } } } (void) close(file); /* Render lines to image canvas. 
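    Any background, fill, stroke, or strokewidth artifacts present on the
    input image are forwarded as options so they influence how the lines are
    drawn.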
  */
  image_info=AcquireImageInfo();
  image_info->background_color=image->background_color;
  (void) FormatLocaleString(image_info->filename,MagickPathExtent,"%s",path);
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"background",artifact);
  artifact=GetImageArtifact(image,"fill");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"fill",artifact);
  artifact=GetImageArtifact(image,"stroke");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"stroke",artifact);
  artifact=GetImageArtifact(image,"strokewidth");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"strokewidth",artifact);
  lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception);
  artifact=GetImageArtifact(image,"hough-lines:accumulator");
  if ((lines_image != (Image *) NULL) &&
      (IsStringTrue(artifact) != MagickFalse))
    {
      Image
        *accumulator_image;

      accumulator_image=MatrixToImage(accumulator,exception);
      if (accumulator_image != (Image *) NULL)
        AppendImageToList(&lines_image,accumulator_image);
    }
  /*
    Free resources.
  */
  accumulator=DestroyMatrixInfo(accumulator);
  image_info=DestroyImageInfo(image_info);
  (void) RelinquishUniqueFileResource(path);
  return(GetFirstImageInList(lines_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M e a n S h i f t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MeanShiftImage() delineates arbitrarily shaped clusters in the image.  For
%  each pixel, it visits all the pixels in the neighborhood specified by
%  the window centered at the pixel and excludes those that are outside the
%  radius=(window-1)/2 surrounding the pixel.  From those pixels, it finds
%  those that are within the specified color distance from the current mean,
%  and computes a new x,y centroid from those coordinates and a new mean.
%  This new x,y centroid is used as the center for a new window.  This
%  process iterates until it converges, and the final mean replaces the
%  (original window center) pixel value.  It repeats this process for the
%  next pixel, etc., until it processes all pixels in the image.  Results are
%  typically better with colorspaces other than sRGB.  We recommend YIQ, YUV
%  or YCbCr.
%
%  The format of the MeanShiftImage method is:
%
%      Image *MeanShiftImage(const Image *image,const size_t width,
%        const size_t height,const double color_distance,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width, height: find pixels in this neighborhood.
%
%    o color_distance: the color distance.
%
%    o exception: return any errors or warnings in this structure.
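%
%  A minimal usage sketch (the choice of YCbCr and the parameter values here
%  are only suggestions):
%
%      (void) TransformImageColorspace(image,YCbCrColorspace,exception);
%      mean_image=MeanShiftImage(image,7,7,0.10*QuantumRange,exception);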
% */ MagickExport Image *MeanShiftImage(const Image *image,const size_t width, const size_t height,const double color_distance,ExceptionInfo *exception) { #define MaxMeanShiftIterations 100 #define MeanShiftImageTag "MeanShift/Image" CacheView *image_view, *mean_view, *pixel_view; Image *mean_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); mean_image=CloneImage(image,0,0,MagickTrue,exception); if (mean_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(mean_image,DirectClass,exception) == MagickFalse) { mean_image=DestroyImage(mean_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); pixel_view=AcquireVirtualCacheView(image,exception); mean_view=AcquireAuthenticCacheView(mean_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status,progress) \ magick_number_threads(mean_image,mean_image,mean_image->rows,1) #endif for (y=0; y < (ssize_t) mean_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) mean_image->columns; x++) { PixelInfo mean_pixel, previous_pixel; PointInfo mean_location, previous_location; ssize_t i; GetPixelInfo(image,&mean_pixel); GetPixelInfoPixel(image,p,&mean_pixel); mean_location.x=(double) x; mean_location.y=(double) y; for (i=0; i < MaxMeanShiftIterations; i++) { double distance, gamma; PixelInfo sum_pixel; PointInfo sum_location; ssize_t count, v; sum_location.x=0.0; sum_location.y=0.0; GetPixelInfo(image,&sum_pixel); previous_location=mean_location; previous_pixel=mean_pixel; count=0; for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++) { ssize_t u; for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++) { if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2))) { PixelInfo pixel; status=GetOneCacheViewVirtualPixelInfo(pixel_view,(ssize_t) MagickRound(mean_location.x+u),(ssize_t) MagickRound( mean_location.y+v),&pixel,exception); distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+ (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+ (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue); if (distance <= (color_distance*color_distance)) { sum_location.x+=mean_location.x+u; sum_location.y+=mean_location.y+v; sum_pixel.red+=pixel.red; sum_pixel.green+=pixel.green; sum_pixel.blue+=pixel.blue; sum_pixel.alpha+=pixel.alpha; count++; } } } } gamma=PerceptibleReciprocal(count); mean_location.x=gamma*sum_location.x; mean_location.y=gamma*sum_location.y; mean_pixel.red=gamma*sum_pixel.red; mean_pixel.green=gamma*sum_pixel.green; mean_pixel.blue=gamma*sum_pixel.blue; mean_pixel.alpha=gamma*sum_pixel.alpha; distance=(mean_location.x-previous_location.x)* (mean_location.x-previous_location.x)+ (mean_location.y-previous_location.y)* (mean_location.y-previous_location.y)+ 255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)* 
255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+ 255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)* 255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+ 255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)* 255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue); if (distance <= 3.0) break; } SetPixelRed(mean_image,ClampToQuantum(mean_pixel.red),q); SetPixelGreen(mean_image,ClampToQuantum(mean_pixel.green),q); SetPixelBlue(mean_image,ClampToQuantum(mean_pixel.blue),q); SetPixelAlpha(mean_image,ClampToQuantum(mean_pixel.alpha),q); p+=GetPixelChannels(image); q+=GetPixelChannels(mean_image); } if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MeanShiftImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } mean_view=DestroyCacheView(mean_view); pixel_view=DestroyCacheView(pixel_view); image_view=DestroyCacheView(image_view); return(mean_image); }
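/*
  Usage sketch (not part of MagickCore; disabled with #if 0): a minimal
  program showing how MeanShiftImage() might be called, following the note
  above that results are typically better in a colorspace other than sRGB.
  The filenames, the 7x7 window, and the color distance of 10% of
  QuantumRange are illustrative assumptions, not values from this file.
*/
#if 0
#include "MagickCore/MagickCore.h"

int main(void)
{
  ExceptionInfo
    *exception;

  Image
    *image,
    *mean_image;

  ImageInfo
    *image_info;

  MagickCoreGenesis("mean-shift-demo",MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"input.png",MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /*
        Work in YCbCr, then mean-shift with a 7x7 window.
      */
      (void) TransformImageColorspace(image,YCbCrColorspace,exception);
      mean_image=MeanShiftImage(image,7,7,0.10*QuantumRange,exception);
      if (mean_image != (Image *) NULL)
        {
          (void) CopyMagickString(mean_image->filename,"output.png",
            MagickPathExtent);
          (void) WriteImage(image_info,mean_image,exception);
          mean_image=DestroyImage(mean_image);
        }
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif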
GB_binop__plus_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__plus_uint64 // A.*B function (eWiseMult): GB_AemultB__plus_uint64 // A*D function (colscale): GB_AxD__plus_uint64 // D*A function (rowscale): GB_DxB__plus_uint64 // C+=B function (dense accum): GB_Cdense_accumB__plus_uint64 // C+=b function (dense accum): GB_Cdense_accumb__plus_uint64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__plus_uint64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__plus_uint64 // C=scalar+B GB_bind1st__plus_uint64 // C=scalar+B' GB_bind1st_tran__plus_uint64 // C=A+scalar GB_bind2nd__plus_uint64 // C=A'+scalar GB_bind2nd_tran__plus_uint64 // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = (aij + bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x + y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_UINT64 || GxB_NO_PLUS_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__plus_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__plus_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__plus_uint64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__plus_uint64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__plus_uint64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__plus_uint64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__plus_uint64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool 
Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__plus_uint64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__plus_uint64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = Bx [p] ; Cx [p] = (x + bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__plus_uint64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = Ax [p] ; Cx [p] = (aij + y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = (x + aij) ; \ } GrB_Info 
GB_bind1st_tran__plus_uint64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = (aij + y) ; \ } GrB_Info GB_bind2nd_tran__plus_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
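/*
  Usage sketch (not part of this generated file; disabled with #if 0): the
  kernels above are reached through the user-level GraphBLAS API.  A minimal
  C = A+B over uint64 matrices, which dispatches to GB_AaddB__plus_uint64
  when this operator is enabled, might look as follows.  The 4x4 size and
  the element values are illustrative assumptions.
*/
#if 0
#include "GraphBLAS.h"

int demo_plus_uint64 (void)
{
    GrB_Matrix A = NULL, B = NULL, C = NULL ;
    uint64_t result = 0 ;
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix_new (&A, GrB_UINT64, 4, 4) ;
    GrB_Matrix_new (&B, GrB_UINT64, 4, 4) ;
    GrB_Matrix_new (&C, GrB_UINT64, 4, 4) ;
    GrB_Matrix_setElement_UINT64 (A, 1, 0, 0) ;
    GrB_Matrix_setElement_UINT64 (B, 2, 0, 0) ;
    // C = A+B with the built-in PLUS operator for uint64_t
    GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GrB_PLUS_UINT64, A, B,
        NULL) ;
    GrB_Matrix_extractElement_UINT64 (&result, C, 0, 0) ;  // result == 3
    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&B) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
    return ((int) result) ;
}
#endif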
ast-dump-openmp-section.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test() { #pragma omp sections { #pragma omp section ; } } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-section.c:3:1, line:9:1> line:3:6 test 'void ()' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:9:1> // CHECK-NEXT: `-OMPSectionsDirective {{.*}} <line:4:1, col:21> // CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3, line:8:3> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: |-CompoundStmt {{.*}} <line:5:3, line:8:3> openmp_structured_block // CHECK-NEXT: | `-OMPSectionDirective {{.*}} <line:6:1, col:20> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:7:5> // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-NullStmt {{.*}} <col:5> openmp_structured_block // CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <line:6:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-section.c:6:1) *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-section.c:4:1) *const restrict' // CHECK-NEXT: |-RecordDecl {{.*}} <line:6:1> col:1 implicit struct definition // CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: |-NullStmt {{.*}} <line:7:5> openmp_structured_block // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:6:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-section.c:6:1) *const restrict'
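// To reproduce this dump outside of lit, where %clang_cc1 and %s are
// substituted automatically, an equivalent manual invocation (illustrative;
// adjust the path to this source file) would be:
//
//   clang -cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump \
//     ast-dump-openmp-section.c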
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/DependenceFlags.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/BitmaskEnum.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class AddrLabelExpr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. /// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } //===--- Statement bitfields classes ---===// class StmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class Stmt; /// The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class NullStmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class NullStmt; unsigned : NumStmtBits; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode unsigned HasLeadingEmptyMacro : 1; /// The location of the semi-colon. SourceLocation SemiLoc; }; class CompoundStmtBitfields { friend class ASTStmtReader; friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". 
SourceLocation LBraceLoc; }; class LabelStmtBitfields { friend class LabelStmt; unsigned : NumStmtBits; SourceLocation IdentLoc; }; class AttributedStmtBitfields { friend class ASTStmtReader; friend class AttributedStmt; unsigned : NumStmtBits; /// Number of attributes. unsigned NumAttrs : 32 - NumStmtBits; /// The location of the attribute. SourceLocation AttrLoc; }; class IfStmtBitfields { friend class ASTStmtReader; friend class IfStmt; unsigned : NumStmtBits; /// True if this if statement is a constexpr if. unsigned IsConstexpr : 1; /// True if this if statement has storage for an else statement. unsigned HasElse : 1; /// True if this if statement has storage for a variable declaration. unsigned HasVar : 1; /// True if this if statement has storage for an init statement. unsigned HasInit : 1; /// The location of the "if". SourceLocation IfLoc; }; class SwitchStmtBitfields { friend class SwitchStmt; unsigned : NumStmtBits; /// True if the SwitchStmt has storage for an init statement. unsigned HasInit : 1; /// True if the SwitchStmt has storage for a condition variable. unsigned HasVar : 1; /// If the SwitchStmt is a switch on an enum value, records whether all /// the enum values were covered by CaseStmts. The coverage information /// value is meant to be a hint for possible clients. unsigned AllEnumCasesCovered : 1; /// The location of the "switch". SourceLocation SwitchLoc; }; class WhileStmtBitfields { friend class ASTStmtReader; friend class WhileStmt; unsigned : NumStmtBits; /// True if the WhileStmt has storage for a condition variable. unsigned HasVar : 1; /// The location of the "while". SourceLocation WhileLoc; }; class DoStmtBitfields { friend class DoStmt; unsigned : NumStmtBits; /// The location of the "do". SourceLocation DoLoc; }; class ForStmtBitfields { friend class ForStmt; unsigned : NumStmtBits; /// The location of the "for". SourceLocation ForLoc; }; class GotoStmtBitfields { friend class GotoStmt; friend class IndirectGotoStmt; unsigned : NumStmtBits; /// The location of the "goto". SourceLocation GotoLoc; }; class ContinueStmtBitfields { friend class ContinueStmt; unsigned : NumStmtBits; /// The location of the "continue". SourceLocation ContinueLoc; }; class BreakStmtBitfields { friend class BreakStmt; unsigned : NumStmtBits; /// The location of the "break". SourceLocation BreakLoc; }; class ReturnStmtBitfields { friend class ReturnStmt; unsigned : NumStmtBits; /// True if this ReturnStmt has storage for an NRVO candidate. unsigned HasNRVOCandidate : 1; /// The location of the "return". SourceLocation RetLoc; }; class SwitchCaseBitfields { friend class SwitchCase; friend class CaseStmt; unsigned : NumStmtBits; /// Used by CaseStmt to store whether it is a case statement /// of the form case LHS ... RHS (a GNU extension). unsigned CaseStmtIsGNURange : 1; /// The location of the "case" or "default" keyword. 
SourceLocation KeywordLoc; }; //===--- Expression bitfields classes ---===// class ExprBitfields { friend class ASTStmtReader; // deserialization friend class AtomicExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class CallExpr; // ctor friend class CXXConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class CXXNewExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class DeclRefExpr; // computeDependence friend class DependentScopeDeclRefExpr; // ctor friend class DesignatedInitExpr; // ctor friend class Expr; friend class InitListExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ObjCMessageExpr; // ctor friend class OffsetOfExpr; // ctor friend class OpaqueValueExpr; // ctor friend class OverloadExpr; // ctor friend class ParenListExpr; // ctor friend class PseudoObjectExpr; // ctor friend class ShuffleVectorExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 3; unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>; }; enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> }; class ConstantExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class ConstantExpr; unsigned : NumExprBits; /// The kind of result that is tail-allocated. unsigned ResultKind : 2; /// The kind of Result as defined by APValue::Kind. unsigned APValueKind : 4; /// When ResultKind == RSK_Int64, true if the tail-allocated integer is /// unsigned. unsigned IsUnsigned : 1; /// When ResultKind == RSK_Int64. the BitWidth of the tail-allocated /// integer. 7 bits because it is the minimal number of bits to represent a /// value from 0 to 64 (the size of the tail-allocated integer). unsigned BitWidth : 7; /// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the /// tail-allocated APValue. unsigned HasCleanup : 1; /// True if this ConstantExpr was created for immediate invocation. unsigned IsImmediateInvocation : 1; }; class PredefinedExprBitfields { friend class ASTStmtReader; friend class PredefinedExpr; unsigned : NumExprBits; /// The kind of this PredefinedExpr. One of the enumeration values /// in PredefinedExpr::IdentKind. unsigned Kind : 4; /// True if this PredefinedExpr has a trailing "StringLiteral *" /// for the predefined identifier. unsigned HasFunctionName : 1; /// The location of this PredefinedExpr. SourceLocation Loc; }; class DeclRefExprBitfields { friend class ASTStmtReader; // deserialization friend class DeclRefExpr; unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; unsigned NonOdrUseReason : 2; /// The location of the declaration name itself. SourceLocation Loc; }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class StringLiteralBitfields { friend class ASTStmtReader; friend class StringLiteral; unsigned : NumExprBits; /// The kind of this string literal. /// One of the enumeration values of StringLiteral::StringKind. unsigned Kind : 3; /// The width of a single character in bytes. Only values of 1, 2, /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps /// the target + string kind to the appropriate CharByteWidth. 
    unsigned CharByteWidth : 3;
    unsigned IsPascal : 1;

    /// The number of concatenated tokens this string is made of.
    /// This is the number of trailing SourceLocation.
    unsigned NumConcatenated;
  };

  class CharacterLiteralBitfields {
    friend class CharacterLiteral;

    unsigned : NumExprBits;

    unsigned Kind : 3;
  };

  class UnaryOperatorBitfields {
    friend class UnaryOperator;

    unsigned : NumExprBits;

    unsigned Opc : 5;
    unsigned CanOverflow : 1;

    /// This is only meaningful for operations on floating point
    /// types when additional values need to be in trailing storage.
    /// It is 0 otherwise.
    unsigned HasFPFeatures : 1;

    SourceLocation Loc;
  };

  class UnaryExprOrTypeTraitExprBitfields {
    friend class UnaryExprOrTypeTraitExpr;

    unsigned : NumExprBits;

    unsigned Kind : 3;
    unsigned IsType : 1; // true if operand is a type, false if an expression.
  };

  class ArrayOrMatrixSubscriptExprBitfields {
    friend class ArraySubscriptExpr;
    friend class MatrixSubscriptExpr;

    unsigned : NumExprBits;

    SourceLocation RBracketLoc;
  };

  class CallExprBitfields {
    friend class CallExpr;

    unsigned : NumExprBits;

    unsigned NumPreArgs : 1;

    /// True if the callee of the call expression was found using ADL.
    unsigned UsesADL : 1;

    /// True if the call expression has some floating-point features.
    unsigned HasFPFeatures : 1;

    /// Padding used to align OffsetToTrailingObjects to a byte multiple.
    unsigned : 24 - 3 - NumExprBits;

    /// The offset in bytes from the this pointer to the start of the
    /// trailing objects belonging to CallExpr. Intentionally byte sized
    /// for faster access.
    unsigned OffsetToTrailingObjects : 8;
  };
  enum { NumCallExprBits = 32 };

  class MemberExprBitfields {
    friend class ASTStmtReader;
    friend class MemberExpr;

    unsigned : NumExprBits;

    /// IsArrow - True if this is "X->F", false if this is "X.F".
    unsigned IsArrow : 1;

    /// True if this member expression used a nested-name-specifier to
    /// refer to the member, e.g., "x->Base::f", or found its member via
    /// a using declaration. When true, a MemberExprNameQualifier
    /// structure is allocated immediately after the MemberExpr.
    unsigned HasQualifierOrFoundDecl : 1;

    /// True if this member expression specified a template keyword
    /// and/or a template argument list explicitly, e.g., x->f<int>,
    /// x->template f, x->template f<int>.
    /// When true, an ASTTemplateKWAndArgsInfo structure and its
    /// TemplateArguments (if any) are present.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// True if this member expression refers to a method that
    /// was resolved from an overloaded set having size greater than 1.
    unsigned HadMultipleCandidates : 1;

    /// Value of type NonOdrUseReason indicating why this MemberExpr does
    /// not constitute an odr-use of the named declaration. Meaningful only
    /// when naming a static member.
    unsigned NonOdrUseReason : 2;

    /// This is the location of the -> or . in the expression.
    SourceLocation OperatorLoc;
  };

  class CastExprBitfields {
    friend class CastExpr;
    friend class ImplicitCastExpr;

    unsigned : NumExprBits;

    unsigned Kind : 6;
    unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.

    /// True if the cast expression has some floating-point features.
    unsigned HasFPFeatures : 1;

    /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
    /// here. ([implimits] Direct and indirect base classes [16384]).
    unsigned BasePathSize;
  };

  class BinaryOperatorBitfields {
    friend class BinaryOperator;

    unsigned : NumExprBits;

    unsigned Opc : 6;

    /// This is only meaningful for operations on floating point
    /// types when additional values need to be in trailing storage.
    /// It is 0 otherwise.
    unsigned HasFPFeatures : 1;

    SourceLocation OpLoc;
  };

  class InitListExprBitfields {
    friend class InitListExpr;

    unsigned : NumExprBits;

    /// Whether this initializer list originally had a GNU array-range
    /// designator in it. This is a temporary marker used by CodeGen.
    unsigned HadArrayRangeDesignator : 1;
  };

  class ParenListExprBitfields {
    friend class ASTStmtReader;
    friend class ParenListExpr;

    unsigned : NumExprBits;

    /// The number of expressions in the paren list.
    unsigned NumExprs;
  };

  class GenericSelectionExprBitfields {
    friend class ASTStmtReader;
    friend class GenericSelectionExpr;

    unsigned : NumExprBits;

    /// The location of the "_Generic".
    SourceLocation GenericLoc;
  };

  class PseudoObjectExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class PseudoObjectExpr;

    unsigned : NumExprBits;

    // These don't need to be particularly wide, because they're
    // strictly limited by the forms of expressions we permit.
    unsigned NumSubExprs : 8;
    unsigned ResultIndex : 32 - 8 - NumExprBits;
  };

  class SourceLocExprBitfields {
    friend class ASTStmtReader;
    friend class SourceLocExpr;

    unsigned : NumExprBits;

    /// The kind of source location builtin represented by the SourceLocExpr.
    /// Ex. __builtin_LINE, __builtin_FUNCTION, etc.
    unsigned Kind : 2;
  };

  class StmtExprBitfields {
    friend class ASTStmtReader;
    friend class StmtExpr;

    unsigned : NumExprBits;

    /// The number of levels of template parameters enclosing this statement
    /// expression. Used to determine if a statement expression remains
    /// dependent after instantiation.
    unsigned TemplateDepth;
  };

  //===--- C++ Expression bitfields classes ---===//

  class CXXOperatorCallExprBitfields {
    friend class ASTStmtReader;
    friend class CXXOperatorCallExpr;

    unsigned : NumCallExprBits;

    /// The kind of this overloaded operator. One of the enumerator
    /// values of OverloadedOperatorKind.
    unsigned OperatorKind : 6;
  };

  class CXXRewrittenBinaryOperatorBitfields {
    friend class ASTStmtReader;
    friend class CXXRewrittenBinaryOperator;

    unsigned : NumCallExprBits;

    unsigned IsReversed : 1;
  };

  class CXXBoolLiteralExprBitfields {
    friend class CXXBoolLiteralExpr;

    unsigned : NumExprBits;

    /// The value of the boolean literal.
    unsigned Value : 1;

    /// The location of the boolean literal.
    SourceLocation Loc;
  };

  class CXXNullPtrLiteralExprBitfields {
    friend class CXXNullPtrLiteralExpr;

    unsigned : NumExprBits;

    /// The location of the null pointer literal.
    SourceLocation Loc;
  };

  class CXXThisExprBitfields {
    friend class CXXThisExpr;

    unsigned : NumExprBits;

    /// Whether this is an implicit "this".
    unsigned IsImplicit : 1;

    /// The location of the "this".
    SourceLocation Loc;
  };

  class CXXThrowExprBitfields {
    friend class ASTStmtReader;
    friend class CXXThrowExpr;

    unsigned : NumExprBits;

    /// Whether the thrown variable (if any) is in scope.
    unsigned IsThrownVariableInScope : 1;

    /// The location of the "throw".
    SourceLocation ThrowLoc;
  };

  class CXXDefaultArgExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDefaultArgExpr;

    unsigned : NumExprBits;

    /// The location where the default argument expression was used.
    SourceLocation Loc;
  };

  class CXXDefaultInitExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDefaultInitExpr;

    unsigned : NumExprBits;

    /// The location where the default initializer expression was used.
SourceLocation Loc; }; class CXXScalarValueInitExprBitfields { friend class ASTStmtReader; friend class CXXScalarValueInitExpr; unsigned : NumExprBits; SourceLocation RParenLoc; }; class CXXNewExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class CXXNewExpr; unsigned : NumExprBits; /// Was the usage ::new, i.e. is the global new to be used? unsigned IsGlobalNew : 1; /// Do we allocate an array? If so, the first trailing "Stmt *" is the /// size expression. unsigned IsArray : 1; /// Should the alignment be passed to the allocation function? unsigned ShouldPassAlignment : 1; /// If this is an array allocation, does the usual deallocation /// function for the allocated type want to know the allocated size? unsigned UsualArrayDeleteWantsSize : 1; /// What kind of initializer do we have? Could be none, parens, or braces. /// In storage, we distinguish between "none, and no initializer expr", and /// "none, but an implicit initializer expr". unsigned StoredInitializationStyle : 2; /// True if the allocated type was expressed as a parenthesized type-id. unsigned IsParenTypeId : 1; /// The number of placement new arguments. unsigned NumPlacementArgs; }; class CXXDeleteExprBitfields { friend class ASTStmtReader; friend class CXXDeleteExpr; unsigned : NumExprBits; /// Is this a forced global delete, i.e. "::delete"? unsigned GlobalDelete : 1; /// Is this the array form of delete, i.e. "delete[]"? unsigned ArrayForm : 1; /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is /// applied to pointer-to-array type (ArrayFormAsWritten will be false /// while ArrayForm will be true). unsigned ArrayFormAsWritten : 1; /// Does the usual deallocation function for the element type require /// a size_t argument? unsigned UsualArrayDeleteWantsSize : 1; /// Location of the expression. SourceLocation Loc; }; class TypeTraitExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class TypeTraitExpr; unsigned : NumExprBits; /// The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// The number of arguments to this type trait. According to [implimits] /// 8 bits would be enough, but we require (and test for) at least 16 bits /// to mirror FunctionType. unsigned NumArgs; }; class DependentScopeDeclRefExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class DependentScopeDeclRefExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; }; class CXXConstructExprBitfields { friend class ASTStmtReader; friend class CXXConstructExpr; unsigned : NumExprBits; unsigned Elidable : 1; unsigned HadMultipleCandidates : 1; unsigned ListInitialization : 1; unsigned StdInitListInitialization : 1; unsigned ZeroInitialization : 1; unsigned ConstructionKind : 3; SourceLocation Loc; }; class ExprWithCleanupsBitfields { friend class ASTStmtReader; // deserialization friend class ExprWithCleanups; unsigned : NumExprBits; // When false, it must not have side effects. unsigned CleanupsHaveSideEffects : 1; unsigned NumObjects : 32 - 1 - NumExprBits; }; class CXXUnresolvedConstructExprBitfields { friend class ASTStmtReader; friend class CXXUnresolvedConstructExpr; unsigned : NumExprBits; /// The number of arguments used to construct the type. 
    unsigned NumArgs;
  };

  class CXXDependentScopeMemberExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDependentScopeMemberExpr;

    unsigned : NumExprBits;

    /// Whether this member expression used the '->' operator or
    /// the '.' operator.
    unsigned IsArrow : 1;

    /// Whether this member expression has info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// See getFirstQualifierFoundInScope() and the comment listing
    /// the trailing objects.
    unsigned HasFirstQualifierFoundInScope : 1;

    /// The location of the '->' or '.' operator.
    SourceLocation OperatorLoc;
  };

  class OverloadExprBitfields {
    friend class ASTStmtReader;
    friend class OverloadExpr;

    unsigned : NumExprBits;

    /// Whether the name includes info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// Padding used by the derived classes to store various bits. If you
    /// need to add some data here, shrink this padding and add your data
    /// above. NumOverloadExprBits also needs to be updated.
    unsigned : 32 - NumExprBits - 1;

    /// The number of results.
    unsigned NumResults;
  };
  enum { NumOverloadExprBits = NumExprBits + 1 };

  class UnresolvedLookupExprBitfields {
    friend class ASTStmtReader;
    friend class UnresolvedLookupExpr;

    unsigned : NumOverloadExprBits;

    /// True if these lookup results should be extended by
    /// argument-dependent lookup if this is the operand of a function call.
    unsigned RequiresADL : 1;

    /// True if these lookup results are overloaded. This is pretty trivially
    /// rederivable if we urgently need to kill this field.
    unsigned Overloaded : 1;
  };
  static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
                "UnresolvedLookupExprBitfields must be no more than 4 bytes "
                "to avoid trashing OverloadExprBitfields::NumResults!");

  class UnresolvedMemberExprBitfields {
    friend class ASTStmtReader;
    friend class UnresolvedMemberExpr;

    unsigned : NumOverloadExprBits;

    /// Whether this member expression used the '->' operator or
    /// the '.' operator.
    unsigned IsArrow : 1;

    /// Whether the lookup results contain an unresolved using declaration.
    unsigned HasUnresolvedUsing : 1;
  };
  static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
                "UnresolvedMemberExprBitfields must be no more than 4 bytes "
                "to avoid trashing OverloadExprBitfields::NumResults!");

  class CXXNoexceptExprBitfields {
    friend class ASTStmtReader;
    friend class CXXNoexceptExpr;

    unsigned : NumExprBits;

    unsigned Value : 1;
  };

  class SubstNonTypeTemplateParmExprBitfields {
    friend class ASTStmtReader;
    friend class SubstNonTypeTemplateParmExpr;

    unsigned : NumExprBits;

    /// The location of the non-type template parameter reference.
    SourceLocation NameLoc;
  };

  class LambdaExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class LambdaExpr;

    unsigned : NumExprBits;

    /// The default capture kind, which is a value of type
    /// LambdaCaptureDefault.
    unsigned CaptureDefault : 2;

    /// Whether this lambda had an explicit parameter list vs. an
    /// implicit (and empty) parameter list.
    unsigned ExplicitParams : 1;

    /// Whether this lambda had the result type explicitly specified.
    unsigned ExplicitResultType : 1;

    /// The number of captures.
unsigned NumCaptures : 16; }; class RequiresExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class RequiresExpr; unsigned : NumExprBits; unsigned IsSatisfied : 1; SourceLocation RequiresKWLoc; }; //===--- C++ Coroutines TS bitfields classes ---===// class CoawaitExprBitfields { friend class CoawaitExpr; unsigned : NumExprBits; unsigned IsImplicit : 1; }; //===--- Obj-C Expression bitfields classes ---===// class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; //===--- Clang Extensions bitfields classes ---===// class OpaqueValueExprBitfields { friend class ASTStmtReader; friend class OpaqueValueExpr; unsigned : NumExprBits; /// The OVE is a unique semantic reference to its source expression if this /// bit is set to true. unsigned IsUnique : 1; SourceLocation Loc; }; union { // Same order as in StmtNodes.td. // Statements StmtBitfields StmtBits; NullStmtBitfields NullStmtBits; CompoundStmtBitfields CompoundStmtBits; LabelStmtBitfields LabelStmtBits; AttributedStmtBitfields AttributedStmtBits; IfStmtBitfields IfStmtBits; SwitchStmtBitfields SwitchStmtBits; WhileStmtBitfields WhileStmtBits; DoStmtBitfields DoStmtBits; ForStmtBitfields ForStmtBits; GotoStmtBitfields GotoStmtBits; ContinueStmtBitfields ContinueStmtBits; BreakStmtBitfields BreakStmtBits; ReturnStmtBitfields ReturnStmtBits; SwitchCaseBitfields SwitchCaseBits; // Expressions ExprBitfields ExprBits; ConstantExprBitfields ConstantExprBits; PredefinedExprBitfields PredefinedExprBits; DeclRefExprBitfields DeclRefExprBits; FloatingLiteralBitfields FloatingLiteralBits; StringLiteralBitfields StringLiteralBits; CharacterLiteralBitfields CharacterLiteralBits; UnaryOperatorBitfields UnaryOperatorBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits; CallExprBitfields CallExprBits; MemberExprBitfields MemberExprBits; CastExprBitfields CastExprBits; BinaryOperatorBitfields BinaryOperatorBits; InitListExprBitfields InitListExprBits; ParenListExprBitfields ParenListExprBits; GenericSelectionExprBitfields GenericSelectionExprBits; PseudoObjectExprBitfields PseudoObjectExprBits; SourceLocExprBitfields SourceLocExprBits; // GNU Extensions. 
StmtExprBitfields StmtExprBits; // C++ Expressions CXXOperatorCallExprBitfields CXXOperatorCallExprBits; CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits; CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits; CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits; CXXThisExprBitfields CXXThisExprBits; CXXThrowExprBitfields CXXThrowExprBits; CXXDefaultArgExprBitfields CXXDefaultArgExprBits; CXXDefaultInitExprBitfields CXXDefaultInitExprBits; CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits; CXXNewExprBitfields CXXNewExprBits; CXXDeleteExprBitfields CXXDeleteExprBits; TypeTraitExprBitfields TypeTraitExprBits; DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits; CXXConstructExprBitfields CXXConstructExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits; CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits; OverloadExprBitfields OverloadExprBits; UnresolvedLookupExprBitfields UnresolvedLookupExprBits; UnresolvedMemberExprBitfields UnresolvedMemberExprBits; CXXNoexceptExprBitfields CXXNoexceptExprBits; SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits; LambdaExprBitfields LambdaExprBits; RequiresExprBitfields RequiresExprBits; // C++ Coroutines TS expressions CoawaitExprBitfields CoawaitBits; // Obj-C Expressions ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; // Clang Extensions OpaqueValueExprBitfields OpaqueValueExprBits; }; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void *operator new(size_t bytes, void *mem) noexcept { return mem; } void operator delete(void *, const ASTContext &, unsigned) noexcept {} void operator delete(void *, const ASTContext *, unsigned) noexcept {} void operator delete(void *, size_t) noexcept {} void operator delete(void *, void *) noexcept {} public: /// A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell {}; /// The likelihood of a branch being taken. enum Likelihood { LH_Unlikely = -1, ///< Branch has the [[unlikely]] attribute. LH_None, ///< No attribute set or branches of the IfStmt have ///< the same attribute. LH_Likely ///< Branch has the [[likely]] attribute. }; protected: /// Iterator for iterating over Stmt * arrays that contain only T *. /// /// This is needed because AST nodes use Stmt* arrays to store /// references to children (to be compatible with StmtIterator). template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *> struct CastIterator : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *, std::random_access_iterator_tag, TPtr> { using Base = typename CastIterator::iterator_adaptor_base; CastIterator() : Base(nullptr) {} CastIterator(StmtPtr *I) : Base(I) {} typename Base::value_type operator*() const { return cast_or_null<T>(*this->I); } }; /// Const iterator for iterating over Stmt * arrays that contain only T *. template <typename T> using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>; using ExprIterator = CastIterator<Expr>; using ConstExprIterator = ConstCastIterator<Expr>; private: /// Whether statistic collection is enabled. 
static bool StatisticsEnabled; protected: /// Construct an empty statement. explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {} public: Stmt() = delete; Stmt(const Stmt &) = delete; Stmt(Stmt &&) = delete; Stmt &operator=(const Stmt &) = delete; Stmt &operator=(Stmt &&) = delete; Stmt(StmtClass SC) { static_assert(sizeof(*this) <= 8, "changing bitfields changed sizeof(Stmt)"); static_assert(sizeof(*this) % alignof(void *) == 0, "Insufficient alignment!"); StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getBeginLoc() const LLVM_READONLY; SourceLocation getEndLoc() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// \returns the likelihood of a set of attributes. static Likelihood getLikelihood(ArrayRef<const Attr *> Attrs); /// \returns the likelihood of a statement. static Likelihood getLikelihood(const Stmt *S); /// \returns the likelihood of the 'then' branch of an 'if' statement. The /// 'else' branch is required to determine whether both branches specify the /// same likelihood, which affects the result. static Likelihood getLikelihood(const Stmt *Then, const Stmt *Else); /// \returns whether the likelihood of the branches of an if statement are /// conflicting. When the first element is \c true there's a conflict and /// the Attr's are the conflicting attributes of the Then and Else Stmt. static std::tuple<bool, const Attr *, const Attr *> determineLikelihoodConflict(const Stmt *Then, const Stmt *Else); /// Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(raw_ostream &OS, const ASTContext &Context) const; /// \return Unique reproducible object identifier int64_t getID(const ASTContext &Context) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0, StringRef NewlineSymbol = "\n", const ASTContext *Context = nullptr) const; /// Pretty-prints in JSON format. void printJson(raw_ostream &Out, PrinterHelper *Helper, const PrintingPolicy &Policy, bool AddQuotes) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip no-op (attributed, compound) container stmts and skip captured /// stmt at the top, if \a IgnoreCaptured is true. 
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
      const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpressions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } const_child_range children() const { auto Children = const_cast<DeclStmt *>(this)->children(); return const_child_range(Children); } using decl_iterator = DeclGroupRef::iterator; using const_decl_iterator = DeclGroupRef::const_iterator; using decl_range = llvm::iterator_range<decl_iterator>; using decl_const_range = llvm::iterator_range<const_decl_iterator>; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } using reverse_decl_iterator = std::reverse_iterator<decl_iterator>; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass) { NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro; setSemiLoc(L); } /// Build an empty null statement. explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {} SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; } void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; } bool hasLeadingEmptyMacro() const { return NullStmtBits.HasLeadingEmptyMacro; } SourceLocation getBeginLoc() const { return getSemiLoc(); } SourceLocation getEndLoc() const { return getSemiLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. class CompoundStmt final : public Stmt, private llvm::TrailingObjects<CompoundStmt, Stmt *> { friend class ASTStmtReader; friend TrailingObjects; /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits. SourceLocation RBraceLoc; CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB); explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {} void setStmts(ArrayRef<Stmt *> Stmts); public: static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB); // Build an empty compound statement with a location. 
explicit CompoundStmt(SourceLocation Loc) : Stmt(CompoundStmtClass), RBraceLoc(Loc) { CompoundStmtBits.NumStmts = 0; CompoundStmtBits.LBraceLoc = Loc; } // Build an empty compound statement. static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts); bool body_empty() const { return CompoundStmtBits.NumStmts == 0; } unsigned size() const { return CompoundStmtBits.NumStmts; } using body_iterator = Stmt **; using body_range = llvm::iterator_range<body_iterator>; body_range body() { return body_range(body_begin(), body_end()); } body_iterator body_begin() { return getTrailingObjects<Stmt *>(); } body_iterator body_end() { return body_begin() + size(); } Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; } Stmt *body_back() { return !body_empty() ? body_begin()[size() - 1] : nullptr; } using const_body_iterator = Stmt *const *; using body_const_range = llvm::iterator_range<const_body_iterator>; body_const_range body() const { return body_const_range(body_begin(), body_end()); } const_body_iterator body_begin() const { return getTrailingObjects<Stmt *>(); } const_body_iterator body_end() const { return body_begin() + size(); } const Stmt *body_front() const { return !body_empty() ? body_begin()[0] : nullptr; } const Stmt *body_back() const { return !body_empty() ? body_begin()[size() - 1] : nullptr; } using reverse_body_iterator = std::reverse_iterator<body_iterator>; reverse_body_iterator body_rbegin() { return reverse_body_iterator(body_end()); } reverse_body_iterator body_rend() { return reverse_body_iterator(body_begin()); } using const_reverse_body_iterator = std::reverse_iterator<const_body_iterator>; const_reverse_body_iterator body_rbegin() const { return const_reverse_body_iterator(body_end()); } const_reverse_body_iterator body_rend() const { return const_reverse_body_iterator(body_begin()); } // Get the Stmt that StmtExpr would consider to be the result of this // compound statement. This is used by StmtExpr to properly emulate the GCC // compound expression extension, which ignores trailing NullStmts when // getting the result of the expression. // i.e. ({ 5;;; }) // ^^ ignored // If we don't find something that isn't a NullStmt, just return the last // Stmt. Stmt *getStmtExprResult() { for (auto *B : llvm::reverse(body())) { if (!isa<NullStmt>(B)) return B; } return body_back(); } const Stmt *getStmtExprResult() const { return const_cast<CompoundStmt *>(this)->getStmtExprResult(); } SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; } SourceLocation getEndLoc() const { return RBraceLoc; } SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; } SourceLocation getRBracLoc() const { return RBraceLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == CompoundStmtClass; } // Iterators child_range children() { return child_range(body_begin(), body_end()); } const_child_range children() const { return const_child_range(body_begin(), body_end()); } }; // SwitchCase is the base class for CaseStmt and DefaultStmt, class SwitchCase : public Stmt { protected: /// The location of the ":". SourceLocation ColonLoc; // The location of the "case" or "default" keyword. Stored in SwitchCaseBits. // SourceLocation KeywordLoc; /// A pointer to the following CaseStmt or DefaultStmt class, /// used by SwitchStmt. 
// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
  SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  inline Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which are
  // optional. Note that it would be more convenient to put the optional
  // trailing objects at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allows ranges in case statements of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);
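  // Example (illustrative sketch): building a GNU range case such as
  // 'case 1 ... 9:'. `Ctx`, the expressions, and the locations are assumed
  // to come from the caller; passing a non-null RHS is what marks the case
  // as a range. The substatement is filled in separately via setSubStmt():
  // \code
  //   CaseStmt *CS = CaseStmt::Create(Ctx, LHSExpr, RHSExpr, CaseLoc,
  //                                   EllipsisLoc, ColonLoc);
  //   assert(CS->caseStmtIsGNURange());
  //   CS->setSubStmt(Body);
  // \endcode

  /// Build an empty case statement.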
static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange); /// True if this case statement is of the form case LHS ... RHS, which /// is a GNU extension. In this case the RHS can be obtained with getRHS() /// and the location of the ellipsis can be obtained with getEllipsisLoc(). bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; } SourceLocation getCaseLoc() const { return getKeywordLoc(); } void setCaseLoc(SourceLocation L) { setKeywordLoc(L); } /// Get the location of the ... in a case statement of the form LHS ... RHS. SourceLocation getEllipsisLoc() const { return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>() : SourceLocation(); } /// Set the location of the ... in a case statement of the form LHS ... RHS. /// Assert that this case statement is of this form. void setEllipsisLoc(SourceLocation L) { assert( caseStmtIsGNURange() && "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!"); *getTrailingObjects<SourceLocation>() = L; } Expr *getLHS() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]); } const Expr *getLHS() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]); } void setLHS(Expr *Val) { getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val); } Expr *getRHS() { return caseStmtIsGNURange() ? reinterpret_cast<Expr *>( getTrailingObjects<Stmt *>()[rhsOffset()]) : nullptr; } const Expr *getRHS() const { return caseStmtIsGNURange() ? reinterpret_cast<Expr *>( getTrailingObjects<Stmt *>()[rhsOffset()]) : nullptr; } void setRHS(Expr *Val) { assert(caseStmtIsGNURange() && "setRHS but this is not a case stmt of the form LHS ... RHS!"); getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val); } Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; } const Stmt *getSubStmt() const { return getTrailingObjects<Stmt *>()[subStmtOffset()]; } void setSubStmt(Stmt *S) { getTrailingObjects<Stmt *>()[subStmtOffset()] = S; } SourceLocation getBeginLoc() const { return getKeywordLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. const CaseStmt *CS = this; while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } const_child_range children() const { return const_child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } }; class DefaultStmt : public SwitchCase { Stmt *SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// Build an empty default statement. 
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};

SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  else if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

Stmt *SwitchCase::getSubStmt() {
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  else if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  using Stmt::Stmt;

public:
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    const ValueStmt *ConstThis = this;
    return const_cast<Expr *>(ConstThis->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }
};

/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }

  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};
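// Example (illustrative sketch): getExprStmt() is how clients look through a
// value statement to the underlying expression, e.g. through the LabelStmt
// produced by 'done: cleanup();'. `VS` is assumed to be a valid
// 'const ValueStmt *':
// \code
//   if (const Expr *E = VS->getExprStmt())
//     llvm::errs() << "value of type " << E->getType().getAsString() << "\n";
// \endcode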
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which are
  // optional. Note that it would be more convenient to put the optional
  // trailing objects at the end but this would change the order of the
  // children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  SourceLocation LParenLoc;
  SourceLocation RParenLoc;

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }
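  // Example (illustrative sketch): with both an init statement and a
  // condition variable present, the trailing "Stmt *" array is laid out as
  // [init, condvar, cond, then, else], so condOffset() == 2 and
  // thenOffset() == 3. Clients never index the array directly; they go
  // through the typed accessors. `If` is assumed to be a valid 'IfStmt *':
  // \code
  //   if (const Stmt *Init = If->getInit())
  //     Init->dumpColor();
  //   const Expr *Cond = If->getCond();
  // \endcode

  /// Build an if/then/else statement.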
IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond, SourceLocation LParenLoc, SourceLocation RParenLoc, Stmt *Then, SourceLocation EL, Stmt *Else); /// Build an empty if/then/else statement. explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit); public: /// Create an IfStmt. static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond, SourceLocation LPL, SourceLocation RPL, Stmt *Then, SourceLocation EL = SourceLocation(), Stmt *Else = nullptr); /// Create an empty IfStmt optionally with storage for an else statement, /// condition variable and init expression. static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar, bool HasInit); /// True if this IfStmt has the storage for an init statement. bool hasInitStorage() const { return IfStmtBits.HasInit; } /// True if this IfStmt has storage for a variable declaration. bool hasVarStorage() const { return IfStmtBits.HasVar; } /// True if this IfStmt has storage for an else statement. bool hasElseStorage() const { return IfStmtBits.HasElse; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; } const Stmt *getThen() const { return getTrailingObjects<Stmt *>()[thenOffset()]; } void setThen(Stmt *Then) { getTrailingObjects<Stmt *>()[thenOffset()] = Then; } Stmt *getElse() { return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()] : nullptr; } const Stmt *getElse() const { return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()] : nullptr; } void setElse(Stmt *Else) { assert(hasElseStorage() && "This if statement has no storage for an else statement!"); getTrailingObjects<Stmt *>()[elseOffset()] = Else; } /// Retrieve the variable declared in this "if" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// if (int x = foo()) { /// printf("x is %d", x); /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<IfStmt *>(this)->getConditionVariable(); } /// Set the condition variable for this if statement. /// The if statement must have storage for the condition variable. void setConditionVariable(const ASTContext &Ctx, VarDecl *V); /// If this IfStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } Stmt *getInit() { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } const Stmt *getInit() const { return hasInitStorage() ? 
getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } void setInit(Stmt *Init) { assert(hasInitStorage() && "This if statement has no storage for an init statement!"); getTrailingObjects<Stmt *>()[initOffset()] = Init; } SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; } void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; } SourceLocation getElseLoc() const { return hasElseStorage() ? *getTrailingObjects<SourceLocation>() : SourceLocation(); } void setElseLoc(SourceLocation ElseLoc) { assert(hasElseStorage() && "This if statement has no storage for an else statement!"); *getTrailingObjects<SourceLocation>() = ElseLoc; } bool isConstexpr() const { return IfStmtBits.IsConstexpr; } void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; } /// If this is an 'if constexpr', determine which substatement will be taken. /// Otherwise, or if the condition is value-dependent, returns None. Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const; bool isObjCAvailabilityCheck() const; SourceLocation getBeginLoc() const { return getIfLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { if (getElse()) return getElse()->getEndLoc(); return getThen()->getEndLoc(); } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; } // Iterators over subexpressions. The iterators will include iterating // over the initialization expression referenced by the condition variable. child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } const_child_range children() const { return const_child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } static bool classof(const Stmt *T) { return T->getStmtClass() == IfStmtClass; } }; /// SwitchStmt - This represents a 'switch' stmt. class SwitchStmt final : public Stmt, private llvm::TrailingObjects<SwitchStmt, Stmt *> { friend TrailingObjects; /// Points to a linked list of case and default statements. SwitchCase *FirstCase; // SwitchStmt is followed by several trailing objects, // some of which optional. Note that it would be more convenient to // put the optional trailing objects at the end but this would change // the order in children(). // The trailing objects are in order: // // * A "Stmt *" for the init statement. // Present if and only if hasInitStorage(). // // * A "Stmt *" for the condition variable. // Present if and only if hasVarStorage(). This is in fact a "DeclStmt *". // // * A "Stmt *" for the condition. // Always present. This is in fact an "Expr *". // // * A "Stmt *" for the body. // Always present. enum { InitOffset = 0, BodyOffsetFromCond = 1 }; enum { NumMandatoryStmtPtr = 2 }; SourceLocation LParenLoc; SourceLocation RParenLoc; unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage(); } unsigned initOffset() const { return InitOffset; } unsigned varOffset() const { return InitOffset + hasInitStorage(); } unsigned condOffset() const { return InitOffset + hasInitStorage() + hasVarStorage(); } unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; } /// Build a switch statement. 
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond,
             SourceLocation LParenLoc, SourceLocation RParenLoc);

  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond, SourceLocation LParenLoc,
                            SourceLocation RParenLoc);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  ///   case 0: break;
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
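  // Example (illustrative sketch): because addSwitchCase() pushes onto the
  // front of the intrusive list, iterating with getNextSwitchCase() visits
  // the cases in reverse source order. `SS` is assumed to be a valid
  // 'const SwitchStmt *':
  // \code
  //   for (const SwitchCase *SC = SS->getSwitchCaseList(); SC;
  //        SC = SC->getNextSwitchCase())
  //     SC->getSubStmt()->dumpColor();
  // \endcode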
  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which are optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  SourceLocation LParenLoc, RParenLoc;

  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL, SourceLocation LParenLoc,
            SourceLocation RParenLoc);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
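  // Example (illustrative sketch): the offset arithmetic above means that
  // for 'while (int x = f()) body' the trailing array is
  // [condvar, cond, body], and without a condition variable it is just
  // [cond, body]. `WS` is assumed to be a valid 'const WhileStmt *':
  // \code
  //   if (const VarDecl *VD = WS->getConditionVariable())
  //     llvm::errs() << "condition variable: " << VD->getName() << "\n";
  // \endcode

  /// Create a while statement.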
static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body, SourceLocation WL, SourceLocation LParenLoc, SourceLocation RParenLoc); /// Create an empty while statement optionally with storage for /// a condition variable. static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar); /// True if this WhileStmt has storage for a condition variable. bool hasVarStorage() const { return WhileStmtBits.HasVar; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; } const Stmt *getBody() const { return getTrailingObjects<Stmt *>()[bodyOffset()]; } void setBody(Stmt *Body) { getTrailingObjects<Stmt *>()[bodyOffset()] = Body; } /// Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<WhileStmt *>(this)->getConditionVariable(); } /// Set the condition variable of this while statement. /// The while statement must have storage for it. void setConditionVariable(const ASTContext &Ctx, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; } void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getWhileLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getBody()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } const_child_range children() const { return const_child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } }; /// DoStmt - This represents a 'do/while' stmt. class DoStmt : public Stmt { enum { BODY, COND, END_EXPR }; Stmt *SubExprs[END_EXPR]; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) { setCond(Cond); setBody(Body); setDoLoc(DL); } /// Build an empty do-while statement. 
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {} Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(SubExprs[COND]); } void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *Body) { SubExprs[BODY] = Body; } SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; } void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getDoLoc(); } SourceLocation getEndLoc() const { return getRParenLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } const_child_range children() const { return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. class ForStmt : public Stmt { enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {} Stmt *getInit() { return SubExprs[INIT]; } /// Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. /// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForStmtBits.ForLoc; } void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getForLoc(); } SourceLocation getEndLoc() const { return getBody()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } const_child_range children() const { return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } }; /// GotoStmt - This represents a direct goto. class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) { setGotoLoc(GL); } /// Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {} LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; } void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getBeginLoc() const { return getGotoLoc(); } SourceLocation getEndLoc() const { return getLabelLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// IndirectGotoStmt - This represents an indirect goto. class IndirectGotoStmt : public Stmt { SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) { setTarget(target); setGotoLoc(gotoLoc); } /// Build an empty indirect goto statement. 
explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) {} void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr *>(Target); } const Expr *getTarget() const { return reinterpret_cast<const Expr *>(Target); } void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt *>(this)->getConstantTarget(); } SourceLocation getBeginLoc() const { return getGotoLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target + 1); } const_child_range children() const { return const_child_range(&Target, &Target + 1); } }; /// ContinueStmt - This represents a continue. class ContinueStmt : public Stmt { public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) { setContinueLoc(CL); } /// Build an empty continue statement. explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {} SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; } SourceLocation getBeginLoc() const { return getContinueLoc(); } SourceLocation getEndLoc() const { return getContinueLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// BreakStmt - This represents a break. class BreakStmt : public Stmt { public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) { setBreakLoc(BL); } /// Build an empty break statement. explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {} SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; } void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; } SourceLocation getBeginLoc() const { return getBreakLoc(); } SourceLocation getEndLoc() const { return getBreakLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. class ReturnStmt final : public Stmt, private llvm::TrailingObjects<ReturnStmt, const VarDecl *> { friend TrailingObjects; /// The return expression. Stmt *RetExpr; // ReturnStmt is followed optionally by a trailing "const VarDecl *" // for the NRVO candidate. 
  // Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt(SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
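  // Example (illustrative sketch): distinguishing write-only outputs from
  // read-modify-write outputs via the constraint string. `AS` is assumed to
  // be a valid 'const AsmStmt *':
  // \code
  //   for (unsigned I = 0, E = AS->getNumOutputs(); I != E; ++I) {
  //     // "+" constraints are both read and written; "=" are write-only.
  //     if (AS->isOutputPlusConstraint(I))
  //       llvm::errs() << "output " << I << " is read-modify-write\n";
  //   }
  // \endcode

  /// Build an empty inline-assembly statement.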
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {} SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getBeginLoc() const LLVM_READONLY { return {}; } SourceLocation getEndLoc() const LLVM_READONLY { return {}; } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. using inputs_iterator = ExprIterator; using const_inputs_iterator = ConstExprIterator; using inputs_range = llvm::iterator_range<inputs_iterator>; using inputs_const_range = llvm::iterator_range<const_inputs_iterator>; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. 
using outputs_iterator = ExprIterator; using const_outputs_iterator = ConstExprIterator; using outputs_range = llvm::iterator_range<outputs_iterator>; using outputs_const_range = llvm::iterator_range<const_outputs_iterator>; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } const_child_range children() const { return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. class GCCAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. StringLiteral **Constraints = nullptr; StringLiteral **Clobbers = nullptr; IdentifierInfo **Names = nullptr; unsigned NumLabels = 0; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, unsigned numlabels, SourceLocation rparenloc); /// Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {} SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) {} bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. 
  /// This handles canonicalization and translation of strings from GCC
  /// syntax to LLVM IR syntax, and handles flattening of named references
  /// like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();
    return {};
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();
    return {};
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Labels ---===//

  bool isAsmGoto() const { return NumLabels > 0; }

  unsigned getNumLabels() const { return NumLabels; }

  IdentifierInfo *getLabelIdentifier(unsigned i) const {
    return Names[i + NumOutputs + NumInputs];
  }

  AddrLabelExpr *getLabelExpr(unsigned i) const;
  StringRef getLabelName(unsigned i) const;
  using labels_iterator = CastIterator<AddrLabelExpr>;
  using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
  using labels_range = llvm::iterator_range<labels_iterator>;
  using labels_const_range = llvm::iterator_range<const_labels_iterator>;

  labels_iterator begin_labels() { return &Exprs[0] + NumOutputs + NumInputs; }

  labels_iterator end_labels() {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_range labels() { return labels_range(begin_labels(), end_labels()); }

  const_labels_iterator begin_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  const_labels_iterator end_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_const_range labels() const {
    return labels_const_range(begin_labels(), end_labels());
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints, Stmt **Exprs,
                                      unsigned NumOutputs, unsigned NumInputs,
                                      unsigned NumLabels,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//
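  // Example (illustrative sketch): 'asm goto' statements carry their branch
  // targets in Exprs after the outputs and inputs, which is exactly the tail
  // that labels() iterates. `GAS` is assumed to be a valid
  // 'const GCCAsmStmt *':
  // \code
  //   if (GAS->isAsmGoto())
  //     for (unsigned I = 0, E = GAS->getNumLabels(); I != E; ++I)
  //       llvm::errs() << "target: " << GAS->getLabelName(I) << "\n";
  // \endcode

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.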
int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. class MSAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks = 0; Token *AsmToks = nullptr; StringRef *Constraints = nullptr; StringRef *Clobbers = nullptr; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {} SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } const_child_range children() const { return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); explicit 
SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {} public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children, Children+2); } const_child_range children() const { return const_child_range(Children, Children + 2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {} public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getEndLoc(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } const_child_range children() const { return const_child_range(&Block, &Block + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {} public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children, Children+2); } const_child_range children() const { return const_child_range(Children, Children + 2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// Build an empty __leave statement. 
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {} SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_ByCopy, VCK_VLAType, }; /// Describes the capture of either a variable, or 'this', or /// variable-length array type. class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: friend class ASTStmtReader; /// Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr); /// Determine the kind of capture. VariableCaptureKind getCaptureKind() const; /// Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// Determine whether this capture handles a variable (by reference). bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// Determine whether this capture handles a variable by copy. bool capturesVariableByCopy() const { return getCaptureKind() == VCK_ByCopy; } /// Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const; }; private: /// The number of variable captured, including 'this'. unsigned NumCaptures; /// The pointer part is the implicit the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind; /// The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl = nullptr; /// Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// Construct an empty captured statement. 
CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); } Stmt *const *getStoredStmts() const { return reinterpret_cast<Stmt *const *>(this + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: friend class ASTStmtReader; static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; } /// Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl(); const CapturedDecl *getCapturedDecl() const; /// Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D); /// Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const; /// Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind); /// Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// An iterator that walks over the captures. using capture_iterator = Capture *; using const_capture_iterator = const Capture *; using capture_range = llvm::iterator_range<capture_iterator>; using capture_const_range = llvm::iterator_range<const_capture_iterator>; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// Iterator that walks over the capture initialization arguments. using capture_init_iterator = Expr **; using capture_init_range = llvm::iterator_range<capture_init_iterator>; /// Const iterator that walks over the capture initialization /// arguments. using const_capture_init_iterator = Expr *const *; using const_capture_init_range = llvm::iterator_range<const_capture_init_iterator>; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// Retrieve the first initialization argument. capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// Retrieve the iterator pointing one past the last initialization /// argument. 
capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getBeginLoc() const LLVM_READONLY { return getCapturedStmt()->getBeginLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getCapturedStmt()->getEndLoc(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); const_child_range children() const; }; } // namespace clang #endif // LLVM_CLANG_AST_STMT_H
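// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the header above): one common way client
// code consumes the CapturedStmt interface declared here is through a
// RecursiveASTVisitor. The visitor name and diagnostic text are hypothetical;
// the clang APIs used (RecursiveASTVisitor, captures(), getCapturedVar())
// are the public ones this header and clang provide.

#include "clang/AST/RecursiveASTVisitor.h"
#include "llvm/Support/raw_ostream.h"

class CaptureDumper : public clang::RecursiveASTVisitor<CaptureDumper> {
public:
  bool VisitCapturedStmt(clang::CapturedStmt *S) {
    llvm::errs() << "captured region with " << S->capture_size()
                 << " capture(s)\n";
    for (const clang::CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesThis())
        llvm::errs() << "  captures 'this'\n";
      else if (C.capturesVariableArrayType())
        llvm::errs() << "  captures a VLA type\n";
      else // VCK_ByRef or VCK_ByCopy: a VarDecl is available
        llvm::errs() << "  captures variable '"
                     << C.getCapturedVar()->getNameAsString() << "'\n";
    }
    return true; // keep traversing the rest of the AST
  }
};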
parallel_fann.c
/* * parallel_FANN.c * Author: Alessandro Pietro Bardelli */ #ifndef DISABLE_PARALLEL_FANN #include <omp.h> #include "parallel_fann.h" #include "config.h" #include "fann.h" FANN_EXTERNAL float FANN_API fann_train_epoch_batch_parallel(struct fann *ann, struct fann_train_data *data, const unsigned int threadnumb) { /*vector<struct fann *> ann_vect(threadnumb);*/ struct fann** ann_vect= (struct fann**) malloc(threadnumb * sizeof(struct fann*)); int i=0,j=0; fann_reset_MSE(ann); //generate copies of the ann omp_set_dynamic(0); omp_set_num_threads(threadnumb); #pragma omp parallel private(j) { #pragma omp for schedule(static) for(i=0; i<(int)threadnumb; i++) { ann_vect[i]=fann_copy(ann); } //parallel computing of the updates #pragma omp for schedule(static) for(i = 0; i < (int)data->num_data; i++) { j=omp_get_thread_num(); fann_run(ann_vect[j], data->input[i]); fann_compute_MSE(ann_vect[j], data->output[i]); fann_backpropagate_MSE(ann_vect[j]); fann_update_slopes_batch(ann_vect[j], ann_vect[j]->first_layer + 1, ann_vect[j]->last_layer - 1); } } //parallel update of the weights { const unsigned int num_data=data->num_data; const unsigned int first_weight=0; const unsigned int past_end=ann->total_connections; fann_type *weights = ann->weights; const fann_type epsilon = ann->learning_rate / num_data; omp_set_dynamic(0); omp_set_num_threads(threadnumb); #pragma omp parallel { #pragma omp for schedule(static) for(i=first_weight; i < (int)past_end; i++) { fann_type temp_slopes=0.0; unsigned int k; fann_type *train_slopes; for(k=0;k<threadnumb;++k) { train_slopes=ann_vect[k]->train_slopes; temp_slopes+= train_slopes[i]; train_slopes[i]=0.0; } weights[i] += temp_slopes*epsilon; } } } //merge of MSEs for(i=0;i<(int)threadnumb;++i) { ann->MSE_value+= ann_vect[i]->MSE_value; ann->num_MSE+=ann_vect[i]->num_MSE; fann_destroy(ann_vect[i]); } free(ann_vect); return fann_get_MSE(ann); } FANN_EXTERNAL float FANN_API fann_train_epoch_irpropm_parallel(struct fann *ann, struct fann_train_data *data, const unsigned int threadnumb) { struct fann** ann_vect= (struct fann**) malloc(threadnumb * sizeof(struct fann*)); int i=0,j=0; if(ann->prev_train_slopes == NULL) { fann_clear_train_arrays(ann); } //#define THREADNUM 1 fann_reset_MSE(ann); /*vector<struct fann *> ann_vect(threadnumb);*/ //generate copies of the ann omp_set_dynamic(0); omp_set_num_threads(threadnumb); #pragma omp parallel private(j) { #pragma omp for schedule(static) for(i=0; i<(int)threadnumb; i++) { ann_vect[i]=fann_copy(ann); } //parallel computing of the updates #pragma omp for schedule(static) for(i = 0; i < (int)data->num_data; i++) { j=omp_get_thread_num(); fann_run(ann_vect[j], data->input[i]); fann_compute_MSE(ann_vect[j], data->output[i]); fann_backpropagate_MSE(ann_vect[j]); fann_update_slopes_batch(ann_vect[j], ann_vect[j]->first_layer + 1, ann_vect[j]->last_layer - 1); } } { fann_type *weights = ann->weights; fann_type *prev_steps = ann->prev_steps; fann_type *prev_train_slopes = ann->prev_train_slopes; fann_type next_step; const float increase_factor = ann->rprop_increase_factor; //1.2; const float decrease_factor = ann->rprop_decrease_factor; //0.5; const float delta_min = ann->rprop_delta_min; //0.0; const float delta_max = ann->rprop_delta_max; //50.0; const unsigned int first_weight=0; const unsigned int past_end=ann->total_connections; omp_set_dynamic(0); omp_set_num_threads(threadnumb); #pragma omp parallel private(next_step) { #pragma omp for schedule(static) for(i=first_weight; i < (int)past_end; i++) { fann_type prev_slope, 
same_sign; const fann_type prev_step = fann_max(prev_steps[i], (fann_type) 0.0001); // prev_step may not be zero because then the training will stop fann_type temp_slopes=0.0; unsigned int k; fann_type *train_slopes; for(k=0;k<threadnumb;++k) { train_slopes=ann_vect[k]->train_slopes; temp_slopes+= train_slopes[i]; train_slopes[i]=0.0; } prev_slope = prev_train_slopes[i]; same_sign = prev_slope * temp_slopes; if(same_sign >= 0.0) next_step = fann_min(prev_step * increase_factor, delta_max); else { next_step = fann_max(prev_step * decrease_factor, delta_min); temp_slopes = 0; } if(temp_slopes < 0) { weights[i] -= next_step; if(weights[i] < -1500) weights[i] = -1500; } else { weights[i] += next_step; if(weights[i] > 1500) weights[i] = 1500; } // update global data arrays prev_steps[i] = next_step; prev_train_slopes[i] = temp_slopes; } } } //merge of MSEs for(i=0;i<(int)threadnumb;++i) { ann->MSE_value+= ann_vect[i]->MSE_value; ann->num_MSE+=ann_vect[i]->num_MSE; fann_destroy(ann_vect[i]); } free(ann_vect); return fann_get_MSE(ann); } FANN_EXTERNAL float FANN_API fann_train_epoch_quickprop_parallel(struct fann *ann, struct fann_train_data *data, const unsigned int threadnumb) { struct fann** ann_vect= (struct fann**) malloc(threadnumb * sizeof(struct fann*)); int i=0,j=0; if(ann->prev_train_slopes == NULL) { fann_clear_train_arrays(ann); } //#define THREADNUM 1 fann_reset_MSE(ann); /*vector<struct fann *> ann_vect(threadnumb);*/ //generate copies of the ann omp_set_dynamic(0); omp_set_num_threads(threadnumb); #pragma omp parallel private(j) { #pragma omp for schedule(static) for(i=0; i<(int)threadnumb; i++) { ann_vect[i]=fann_copy(ann); } //parallel computing of the updates #pragma omp for schedule(static) for(i = 0; i < (int)data->num_data; i++) { j=omp_get_thread_num(); fann_run(ann_vect[j], data->input[i]); fann_compute_MSE(ann_vect[j], data->output[i]); fann_backpropagate_MSE(ann_vect[j]); fann_update_slopes_batch(ann_vect[j], ann_vect[j]->first_layer + 1, ann_vect[j]->last_layer - 1); } } { fann_type *weights = ann->weights; fann_type *prev_steps = ann->prev_steps; fann_type *prev_train_slopes = ann->prev_train_slopes; const unsigned int first_weight=0; const unsigned int past_end=ann->total_connections; fann_type w=0.0, next_step; const float epsilon = ann->learning_rate / data->num_data; const float decay = ann->quickprop_decay; /*-0.0001;*/ const float mu = ann->quickprop_mu; /*1.75; */ const float shrink_factor = (float) (mu / (1.0 + mu)); omp_set_dynamic(0); omp_set_num_threads(threadnumb); #pragma omp parallel private(w, next_step) { #pragma omp for schedule(static) for(i=first_weight; i < (int)past_end; i++) { fann_type temp_slopes=0.0; unsigned int k; fann_type *train_slopes; fann_type prev_step, prev_slope; w = weights[i]; for(k=0;k<threadnumb;++k) { train_slopes=ann_vect[k]->train_slopes; temp_slopes+= train_slopes[i]; train_slopes[i]=0.0; } temp_slopes+= decay * w; prev_step = prev_steps[i]; prev_slope = prev_train_slopes[i]; next_step = 0.0; /* The step must always be in direction opposite to the slope. */ if(prev_step > 0.001) { /* If last step was positive... */ if(temp_slopes > 0.0) /* Add in linear term if current slope is still positive. */ next_step += epsilon * temp_slopes; /*If current slope is close to or larger than prev slope... */ if(temp_slopes > (shrink_factor * prev_slope)) next_step += mu * prev_step; /* Take maximum size negative step. */ else next_step += prev_step * temp_slopes / (prev_slope - temp_slopes); /* Else, use quadratic estimate. 
*/ } else if(prev_step < -0.001) { /* If last step was negative... */ if(temp_slopes < 0.0) /* Add in linear term if current slope is still negative. */ next_step += epsilon * temp_slopes; /* If current slope is close to or more neg than prev slope... */ if(temp_slopes < (shrink_factor * prev_slope)) next_step += mu * prev_step; /* Take maximum size negative step. */ else next_step += prev_step * temp_slopes / (prev_slope - temp_slopes); /* Else, use quadratic estimate. */ } else /* Last step was zero, so use only linear term. */ next_step += epsilon * temp_slopes; /* update global data arrays */ prev_steps[i] = next_step; prev_train_slopes[i] = temp_slopes; w += next_step; if(w > 1500) weights[i] = 1500; else if(w < -1500) weights[i] = -1500; else weights[i] = w; } } } //merge of MSEs for(i=0;i<(int)threadnumb;++i) { ann->MSE_value+= ann_vect[i]->MSE_value; ann->num_MSE+=ann_vect[i]->num_MSE; fann_destroy(ann_vect[i]); } free(ann_vect); return fann_get_MSE(ann); } FANN_EXTERNAL float FANN_API fann_train_epoch_sarprop_parallel(struct fann *ann, struct fann_train_data *data, const unsigned int threadnumb) { struct fann** ann_vect= (struct fann**) malloc(threadnumb * sizeof(struct fann*)); int i=0,j=0; if(ann->prev_train_slopes == NULL) { fann_clear_train_arrays(ann); } //#define THREADNUM 1 fann_reset_MSE(ann); /*vector<struct fann *> ann_vect(threadnumb);*/ //generate copies of the ann omp_set_dynamic(0); omp_set_num_threads(threadnumb); #pragma omp parallel private(j) { #pragma omp for schedule(static) for(i=0; i<(int)threadnumb; i++) { ann_vect[i]=fann_copy(ann); } //parallel computing of the updates #pragma omp for schedule(static) for(i = 0; i < (int)data->num_data; i++) { j=omp_get_thread_num(); fann_run(ann_vect[j], data->input[i]); fann_compute_MSE(ann_vect[j], data->output[i]); fann_backpropagate_MSE(ann_vect[j]); fann_update_slopes_batch(ann_vect[j], ann_vect[j]->first_layer + 1, ann_vect[j]->last_layer - 1); } } { fann_type *weights = ann->weights; fann_type *prev_steps = ann->prev_steps; fann_type *prev_train_slopes = ann->prev_train_slopes; const unsigned int first_weight=0; const unsigned int past_end=ann->total_connections; const unsigned int epoch=ann->sarprop_epoch; fann_type next_step; /* These should be set from variables */ const float increase_factor = ann->rprop_increase_factor; /*1.2; */ const float decrease_factor = ann->rprop_decrease_factor; /*0.5; */ /* TODO: why is delta_min 0.0 in iRprop? SARPROP uses 1x10^-6 (Braun and Riedmiller, 1993) */ const float delta_min = 0.000001f; const float delta_max = ann->rprop_delta_max; /*50.0; */ const float weight_decay_shift = ann->sarprop_weight_decay_shift; /* ld 0.01 = -6.644 */ const float step_error_threshold_factor = ann->sarprop_step_error_threshold_factor; /* 0.1 */ const float step_error_shift = ann->sarprop_step_error_shift; /* ld 3 = 1.585 */ const float T = ann->sarprop_temperature; float MSE, RMSE; //merge of MSEs for(i=0;i<(int)threadnumb;++i) { ann->MSE_value+= ann_vect[i]->MSE_value; ann->num_MSE+=ann_vect[i]->num_MSE; } MSE = fann_get_MSE(ann); RMSE = sqrtf(MSE); /* for all weights; TODO: are biases included? 
*/ omp_set_dynamic(0); omp_set_num_threads(threadnumb); #pragma omp parallel private(next_step) { #pragma omp for schedule(static) for(i=first_weight; i < (int)past_end; i++) { /* TODO: confirm whether 1x10^-6 == delta_min is really better */ const fann_type prev_step = fann_max(prev_steps[i], (fann_type) 0.000001); /* prev_step may not be zero because then the training will stop */ /* calculate SARPROP slope; TODO: better as new error function? (see SARPROP paper)*/ fann_type prev_slope, same_sign; fann_type temp_slopes=0.0; unsigned int k; fann_type *train_slopes; for(k=0;k<threadnumb;++k) { train_slopes=ann_vect[k]->train_slopes; temp_slopes+= train_slopes[i]; train_slopes[i]=0.0; } temp_slopes= -temp_slopes - weights[i] * (fann_type)fann_exp2(-T * epoch + weight_decay_shift); next_step=0.0; /* TODO: is prev_train_slopes[i] 0.0 in the beginning? */ prev_slope = prev_train_slopes[i]; same_sign = prev_slope * temp_slopes; if(same_sign > 0.0) { next_step = fann_min(prev_step * increase_factor, delta_max); /* TODO: are the signs inverted? see differences between SARPROP paper and iRprop */ if (temp_slopes < 0.0) weights[i] += next_step; else weights[i] -= next_step; } else if(same_sign < 0.0) { #ifndef RAND_MAX #define RAND_MAX 0x7fffffff #endif if(prev_step < step_error_threshold_factor * MSE) next_step = prev_step * decrease_factor + (float)rand() / RAND_MAX * RMSE * (fann_type)fann_exp2(-T * epoch + step_error_shift); else next_step = fann_max(prev_step * decrease_factor, delta_min); temp_slopes = 0.0; } else { if(temp_slopes < 0.0) weights[i] += prev_step; else weights[i] -= prev_step; } /* update global data arrays */ prev_steps[i] = next_step; prev_train_slopes[i] = temp_slopes; } } } ++(ann->sarprop_epoch); //already computed before /*//merge of MSEs for(i=0;i<threadnumb;++i) { ann->MSE_value+= ann_vect[i]->MSE_value; ann->num_MSE+=ann_vect[i]->num_MSE; }*/ //destroy the copies of the ann for(i=0; i<(int)threadnumb; i++) { fann_destroy(ann_vect[i]); } free(ann_vect); return fann_get_MSE(ann); } FANN_EXTERNAL float FANN_API fann_train_epoch_incremental_mod(struct fann *ann, struct fann_train_data *data) { unsigned int i; fann_reset_MSE(ann); for(i = 0; i != data->num_data; i++) { fann_train(ann, data->input[i], data->output[i]); } return fann_get_MSE(ann); } #endif /* DISABLE_PARALLEL_FANN */
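/* ---------------------------------------------------------------------------
 * Illustrative usage sketch (not part of parallel_fann.c above): a minimal
 * driver for the parallel batch epoch defined in this file. The network
 * topology, training-file name, and thread count are hypothetical; the other
 * calls (fann_create_standard, fann_read_train_from_file, fann_destroy, ...)
 * are standard FANN API.
 */
#include <stdio.h>
#include "fann.h"
#include "parallel_fann.h"

int main(void)
{
    /* 2 inputs, one hidden layer of 8 neurons, 1 output (hypothetical). */
    struct fann *ann = fann_create_standard(3, 2, 8, 1);
    struct fann_train_data *data = fann_read_train_from_file("train.data");
    const unsigned int threads = 4;

    if (data == NULL)
        return 1;

    for (unsigned int epoch = 0; epoch < 100; epoch++) {
        /* Each call clones the net per thread, accumulates slopes in
         * parallel, then applies one batch weight update. */
        float mse = fann_train_epoch_batch_parallel(ann, data, threads);
        if (epoch % 10 == 0)
            printf("epoch %u, MSE %f\n", epoch, mse);
    }

    fann_destroy_train(data);
    fann_destroy(ann);
    return 0;
}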
parallel.c
/* Parallel quicksort using OpenMP tasks. */
#include <omp.h>
#include "parallel.h"

void quickSort_parallel(int* array, int lenArray, int numThreads)
{
    /* Below this size, recurse sequentially instead of spawning tasks. */
    int cutoff = 1000;

    #pragma omp parallel num_threads(numThreads)
    {
        /* One thread starts the recursion; the others pick up tasks. */
        #pragma omp single nowait
        {
            quickSort_parallel_internal(array, 0, lenArray - 1, cutoff);
        }
    }
}

void quickSort_parallel_internal(int* array, int left, int right, int cutoff)
{
    int i = left;
    int j = right;
    int tmp;
    int pivot = array[(left + right) / 2];

    /* Hoare-style partition around the middle element. */
    while (i <= j) {
        while (array[i] < pivot) {
            i++;
        }
        while (array[j] > pivot) {
            j--;
        }
        if (i <= j) {
            tmp = array[i];
            array[i] = array[j];
            array[j] = tmp;
            i++;
            j--;
        }
    }

    if ((right - left) < cutoff) {
        /* Small partitions: plain recursion avoids task overhead. */
        if (left < j) {
            quickSort_parallel_internal(array, left, j, cutoff);
        }
        if (i < right) {
            quickSort_parallel_internal(array, i, right, cutoff);
        }
    } else {
        /* Large partitions: sort the two halves as independent tasks. */
        #pragma omp task
        {
            quickSort_parallel_internal(array, left, j, cutoff);
        }
        #pragma omp task
        {
            quickSort_parallel_internal(array, i, right, cutoff);
        }
    }
}
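/* ---------------------------------------------------------------------------
 * Illustrative usage sketch (not part of parallel.c above): sorting a random
 * array with the task-based quicksort defined in this file. Array size and
 * thread count are arbitrary.
 */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "parallel.h"

int main(void)
{
    const int n = 1000000;
    int *a = (int *)malloc(n * sizeof(int));
    for (int i = 0; i < n; i++)
        a[i] = rand();

    /* Partitions larger than the internal cutoff (1000 elements) are
     * sorted as independent OpenMP tasks. */
    quickSort_parallel(a, n, omp_get_max_threads());

    printf("first: %d, last: %d\n", a[0], a[n - 1]);
    free(a);
    return 0;
}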
GB_binop__min_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__min_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__min_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__min_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__min_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__min_uint64) // A*D function (colscale): GB (_AxD__min_uint64) // D*A function (rowscale): GB (_DxB__min_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__min_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__min_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_uint64) // C=scalar+B GB (_bind1st__min_uint64) // C=scalar+B' GB (_bind1st_tran__min_uint64) // C=A+scalar GB (_bind2nd__min_uint64) // C=A'+scalar GB (_bind2nd_tran__min_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = GB_IMIN (aij, bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMIN (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_UINT64 || GxB_NO_MIN_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__min_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__min_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__min_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__min_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__min_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__min_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__min_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__min_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__min_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__min_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__min_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__min_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMIN (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__min_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMIN (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMIN (x, aij) ; \ } GrB_Info GB (_bind1st_tran__min_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict 
A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMIN (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__min_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
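/* ---------------------------------------------------------------------------
 * Illustrative usage sketch (not part of the generated file above): the
 * user-level GraphBLAS call that dispatches to GB (_AaddB__min_uint64), per
 * the operator table at the top of this file. Matrix dimensions and values
 * are arbitrary; the GrB_* calls are the standard GraphBLAS C API.
 */
#include "GraphBLAS.h"

void ewise_min_demo (void)
{
    GrB_Matrix A, B, C ;
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix_new (&A, GrB_UINT64, 4, 4) ;
    GrB_Matrix_new (&B, GrB_UINT64, 4, 4) ;
    GrB_Matrix_new (&C, GrB_UINT64, 4, 4) ;
    GrB_Matrix_setElement_UINT64 (A, 7, 0, 0) ;
    GrB_Matrix_setElement_UINT64 (B, 3, 0, 0) ;

    /* C = min(A,B) elementwise over the union of the patterns; with uint64
     * inputs this lands in the hard-coded eWiseAdd kernel in this file,
     * unless GB_DISABLE compiled it out. */
    GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GrB_MIN_UINT64, A, B, NULL) ;

    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&B) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
}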
GB_unop__identity_uint16_uint16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint16_uint16) // op(A') function: GB (_unop_tran__identity_uint16_uint16) // C type: uint16_t // A type: uint16_t // cast: uint16_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 1 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint16_uint16) ( uint16_t *Cx, // Cx and Ax may be aliased const uint16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t aij = Ax [p] ; uint16_t z = aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint16_t aij = Ax [p] ; uint16_t z = aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint16_uint16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
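/* ---------------------------------------------------------------------------
 * Illustrative usage sketch (not part of the generated file above): applying
 * the built-in identity operator to a uint16 matrix. Because this file sets
 * GB_OP_IS_IDENTITY_WITH_NO_TYPECAST to 1, the non-bitmap case reduces to a
 * parallel GB_memcpy rather than an elementwise loop. Assumes GrB_init was
 * already called; the function name is hypothetical.
 */
#include "GraphBLAS.h"

void identity_demo (GrB_Matrix A)   /* A assumed to be of type GrB_UINT16 */
{
    GrB_Index nrows, ncols ;
    GrB_Matrix C ;
    GrB_Matrix_nrows (&nrows, A) ;
    GrB_Matrix_ncols (&ncols, A) ;
    GrB_Matrix_new (&C, GrB_UINT16, nrows, ncols) ;

    /* C = identity (A); dispatches to GB (_unop_apply__identity_uint16_uint16) */
    GrB_Matrix_apply (C, NULL, NULL, GrB_IDENTITY_UINT16, A, NULL) ;

    GrB_Matrix_free (&C) ;
}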
equality.h
/* Authors: Mayank Rathee Copyright: Copyright (c) 2020 Microsoft Research Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef EQUALITY_H__ #define EQUALITY_H__ #include "Millionaire/millionaire.h" #include "OT/emp-ot.h" #include "utils/emp-tool.h" #include <cmath> class Equality { public: sci::IOPack *iopack; sci::OTPack *otpack; TripleGenerator *triple_gen; MillionaireProtocol *mill; int party; int l, r, log_alpha, beta, beta_pow; int num_digits, num_triples, log_num_digits; uint8_t mask_beta, mask_r; Equality(int party, sci::IOPack *iopack, sci::OTPack *otpack, int bitlength = 32, int radix_base = MILL_PARAM) { this->party = party; this->iopack = iopack; this->otpack = otpack; this->mill = new MillionaireProtocol(party, iopack, otpack, bitlength, radix_base); this->triple_gen = mill->triple_gen; configure(bitlength, radix_base); } void configure(int bitlength, int radix_base = MILL_PARAM) { assert(radix_base <= 8); assert(bitlength <= 64); this->l = bitlength; this->beta = radix_base; this->num_digits = ceil((double)l / beta); this->r = l % beta; this->log_alpha = sci::bitlen(num_digits) - 1; this->log_num_digits = log_alpha + 1; this->num_triples = num_digits - 1; if (beta == 8) this->mask_beta = -1; else this->mask_beta = (1 << beta) - 1; this->mask_r = (1 << r) - 1; this->beta_pow = 1 << beta; } ~Equality() { delete mill; } void bitlen_lt_beta(uint8_t *res_eq, uint64_t *data, int num_eqs, int bitlength, bool greater_than = true, int radix_base = MILL_PARAM) { uint8_t N = 1 << bitlength; uint8_t mask = N - 1; if (party == sci::ALICE) { sci::PRG128 prg; prg.random_data(res_eq, num_eqs * sizeof(uint8_t)); uint8_t **leaf_messages = new uint8_t *[num_eqs]; for (int i = 0; i < num_eqs; i++) { res_eq[i] &= 1; leaf_messages[i] = new uint8_t[N]; this->set_leaf_ot_messages(leaf_messages[i], (data[i] & mask), N, res_eq[i]); } if (bitlength > 1) { otpack->kkot[bitlength - 1]->send(leaf_messages, num_eqs, 1); } else { otpack->iknp_straight->send(leaf_messages, num_eqs, 1); } for (int i = 0; i < num_eqs; i++) delete[] leaf_messages[i]; delete[] leaf_messages; } else { // party == BOB uint8_t *choice = new uint8_t[num_eqs]; for (int i = 0; i < num_eqs; i++) { choice[i] = data[i] & mask; } if (bitlength > 1) { otpack->kkot[bitlength - 1]->recv(res_eq, choice, num_eqs, 1); } else { otpack->iknp_straight->recv(res_eq, choice, num_eqs, 1); } for (int i = 0; i < num_eqs; i++) { res_eq[i] = res_eq[i] & 1; } delete[] choice; } return; } void check_equality(uint8_t *res_eq, uint64_t *data, int num_eqs, int bitlength, int radix_base = MILL_PARAM) 
{ configure(bitlength, radix_base); if (bitlength <= beta) { bitlen_lt_beta(res_eq, data, num_eqs, bitlength, radix_base); return; } int old_num_eqs = num_eqs; // num_eqs should be a multiple of 8 num_eqs = ceil(num_eqs / 8.0) * 8; // padding with 0s if data dim not multiple of 8 uint64_t *data_ext; if (old_num_eqs == num_eqs) data_ext = data; else { data_ext = new uint64_t[num_eqs]; memcpy(data_ext, data, old_num_eqs * sizeof(uint64_t)); memset(data_ext + old_num_eqs, 0, (num_eqs - old_num_eqs) * sizeof(uint64_t)); } uint8_t *digits; // num_digits * num_eqs uint8_t *leaf_res_eq; // num_digits * num_eqs digits = new uint8_t[num_digits * num_eqs]; leaf_res_eq = new uint8_t[num_digits * num_eqs]; // Extract radix-digits from data for (int i = 0; i < num_digits; i++) // Stored from LSB to MSB for (int j = 0; j < num_eqs; j++) if ((i == num_digits - 1) && (r != 0)) digits[i * num_eqs + j] = (uint8_t)(data_ext[j] >> i * beta) & mask_r; else digits[i * num_eqs + j] = (uint8_t)(data_ext[j] >> i * beta) & mask_beta; // ====================== // Set leaf OT messages now if (party == sci::ALICE) { uint8_t **leaf_ot_messages; // (num_digits * num_eqs) X beta_pow (=2^beta) leaf_ot_messages = new uint8_t *[num_digits * num_eqs]; for (int i = 0; i < num_digits * num_eqs; i++) leaf_ot_messages[i] = new uint8_t[beta_pow]; // Set Leaf OT messages triple_gen->prg->random_bool((bool *)leaf_res_eq, num_digits * num_eqs); for (int i = 0; i < num_digits; i++) { for (int j = 0; j < num_eqs; j++) { if (i == (num_digits - 1) && (r > 0)) { this->set_leaf_ot_messages(leaf_ot_messages[i * num_eqs + j], digits[i * num_eqs + j], 1ULL << r, leaf_res_eq[i * num_eqs + j]); } else { this->set_leaf_ot_messages(leaf_ot_messages[i * num_eqs + j], digits[i * num_eqs + j], beta_pow, leaf_res_eq[i * num_eqs + j]); } } } // Perform Leaf OTs with comparison and equality if (r == 1) { // All branches except r otpack->kkot[beta - 1]->send(leaf_ot_messages, num_eqs * (num_digits - 1), 1); // r branch otpack->iknp_straight->send( leaf_ot_messages + num_eqs * (num_digits - 1), num_eqs, 1); } else if (r != 0) { // All branches except r otpack->kkot[beta - 1]->send(leaf_ot_messages, num_eqs * (num_digits - 1), 1); // r branch otpack->kkot[r - 1]->send(leaf_ot_messages + num_eqs * (num_digits - 1), num_eqs, 1); } else { // All branches including r, r is 0 otpack->kkot[beta - 1]->send(leaf_ot_messages, num_eqs * (num_digits), 1); } // Cleanup for (int i = 0; i < num_digits * num_eqs; i++) delete[] leaf_ot_messages[i]; delete[] leaf_ot_messages; } else // party = sci::BOB { // Perform Leaf OTs if (r == 1) { // All branches except r otpack->kkot[beta - 1]->recv(leaf_res_eq, digits, num_eqs * (num_digits - 1), 1); // r branch otpack->iknp_straight->recv(leaf_res_eq + num_eqs * (num_digits - 1), digits + num_eqs * (num_digits - 1), num_eqs, 1); } else if (r != 0) { // All branches except r otpack->kkot[beta - 1]->recv(leaf_res_eq, digits, num_eqs * (num_digits - 1), 1); // r branch otpack->kkot[r - 1]->recv(leaf_res_eq + num_eqs * (num_digits - 1), digits + num_eqs * (num_digits - 1), num_eqs, 1); } else { // All branches including r, r is 0 otpack->kkot[beta - 1]->recv(leaf_res_eq, digits, num_eqs * (num_digits), 1); } for (int i = 0; i < num_digits * num_eqs; i++) { leaf_res_eq[i] = leaf_res_eq[i] & 1; } } traverse_and_compute_ANDs(num_eqs, leaf_res_eq); for (int i = 0; i < old_num_eqs; i++) { res_eq[i] = leaf_res_eq[i]; } // Cleanup if (old_num_eqs != num_eqs) delete[] data_ext; delete[] digits; delete[] leaf_res_eq; } 
/************************************************************************************************** * AND computation related functions **************************************************************************************************/ void traverse_and_compute_ANDs(int num_eqs, uint8_t *leaf_res_eq) { Triple triples_std((num_triples)*num_eqs, true); // Generate required Bit-Triples triple_gen->generate(party, &triples_std, _16KKOT_to_4OT); // Combine leaf OT results in a bottom-up fashion int counter_triples_used = 0, old_counter_triples_used = 0; uint8_t *ei = new uint8_t[(num_triples * num_eqs) / 8]; uint8_t *fi = new uint8_t[(num_triples * num_eqs) / 8]; uint8_t *e = new uint8_t[(num_triples * num_eqs) / 8]; uint8_t *f = new uint8_t[(num_triples * num_eqs) / 8]; for (int i = 1; i < num_digits; i *= 2) { // i denotes the distance between 2 nodes which should be // ANDed together for (int j = 0; j < num_digits and j + i < num_digits; j += 2 * i) { // j=0 is LSD and j=num_digits-1 is MSD // EQ_j: Use 1 triple for opening e = a + eq_j and f = b + eq_j+i. this->mill->AND_step_1( ei + (counter_triples_used * num_eqs) / 8, fi + (counter_triples_used * num_eqs) / 8, leaf_res_eq + j * num_eqs, leaf_res_eq + (j + i) * num_eqs, (triples_std.ai) + (counter_triples_used * num_eqs) / 8, (triples_std.bi) + (counter_triples_used * num_eqs) / 8, num_eqs); counter_triples_used++; } int offset = (old_counter_triples_used * num_eqs) / 8; int size_used = ((counter_triples_used - old_counter_triples_used) * num_eqs) / 8; #pragma omp parallel num_threads(2) { if (omp_get_thread_num() == 1) { if (party == sci::ALICE) { iopack->io_rev->recv_data(e + offset, size_used); iopack->io_rev->recv_data(e + offset, size_used); iopack->io_rev->recv_data(f + offset, size_used); iopack->io_rev->recv_data(f + offset, size_used); } else { // party == sci::BOB iopack->io_rev->send_data(ei + offset, size_used); iopack->io_rev->send_data(ei + offset, size_used); iopack->io_rev->send_data(fi + offset, size_used); iopack->io_rev->send_data(fi + offset, size_used); } } else { if (party == sci::ALICE) { iopack->io->send_data(ei + offset, size_used); iopack->io->send_data(ei + offset, size_used); iopack->io->send_data(fi + offset, size_used); iopack->io->send_data(fi + offset, size_used); } else { // party == sci::BOB iopack->io->recv_data(e + offset, size_used); iopack->io->recv_data(e + offset, size_used); iopack->io->recv_data(f + offset, size_used); iopack->io->recv_data(f + offset, size_used); } } } // Reconstruct e and f for (int i = 0; i < size_used; i++) { e[i + offset] ^= ei[i + offset]; f[i + offset] ^= fi[i + offset]; } counter_triples_used = old_counter_triples_used; // Step 2 of AND computation for (int j = 0; j < num_digits and j + i < num_digits; j += 2 * i) { // j=0 is LSD and j=num_digits-1 is MSD // EQ_j: Use 1 triple compute eq_j AND eq_j+i. 
this->mill->AND_step_2( leaf_res_eq + j * num_eqs, e + (counter_triples_used * num_eqs) / 8, f + (counter_triples_used * num_eqs) / 8, nullptr, // not used in function nullptr, // not used in function (triples_std.ai) + (counter_triples_used * num_eqs) / 8, (triples_std.bi) + (counter_triples_used * num_eqs) / 8, (triples_std.ci) + (counter_triples_used * num_eqs) / 8, num_eqs); counter_triples_used++; } old_counter_triples_used = counter_triples_used; } assert(counter_triples_used == num_triples); // cleanup delete[] ei; delete[] fi; delete[] e; delete[] f; } void set_leaf_ot_messages(uint8_t *ot_messages, uint8_t digit, int N, uint8_t mask_eq) { for (int i = 0; i < N; i++) { ot_messages[i] = ((digit == i) ^ mask_eq); } } }; #endif // EQUALITY_H__
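/* ---------------------------------------------------------------------------
 * Illustrative usage sketch (not part of equality.h above): a hypothetical
 * two-party driver. The IOPack/OTPack constructor signatures below are
 * assumptions based on typical SCI usage and may differ from the actual
 * headers; Equality::check_equality is taken from this file. Each party
 * passes its share of the inputs, and res comes back as an XOR share of the
 * per-element equality bit.
 */
#include "Millionaire/equality.h"

void equality_demo(int party /* sci::ALICE or sci::BOB */) {
  // Assumed constructors; consult the SCI IOPack/OTPack headers.
  sci::IOPack *iopack = new sci::IOPack(party, 32000, "127.0.0.1");
  sci::OTPack *otpack = new sci::OTPack(iopack, party);

  Equality eq(party, iopack, otpack, /*bitlength=*/32, /*radix_base=*/4);

  uint64_t data[8]; // this party's shares of 8 values (filled elsewhere)
  uint8_t res[8];   // XOR-shared equality bits, one per comparison
  eq.check_equality(res, data, /*num_eqs=*/8, /*bitlength=*/32);

  delete otpack;
  delete iopack;
}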
nanort.h
// // NanoRT, single header only modern ray tracing kernel. // /* The MIT License (MIT) Copyright (c) 2015 - 2016 Light Transport Entertainment, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef NANORT_H_ #define NANORT_H_ #include <algorithm> #include <cassert> #include <cmath> #include <cstdio> #include <cstdlib> #include <cstring> #include <functional> #include <limits> #include <memory> #include <queue> #include <string> #include <vector> namespace nanort { // Parallelized BVH build is not yet fully tested, // thus turn off if you face a problem when building BVH. #define NANORT_ENABLE_PARALLEL_BUILD (1) // ---------------------------------------------------------------------------- // Small vector class useful for multi-threaded environment. // // stack_container.h // // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This allocator can be used with STL containers to provide a stack buffer // from which to allocate memory and overflows onto the heap. This stack buffer // would be allocated on the stack and allows us to avoid heap operations in // some situations. // // STL likes to make copies of allocators, so the allocator itself can't hold // the data. Instead, we make the creator responsible for creating a // StackAllocator::Source which contains the data. Copying the allocator // merely copies the pointer to this shared source, so all allocators created // based on our allocator will share the same stack buffer. // // This stack buffer implementation is very simple. The first allocation that // fits in the stack buffer will use the stack buffer. Any subsequent // allocations will not use the stack buffer, even if there is unused room. // This makes it appropriate for array-like containers, but the caller should // be sure to reserve() in the container up to the stack buffer size. Otherwise // the container will allocate a small array which will "use up" the stack // buffer. template <typename T, size_t stack_capacity> class StackAllocator : public std::allocator<T> { public: typedef typename std::allocator<T>::pointer pointer; typedef typename std::allocator<T>::size_type size_type; // Backing store for the allocator. The container owner is responsible for // maintaining this for as long as any containers using this allocator are // live. struct Source { Source() : used_stack_buffer_(false) {} // Casts the buffer in its right type. 
T *stack_buffer() { return reinterpret_cast<T *>(stack_buffer_); } const T *stack_buffer() const { return reinterpret_cast<const T *>(stack_buffer_); } // // IMPORTANT: Take care to ensure that stack_buffer_ is aligned // since it is used to mimic an array of T. // Be careful while declaring any unaligned types (like bool) // before stack_buffer_. // // The buffer itself. It is not of type T because we don't want the // constructors and destructors to be automatically called. Define a POD // buffer of the right size instead. char stack_buffer_[sizeof(T[stack_capacity])]; // Set when the stack buffer is used for an allocation. We do not track // how much of the buffer is used, only that somebody is using it. bool used_stack_buffer_; }; // Used by containers when they want to refer to an allocator of type U. template <typename U> struct rebind { typedef StackAllocator<U, stack_capacity> other; }; // For the straight up copy c-tor, we can share storage. StackAllocator(const StackAllocator<T, stack_capacity> &rhs) : source_(rhs.source_) {} // ISO C++ requires the following constructor to be defined, // and std::vector in VC++2008SP1 Release fails with an error // in the class _Container_base_aux_alloc_real (from <xutility>) // if the constructor does not exist. // For this constructor, we cannot share storage; there's // no guarantee that the Source buffer of Ts is large enough // for Us. // TODO(Google): If we were fancy pants, perhaps we could share storage // iff sizeof(T) == sizeof(U). template <typename U, size_t other_capacity> StackAllocator(const StackAllocator<U, other_capacity> &other) : source_(NULL) { (void)other; } explicit StackAllocator(Source *source) : source_(source) {} // Actually do the allocation. Use the stack buffer if nobody has used it yet // and the size requested fits. Otherwise, fall through to the standard // allocator. pointer allocate(size_type n, void *hint = 0) { if (source_ != NULL && !source_->used_stack_buffer_ && n <= stack_capacity) { source_->used_stack_buffer_ = true; return source_->stack_buffer(); } else { return std::allocator<T>::allocate(n, hint); } } // Free: when trying to free the stack buffer, just mark it as free. For // non-stack-buffer pointers, just fall though to the standard allocator. void deallocate(pointer p, size_type n) { if (source_ != NULL && p == source_->stack_buffer()) source_->used_stack_buffer_ = false; else std::allocator<T>::deallocate(p, n); } private: Source *source_; }; // A wrapper around STL containers that maintains a stack-sized buffer that the // initial capacity of the vector is based on. Growing the container beyond the // stack capacity will transparently overflow onto the heap. The container must // support reserve(). // // WATCH OUT: the ContainerType MUST use the proper StackAllocator for this // type. This object is really intended to be used only internally. You'll want // to use the wrappers below for different types. template <typename TContainerType, int stack_capacity> class StackContainer { public: typedef TContainerType ContainerType; typedef typename ContainerType::value_type ContainedType; typedef StackAllocator<ContainedType, stack_capacity> Allocator; // Allocator must be constructed before the container! StackContainer() : allocator_(&stack_data_), container_(allocator_) { // Make the container use the stack allocation by reserving our buffer size // before doing anything else. container_.reserve(stack_capacity); } // Getters for the actual container. 
// // Danger: any copies of this made using the copy constructor must have // shorter lifetimes than the source. The copy will share the same allocator // and therefore the same stack buffer as the original. Use std::copy to // copy into a "real" container for longer-lived objects. ContainerType &container() { return container_; } const ContainerType &container() const { return container_; } // Support operator-> to get to the container. This allows nicer syntax like: // StackContainer<...> foo; // std::sort(foo->begin(), foo->end()); ContainerType *operator->() { return &container_; } const ContainerType *operator->() const { return &container_; } #ifdef UNIT_TEST // Retrieves the stack source so that that unit tests can verify that the // buffer is being used properly. const typename Allocator::Source &stack_data() const { return stack_data_; } #endif protected: typename Allocator::Source stack_data_; unsigned char pad_[7]; Allocator allocator_; ContainerType container_; // DISALLOW_EVIL_CONSTRUCTORS(StackContainer); StackContainer(const StackContainer &); void operator=(const StackContainer &); }; // StackVector // // Example: // StackVector<int, 16> foo; // foo->push_back(22); // we have overloaded operator-> // foo[0] = 10; // as well as operator[] template <typename T, size_t stack_capacity> class StackVector : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >, stack_capacity> { public: StackVector() : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >, stack_capacity>() {} // We need to put this in STL containers sometimes, which requires a copy // constructor. We can't call the regular copy constructor because that will // take the stack buffer from the original. Here, we create an empty object // and make a stack buffer of its own. StackVector(const StackVector<T, stack_capacity> &other) : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >, stack_capacity>() { this->container().assign(other->begin(), other->end()); } StackVector<T, stack_capacity> &operator=( const StackVector<T, stack_capacity> &other) { this->container().assign(other->begin(), other->end()); return *this; } // Vectors are commonly indexed, which isn't very convenient even with // operator-> (using "->at()" does exception stuff we don't want). 
T &operator[](size_t i) { return this->container().operator[](i); } const T &operator[](size_t i) const { return this->container().operator[](i); } }; // ---------------------------------------------------------------------------- struct float3 { float3() {} float3(float xx, float yy, float zz) { v[0] = xx; v[1] = yy; v[2] = zz; } explicit float3(const float *p) { v[0] = p[0]; v[1] = p[1]; v[2] = p[2]; } inline float x() const { return v[0]; } inline float y() const { return v[1]; } inline float z() const { return v[2]; } float3 operator*(float f) const { return float3(x() * f, y() * f, z() * f); } float3 operator-(const float3 &f2) const { return float3(x() - f2.x(), y() - f2.y(), z() - f2.z()); } float3 operator*(const float3 &f2) const { return float3(x() * f2.x(), y() * f2.y(), z() * f2.z()); } float3 operator+(const float3 &f2) const { return float3(x() + f2.x(), y() + f2.y(), z() + f2.z()); } float3 &operator+=(const float3 &f2) { v[0] += f2.x(); v[1] += f2.y(); v[2] += f2.z(); return (*this); } float3 operator/(const float3 &f2) const { return float3(x() / f2.x(), y() / f2.y(), z() / f2.z()); } float operator[](int i) const { return v[i]; } float &operator[](int i) { return v[i]; } float v[3]; // float pad; // for alignment }; inline float3 operator*(float f, const float3 &v) { return float3(v.x() * f, v.y() * f, v.z() * f); } inline float3 vneg(const float3 &rhs) { return float3(-rhs.x(), -rhs.y(), -rhs.z()); } inline float vlength(const float3 &rhs) { return sqrtf(rhs.x() * rhs.x() + rhs.y() * rhs.y() + rhs.z() * rhs.z()); } inline float3 vnormalize(const float3 &rhs) { float3 v = rhs; float len = vlength(rhs); if (fabsf(len) > 1.0e-6f) { float inv_len = 1.0f / len; v.v[0] *= inv_len; v.v[1] *= inv_len; v.v[2] *= inv_len; } return v; } inline float3 vcross(float3 a, float3 b) { float3 c; c[0] = a[1] * b[2] - a[2] * b[1]; c[1] = a[2] * b[0] - a[0] * b[2]; c[2] = a[0] * b[1] - a[1] * b[0]; return c; } inline float vdot(float3 a, float3 b) { return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]; } typedef struct { float org[3]; // must set float dir[3]; // must set float min_t; // minium ray hit distance. must set. float max_t; // maximum ray hit distance. must set. float inv_dir[3]; // filled internally int dir_sign[3]; // filled internally } Ray; class BVHNode { public: BVHNode() {} ~BVHNode() {} float bmin[3]; float bmax[3]; int flag; // 1 = leaf node, 0 = branch node int axis; // leaf // data[0] = npoints // data[1] = index // // branch // data[0] = child[0] // data[1] = child[1] unsigned int data[2]; }; template<class I> class IsectComparator { public: bool operator()(const I &a, const I &b) const { return a.t < b.t; } }; /// BVH build option. struct BVHBuildOptions { float cost_t_aabb; unsigned int min_leaf_primitives; unsigned int max_tree_depth; unsigned int bin_size; unsigned int shallow_depth; unsigned int min_primitives_for_parallel_build; // Cache bounding box computation. // Requires more memory, but BVHbuild can be faster. bool cache_bbox; unsigned char pad[3]; // Set default value: Taabb = 0.2 BVHBuildOptions() : cost_t_aabb(0.2f), min_leaf_primitives(4), max_tree_depth(256), bin_size(64), shallow_depth(3), min_primitives_for_parallel_build(1024 * 128), cache_bbox(false) {} }; /// BVH build statistics. 
class BVHBuildStatistics {
 public:
  unsigned int max_tree_depth;
  unsigned int num_leaf_nodes;
  unsigned int num_branch_nodes;
  float build_secs;

  // Initialize all counters to zero.
  BVHBuildStatistics()
      : max_tree_depth(0),
        num_leaf_nodes(0),
        num_branch_nodes(0),
        build_secs(0.0f) {}
};

/// BVH trace option.
class BVHTraceOptions {
 public:
  // Hit only for face IDs in the range [prim_ids_range[0], prim_ids_range[1]).
  // Useful for mimicking something like glDrawArrays().
  unsigned int prim_ids_range[2];
  bool cull_back_face;
  unsigned char pad[3];  ///< Padding (not used)

  BVHTraceOptions() {
    prim_ids_range[0] = 0;
    prim_ids_range[1] = 0x7FFFFFFF;  // Up to 2G face IDs.
    cull_back_face = false;
  }
};

class BBox {
 public:
  float3 bmin;
  float3 bmax;

  BBox() {
    bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<float>::max();
    bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<float>::max();
  }
};

template <class P, class Pred, class I>
class BVHAccel {
 public:
  BVHAccel() : pad0_(0) { (void)pad0_; }
  ~BVHAccel() {}

  /// Build BVH for input primitives.
  bool Build(const unsigned int num_primitives, const BVHBuildOptions &options,
             const P &p, const Pred &pred);

  /// Get statistics of the built BVH tree. Valid after Build().
  BVHBuildStatistics GetStatistics() const { return stats_; }

  /// Dump the built BVH to a file.
  bool Dump(const char *filename);

  /// Load BVH binary.
  bool Load(const char *filename);

  /// Traverse the BVH along the ray and find the closest hit point &
  /// primitive, if found.
  bool Traverse(const Ray &ray, const BVHTraceOptions &options,
                const I &intersector) const;

  /// Multi-hit ray traversal.
  /// Returns up to `max_intersections` frontmost intersections.
  bool MultiHitTraverse(const Ray &ray, const BVHTraceOptions &options,
                        int max_intersections,
                        StackVector<I, 128> *intersector) const;

  const std::vector<BVHNode> &GetNodes() const { return nodes_; }
  const std::vector<unsigned int> &GetIndices() const { return indices_; }

  /// Returns the bounding box of the built BVH.
  void BoundingBox(float bmin[3], float bmax[3]) const {
    if (nodes_.empty()) {
      bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<float>::max();
      bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<float>::max();
    } else {
      bmin[0] = nodes_[0].bmin[0];
      bmin[1] = nodes_[0].bmin[1];
      bmin[2] = nodes_[0].bmin[2];
      bmax[0] = nodes_[0].bmax[0];
      bmax[1] = nodes_[0].bmax[1];
      bmax[2] = nodes_[0].bmax[2];
    }
  }

  bool IsValid() const { return nodes_.size() > 0; }

 private:
#if NANORT_ENABLE_PARALLEL_BUILD
  typedef struct {
    unsigned int left_idx;
    unsigned int right_idx;
    unsigned int offset;
  } ShallowNodeInfo;

  // Used only during BVH construction.
  std::vector<ShallowNodeInfo> shallow_node_infos_;

  /// Builds a shallow BVH tree recursively.
  unsigned int BuildShallowTree(std::vector<BVHNode> *out_nodes,
                                unsigned int left_idx, unsigned int right_idx,
                                unsigned int depth,
                                unsigned int max_shallow_depth, const P &p,
                                const Pred &pred);
#endif

  /// Builds the BVH tree recursively.
  unsigned int BuildTree(BVHBuildStatistics *out_stat,
                         std::vector<BVHNode> *out_nodes,
                         unsigned int left_idx, unsigned int right_idx,
                         unsigned int depth, const P &p, const Pred &pred);

  bool TestLeafNode(const BVHNode &node, const Ray &ray,
                    const I &intersector) const;

  //bool MultiHitTestLeafNode(IsectVector *isects, int max_intersections,
  //                          const BVHNode &node, const Ray &ray,
  //                          const I &intersector) const;

  std::vector<BVHNode> nodes_;
  std::vector<unsigned int> indices_;  // max 4G triangles.
  std::vector<BBox> bboxes_;
  BVHBuildOptions options_;
  BVHBuildStatistics stats_;
  unsigned int pad0_;
};

// Predefined SAH predicate for triangles.
class TriangleSAHPred { public: TriangleSAHPred(const float *vertices, const unsigned int *faces) : axis_(0), pos_(0.0f), vertices_(vertices), faces_(faces) {} void Set(int axis, float pos) const { axis_ = axis; pos_ = pos; } bool operator()(unsigned int i) const { int axis = axis_; float pos = pos_; unsigned int i0 = faces_[3 * i + 0]; unsigned int i1 = faces_[3 * i + 1]; unsigned int i2 = faces_[3 * i + 2]; float3 p0(&vertices_[3 * i0]); float3 p1(&vertices_[3 * i1]); float3 p2(&vertices_[3 * i2]); float center = p0[axis] + p1[axis] + p2[axis]; return (center < pos * 3.0f); } private: mutable int axis_; mutable float pos_; const float *vertices_; const unsigned int *faces_; }; // Predefined Triangle mesh geometry. class TriangleMesh { public: TriangleMesh(const float *vertices, const unsigned int *faces) : vertices_(vertices), faces_(faces) {} /// Compute bounding box for `prim_index`th triangle. /// This function is called for each primitive in BVH build. void BoundingBox(float3 *bmin, float3 *bmax, unsigned int prim_index) const { (*bmin)[0] = vertices_[3 * faces_[3 * prim_index + 0] + 0]; (*bmin)[1] = vertices_[3 * faces_[3 * prim_index + 0] + 1]; (*bmin)[2] = vertices_[3 * faces_[3 * prim_index + 0] + 2]; (*bmax)[0] = vertices_[3 * faces_[3 * prim_index + 0] + 0]; (*bmax)[1] = vertices_[3 * faces_[3 * prim_index + 0] + 1]; (*bmax)[2] = vertices_[3 * faces_[3 * prim_index + 0] + 2]; for (unsigned int i = 1; i < 3; i++) { for (unsigned int k = 0; k < 3; k++) { if ((*bmin)[static_cast<int>(k)] > vertices_[3 * faces_[3 * prim_index + i] + k]) { (*bmin)[static_cast<int>(k)] = vertices_[3 * faces_[3 * prim_index + i] + k]; } if ((*bmax)[static_cast<int>(k)] < vertices_[3 * faces_[3 * prim_index + i] + k]) { (*bmax)[static_cast<int>(k)] = vertices_[3 * faces_[3 * prim_index + i] + k]; } } } } const float *vertices_; const unsigned int *faces_; }; struct TriangleIntersection { float u; float v; // Required member variables. float t; unsigned int prim_id; }; template<class I = TriangleIntersection> class TriangleIntersector { public: TriangleIntersector(const float *vertices, const unsigned int *faces) : vertices_(vertices), faces_(faces) {} // For Watertight Ray/Triangle Intersection. typedef struct { float Sx; float Sy; float Sz; int kx; int ky; int kz; } RayCoeff; /// Do ray interesection stuff for `prim_index` th primitive and return hit /// distance `t`, /// varycentric coordinate `u` and `v`. /// Returns true if there's intersection. 
  bool Intersect(float *t_inout, unsigned int prim_index) const {
    if ((prim_index < trace_options_.prim_ids_range[0]) ||
        (prim_index >= trace_options_.prim_ids_range[1])) {
      return false;
    }

    const unsigned int f0 = faces_[3 * prim_index + 0];
    const unsigned int f1 = faces_[3 * prim_index + 1];
    const unsigned int f2 = faces_[3 * prim_index + 2];

    const float3 p0(&vertices_[3 * f0 + 0]);
    const float3 p1(&vertices_[3 * f1 + 0]);
    const float3 p2(&vertices_[3 * f2 + 0]);

    const float3 A = p0 - ray_org_;
    const float3 B = p1 - ray_org_;
    const float3 C = p2 - ray_org_;

    const float Ax = A[ray_coeff_.kx] - ray_coeff_.Sx * A[ray_coeff_.kz];
    const float Ay = A[ray_coeff_.ky] - ray_coeff_.Sy * A[ray_coeff_.kz];
    const float Bx = B[ray_coeff_.kx] - ray_coeff_.Sx * B[ray_coeff_.kz];
    const float By = B[ray_coeff_.ky] - ray_coeff_.Sy * B[ray_coeff_.kz];
    const float Cx = C[ray_coeff_.kx] - ray_coeff_.Sx * C[ray_coeff_.kz];
    const float Cy = C[ray_coeff_.ky] - ray_coeff_.Sy * C[ray_coeff_.kz];

    float U = Cx * By - Cy * Bx;
    float V = Ax * Cy - Ay * Cx;
    float W = Bx * Ay - By * Ax;

    // Fall back to testing against edges using double precision.
    if (U == 0.0f || V == 0.0f || W == 0.0f) {
      double CxBy = static_cast<double>(Cx) * static_cast<double>(By);
      double CyBx = static_cast<double>(Cy) * static_cast<double>(Bx);
      U = static_cast<float>(CxBy - CyBx);

      double AxCy = static_cast<double>(Ax) * static_cast<double>(Cy);
      double AyCx = static_cast<double>(Ay) * static_cast<double>(Cx);
      V = static_cast<float>(AxCy - AyCx);

      double BxAy = static_cast<double>(Bx) * static_cast<double>(Ay);
      double ByAx = static_cast<double>(By) * static_cast<double>(Ax);
      W = static_cast<float>(BxAy - ByAx);
    }

    if (trace_options_.cull_back_face) {
      if (U < 0.0f || V < 0.0f || W < 0.0f) return false;
    } else {
      if ((U < 0.0f || V < 0.0f || W < 0.0f) &&
          (U > 0.0f || V > 0.0f || W > 0.0f)) {
        return false;
      }
    }

    float det = U + V + W;
    if (det == 0.0f) return false;

    const float Az = ray_coeff_.Sz * A[ray_coeff_.kz];
    const float Bz = ray_coeff_.Sz * B[ray_coeff_.kz];
    const float Cz = ray_coeff_.Sz * C[ray_coeff_.kz];
    const float T = U * Az + V * Bz + W * Cz;

    const float rcpDet = 1.0f / det;
    float tt = T * rcpDet;

    if (tt > (*t_inout)) {
      return false;
    }

    (*t_inout) = tt;

    // Use Möller-Trumbore style barycentric coordinates.
    // U + V + W = 1.0 and interp(p) = U * p0 + V * p1 + W * p2,
    // but we want interp(p) = (1 - u - v) * p0 + u * p1 + v * p2,
    // hence u = V, v = W.
    intersection.u = V * rcpDet;
    intersection.v = W * rcpDet;

    return true;
  }

  /// Returns the nearest hit distance.
  float GetT() const { return intersection.t; }

  /// Update is called when initializing the intersection state and whenever
  /// a nearer hit is found.
  void Update(float t, unsigned int prim_idx) const {
    intersection.t = t;
    intersection.prim_id = prim_idx;
  }

  /// Prepare BVH traversal (e.g. compute the inverse ray direction).
  /// This function is called only once per BVH traversal.
  void PrepareTraversal(const Ray &ray,
                        const BVHTraceOptions &trace_options) const {
    ray_org_[0] = ray.org[0];
    ray_org_[1] = ray.org[1];
    ray_org_[2] = ray.org[2];

    // Calculate the dimension where the ray direction is maximal.
    ray_coeff_.kz = 0;
    float absDir = fabsf(ray.dir[0]);
    if (absDir < fabsf(ray.dir[1])) {
      ray_coeff_.kz = 1;
      absDir = fabsf(ray.dir[1]);
    }
    if (absDir < fabsf(ray.dir[2])) {
      ray_coeff_.kz = 2;
      absDir = fabsf(ray.dir[2]);
    }

    ray_coeff_.kx = ray_coeff_.kz + 1;
    if (ray_coeff_.kx == 3) ray_coeff_.kx = 0;
    ray_coeff_.ky = ray_coeff_.kx + 1;
    if (ray_coeff_.ky == 3) ray_coeff_.ky = 0;

    // Swap the kx and ky dimensions to preserve the winding direction of
    // triangles.
if (ray.dir[ray_coeff_.kz] < 0.0f) std::swap(ray_coeff_.kx, ray_coeff_.ky); // Claculate shear constants. ray_coeff_.Sx = ray.dir[ray_coeff_.kx] / ray.dir[ray_coeff_.kz]; ray_coeff_.Sy = ray.dir[ray_coeff_.ky] / ray.dir[ray_coeff_.kz]; ray_coeff_.Sz = 1.0f / ray.dir[ray_coeff_.kz]; trace_options_ = trace_options; intersection.u = 0.0f; intersection.v = 0.0f; } /// Post BVH traversal stuff(e.g. compute intersection point information) /// This function is called only once in BVH traversal. /// `hit` = true if there is something hit. void PostTraversal(const Ray &ray, bool hit) const { if (hit) { // Do something when there is a hit. } (void)ray; } const float *vertices_; const unsigned int *faces_; mutable float3 ray_org_; mutable RayCoeff ray_coeff_; mutable BVHTraceOptions trace_options_; mutable I intersection; }; // // Robust BVH Ray Traversal : http://jcgt.org/published/0002/02/02/paper.pdf // // NaN-safe min and max function. template <class T> const T &safemin(const T &a, const T &b) { return (a < b) ? a : b; } template <class T> const T &safemax(const T &a, const T &b) { return (a > b) ? a : b; } // // SAH functions // struct BinBuffer { explicit BinBuffer(unsigned int size) { bin_size = size; bin.resize(2 * 3 * size); clear(); } void clear() { memset(&bin[0], 0, sizeof(size_t) * 2 * 3 * bin_size); } std::vector<size_t> bin; // (min, max) * xyz * binsize unsigned int bin_size; unsigned int pad0; }; inline float CalculateSurfaceArea(const float3 &min, const float3 &max) { float3 box = max - min; return 2.0f * (box[0] * box[1] + box[1] * box[2] + box[2] * box[0]); } inline void GetBoundingBoxOfTriangle(float3 *bmin, float3 *bmax, const float *vertices, const unsigned int *faces, unsigned int index) { unsigned int f0 = faces[3 * index + 0]; unsigned int f1 = faces[3 * index + 1]; unsigned int f2 = faces[3 * index + 2]; float3 p[3]; p[0] = float3(&vertices[3 * f0]); p[1] = float3(&vertices[3 * f1]); p[2] = float3(&vertices[3 * f2]); (*bmin) = p[0]; (*bmax) = p[0]; for (int i = 1; i < 3; i++) { (*bmin)[0] = std::min((*bmin)[0], p[i][0]); (*bmin)[1] = std::min((*bmin)[1], p[i][1]); (*bmin)[2] = std::min((*bmin)[2], p[i][2]); (*bmax)[0] = std::max((*bmax)[0], p[i][0]); (*bmax)[1] = std::max((*bmax)[1], p[i][1]); (*bmax)[2] = std::max((*bmax)[2], p[i][2]); } } template <class P> inline void ContributeBinBuffer(BinBuffer *bins, // [out] const float3 &scene_min, const float3 &scene_max, unsigned int *indices, unsigned int left_idx, unsigned int right_idx, const P &p) { float bin_size = static_cast<float>(bins->bin_size); // Calculate extent float3 scene_size, scene_inv_size; scene_size = scene_max - scene_min; for (int i = 0; i < 3; ++i) { assert(scene_size[i] >= 0.0f); if (scene_size[i] > 0.0f) { scene_inv_size[i] = bin_size / scene_size[i]; } else { scene_inv_size[i] = 0.0; } } // Clear bin data std::fill(bins->bin.begin(), bins->bin.end(), 0); // memset(&bins->bin[0], 0, sizeof(2 * 3 * bins->bin_size)); size_t idx_bmin[3]; size_t idx_bmax[3]; for (size_t i = left_idx; i < right_idx; i++) { // // Quantize the position into [0, BIN_SIZE) // // q[i] = (int)(p[i] - scene_bmin) / scene_size // float3 bmin; float3 bmax; p.BoundingBox(&bmin, &bmax, indices[i]); // GetBoundingBoxOfTriangle(&bmin, &bmax, vertices, faces, indices[i]); float3 quantized_bmin = (bmin - scene_min) * scene_inv_size; float3 quantized_bmax = (bmax - scene_min) * scene_inv_size; // idx is now in [0, BIN_SIZE) for (int j = 0; j < 3; ++j) { int q0 = static_cast<int>(quantized_bmin[j]); if (q0 < 0) q0 = 0; int q1 = 
static_cast<int>(quantized_bmax[j]); if (q1 < 0) q1 = 0; idx_bmin[j] = static_cast<unsigned int>(q0); idx_bmax[j] = static_cast<unsigned int>(q1); if (idx_bmin[j] >= bin_size) idx_bmin[j] = static_cast<unsigned int>(bin_size) - 1; if (idx_bmax[j] >= bin_size) idx_bmax[j] = static_cast<unsigned int>(bin_size) - 1; assert(idx_bmin[j] < bin_size); assert(idx_bmax[j] < bin_size); // Increment bin counter bins->bin[0 * (bins->bin_size * 3) + static_cast<size_t>(j) * bins->bin_size + idx_bmin[j]] += 1; bins->bin[1 * (bins->bin_size * 3) + static_cast<size_t>(j) * bins->bin_size + idx_bmax[j]] += 1; } } } inline float SAH(size_t ns1, float leftArea, size_t ns2, float rightArea, float invS, float Taabb, float Ttri) { // const float Taabb = 0.2f; // const float Ttri = 0.8f; float T; T = 2.0f * Taabb + (leftArea * invS) * static_cast<float>(ns1) * Ttri + (rightArea * invS) * static_cast<float>(ns2) * Ttri; return T; } inline bool FindCutFromBinBuffer(float *cut_pos, // [out] xyz int *minCostAxis, // [out] const BinBuffer *bins, const float3 &bmin, const float3 &bmax, size_t num_primitives, float costTaabb) { // should be in [0.0, 1.0] const float kEPS = std::numeric_limits<float>::epsilon(); // * epsScale; size_t left, right; float3 bsize, bstep; float3 bminLeft, bmaxLeft; float3 bminRight, bmaxRight; float saLeft, saRight, saTotal; float pos; float minCost[3]; float costTtri = 1.0f - costTaabb; (*minCostAxis) = 0; bsize = bmax - bmin; bstep = bsize * (1.0f / bins->bin_size); saTotal = CalculateSurfaceArea(bmin, bmax); float invSaTotal = 0.0f; if (saTotal > kEPS) { invSaTotal = 1.0f / saTotal; } for (int j = 0; j < 3; ++j) { // // Compute SAH cost for right side of each cell of the bbox. // Exclude both extreme side of the bbox. // // i: 0 1 2 3 // +----+----+----+----+----+ // | | | | | | // +----+----+----+----+----+ // float minCostPos = bmin[j] + 0.5f * bstep[j]; minCost[j] = std::numeric_limits<float>::max(); left = 0; right = num_primitives; bminLeft = bminRight = bmin; bmaxLeft = bmaxRight = bmax; for (int i = 0; i < static_cast<int>(bins->bin_size) - 1; ++i) { left += bins->bin[0 * (3 * bins->bin_size) + static_cast<size_t>(j) * bins->bin_size + static_cast<size_t>(i)]; right -= bins->bin[1 * (3 * bins->bin_size) + static_cast<size_t>(j) * bins->bin_size + static_cast<size_t>(i)]; assert(left <= num_primitives); assert(right <= num_primitives); // // Split pos bmin + (i + 1) * (bsize / BIN_SIZE) // +1 for i since we want a position on right side of the cell. 
      //
      pos = bmin[j] + (i + 0.5f) * bstep[j];
      bmaxLeft[j] = pos;
      bminRight[j] = pos;

      saLeft = CalculateSurfaceArea(bminLeft, bmaxLeft);
      saRight = CalculateSurfaceArea(bminRight, bmaxRight);

      float cost =
          SAH(left, saLeft, right, saRight, invSaTotal, costTaabb, costTtri);
      if (cost < minCost[j]) {
        //
        // Update the min cost
        //
        minCost[j] = cost;
        minCostPos = pos;
        // minCostAxis = j;
      }
    }

    cut_pos[j] = minCostPos;
  }

  // cut_axis = minCostAxis;
  // cut_pos = minCostPos;

  // Find the min cost axis.
  float cost = minCost[0];
  (*minCostAxis) = 0;
  if (cost > minCost[1]) {
    (*minCostAxis) = 1;
    cost = minCost[1];
  }
  if (cost > minCost[2]) {
    (*minCostAxis) = 2;
    cost = minCost[2];
  }

  return true;
}

#ifdef _OPENMP
template <class P>
void ComputeBoundingBoxOMP(float3 *bmin, float3 *bmax,
                           const unsigned int *indices,
                           unsigned int left_index, unsigned int right_index,
                           const P &p) {
  // Seed the shared bounds with the first primitive.
  p.BoundingBox(bmin, bmax, indices[left_index]);

  float local_bmin[3] = {(*bmin)[0], (*bmin)[1], (*bmin)[2]};
  float local_bmax[3] = {(*bmax)[0], (*bmax)[1], (*bmax)[2]};

  unsigned int n = right_index - left_index;

#pragma omp parallel firstprivate(local_bmin, local_bmax) if (n > (1024 * 128))
  {
#pragma omp for
    for (int i = static_cast<int>(left_index);
         i < static_cast<int>(right_index); i++) {  // for each face
      unsigned int idx = indices[i];

      float3 bbox_min, bbox_max;
      p.BoundingBox(&bbox_min, &bbox_max, idx);

      // Accumulate into the per-thread bounds; writing the shared bounds
      // here would be a data race.
      for (int k = 0; k < 3; k++) {  // xyz
        if (local_bmin[k] > bbox_min[k]) local_bmin[k] = bbox_min[k];
        if (local_bmax[k] < bbox_max[k]) local_bmax[k] = bbox_max[k];
      }
    }

    // Merge the per-thread bounds into the shared result, one thread at a
    // time.
#pragma omp critical
    {
      for (int k = 0; k < 3; k++) {
        if (local_bmin[k] < (*bmin)[k]) (*bmin)[k] = local_bmin[k];
        if (local_bmax[k] > (*bmax)[k]) (*bmax)[k] = local_bmax[k];
      }
    }
  }
}
#endif

template <class P>
inline void ComputeBoundingBox(float3 *bmin, float3 *bmax,
                               const unsigned int *indices,
                               unsigned int left_index,
                               unsigned int right_index, const P &p) {
  {
    unsigned int idx = indices[left_index];
    p.BoundingBox(bmin, bmax, idx);
  }

  for (unsigned int i = left_index + 1; i < right_index;
       i++) {  // for each primitive
    unsigned int idx = indices[i];

    float3 bbox_min, bbox_max;
    p.BoundingBox(&bbox_min, &bbox_max, idx);

    for (int k = 0; k < 3; k++) {  // xyz
      if ((*bmin)[k] > bbox_min[k]) (*bmin)[k] = bbox_min[k];
      if ((*bmax)[k] < bbox_max[k]) (*bmax)[k] = bbox_max[k];
    }
  }
}

inline void GetBoundingBox(float3 *bmin, float3 *bmax,
                           const std::vector<BBox> &bboxes,
                           unsigned int *indices, unsigned int left_index,
                           unsigned int right_index) {
  {
    unsigned int i = left_index;
    unsigned int idx = indices[i];
    (*bmin)[0] = bboxes[idx].bmin[0];
    (*bmin)[1] = bboxes[idx].bmin[1];
    (*bmin)[2] = bboxes[idx].bmin[2];
    (*bmax)[0] = bboxes[idx].bmax[0];
    (*bmax)[1] = bboxes[idx].bmax[1];
    (*bmax)[2] = bboxes[idx].bmax[2];
  }

  float local_bmin[3] = {(*bmin)[0], (*bmin)[1], (*bmin)[2]};
  float local_bmax[3] = {(*bmax)[0], (*bmax)[1], (*bmax)[2]};

  for (unsigned int i = left_index; i < right_index; i++) {  // for each face
    unsigned int idx = indices[i];

    for (int k = 0; k < 3; k++) {  // xyz
      float minval = bboxes[idx].bmin[k];
      float maxval = bboxes[idx].bmax[k];
      if (local_bmin[k] > minval) local_bmin[k] = minval;
      if (local_bmax[k] < maxval) local_bmax[k] = maxval;
    }
  }

  for (int k = 0; k < 3; k++) {
    (*bmin)[k] = local_bmin[k];
    (*bmax)[k] = local_bmax[k];
  }
}

//
// --
//

#if NANORT_ENABLE_PARALLEL_BUILD
template <class P, class Pred, class I>
unsigned int BVHAccel<P, Pred, I>::BuildShallowTree(
std::vector<BVHNode> *out_nodes, unsigned int left_idx, unsigned int right_idx, unsigned int depth, unsigned int max_shallow_depth, const P &p, const Pred &pred) { assert(left_idx <= right_idx); unsigned int offset = static_cast<unsigned int>(out_nodes->size()); if (stats_.max_tree_depth < depth) { stats_.max_tree_depth = depth; } float3 bmin, bmax; ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p); unsigned int n = right_idx - left_idx; if ((n < options_.min_leaf_primitives) || (depth >= options_.max_tree_depth)) { // Create leaf node. BVHNode leaf; leaf.bmin[0] = bmin[0]; leaf.bmin[1] = bmin[1]; leaf.bmin[2] = bmin[2]; leaf.bmax[0] = bmax[0]; leaf.bmax[1] = bmax[1]; leaf.bmax[2] = bmax[2]; assert(left_idx < std::numeric_limits<unsigned int>::max()); leaf.flag = 1; // leaf leaf.data[0] = n; leaf.data[1] = left_idx; out_nodes->push_back(leaf); // atomic update stats_.num_leaf_nodes++; return offset; } // // Create branch node. // if (depth >= max_shallow_depth) { // Delay to build tree ShallowNodeInfo info; info.left_idx = left_idx; info.right_idx = right_idx; info.offset = offset; shallow_node_infos_.push_back(info); // Add dummy node. BVHNode node; node.axis = -1; node.flag = -1; out_nodes->push_back(node); return offset; } else { // // Compute SAH and find best split axis and position // int min_cut_axis = 0; float cut_pos[3] = {0.0, 0.0, 0.0}; BinBuffer bins(options_.bin_size); ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx, right_idx, p); FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n, options_.cost_t_aabb); // Try all 3 axis until good cut position avaiable. unsigned int mid_idx = left_idx; int cut_axis = min_cut_axis; for (int axis_try = 0; axis_try < 3; axis_try++) { unsigned int *begin = &indices_[left_idx]; unsigned int *end = &indices_[right_idx - 1] + 1; // mimics end() iterator. unsigned int *mid = 0; // try min_cut_axis first. cut_axis = (min_cut_axis + axis_try) % 3; // @fixme { We want some thing like: std::partition(begin, end, // pred(cut_axis, cut_pos[cut_axis])); } pred.Set(cut_axis, cut_pos[cut_axis]); // // Split at (cut_axis, cut_pos) // indices_ will be modified. // mid = std::partition(begin, end, pred); mid_idx = left_idx + static_cast<unsigned int>((mid - begin)); if ((mid_idx == left_idx) || (mid_idx == right_idx)) { // Can't split well. // Switch to object median(which may create unoptimized tree, but // stable) mid_idx = left_idx + (n >> 1); // Try another axis if there's axis to try. } else { // Found good cut. exit loop. 
break; } } BVHNode node; node.axis = cut_axis; node.flag = 0; // 0 = branch out_nodes->push_back(node); unsigned int left_child_index = 0; unsigned int right_child_index = 0; left_child_index = BuildShallowTree(out_nodes, left_idx, mid_idx, depth + 1, max_shallow_depth, p, pred); right_child_index = BuildShallowTree(out_nodes, mid_idx, right_idx, depth + 1, max_shallow_depth, p, pred); (*out_nodes)[offset].data[0] = left_child_index; (*out_nodes)[offset].data[1] = right_child_index; (*out_nodes)[offset].bmin[0] = bmin[0]; (*out_nodes)[offset].bmin[1] = bmin[1]; (*out_nodes)[offset].bmin[2] = bmin[2]; (*out_nodes)[offset].bmax[0] = bmax[0]; (*out_nodes)[offset].bmax[1] = bmax[1]; (*out_nodes)[offset].bmax[2] = bmax[2]; } stats_.num_branch_nodes++; return offset; } #endif template <class P, class Pred, class I> unsigned int BVHAccel<P, Pred, I>::BuildTree(BVHBuildStatistics *out_stat, std::vector<BVHNode> *out_nodes, unsigned int left_idx, unsigned int right_idx, unsigned int depth, const P &p, const Pred &pred) { assert(left_idx <= right_idx); unsigned int offset = static_cast<unsigned int>(out_nodes->size()); if (out_stat->max_tree_depth < depth) { out_stat->max_tree_depth = depth; } float3 bmin, bmax; if (!bboxes_.empty()) { GetBoundingBox(&bmin, &bmax, bboxes_, &indices_.at(0), left_idx, right_idx); } else { ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p); } unsigned int n = right_idx - left_idx; if ((n < options_.min_leaf_primitives) || (depth >= options_.max_tree_depth)) { // Create leaf node. BVHNode leaf; leaf.bmin[0] = bmin[0]; leaf.bmin[1] = bmin[1]; leaf.bmin[2] = bmin[2]; leaf.bmax[0] = bmax[0]; leaf.bmax[1] = bmax[1]; leaf.bmax[2] = bmax[2]; assert(left_idx < std::numeric_limits<unsigned int>::max()); leaf.flag = 1; // leaf leaf.data[0] = n; leaf.data[1] = left_idx; out_nodes->push_back(leaf); // atomic update out_stat->num_leaf_nodes++; return offset; } // // Create branch node. // // // Compute SAH and find best split axis and position // int min_cut_axis = 0; float cut_pos[3] = {0.0, 0.0, 0.0}; BinBuffer bins(options_.bin_size); ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx, right_idx, p); FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n, options_.cost_t_aabb); // Try all 3 axis until good cut position avaiable. unsigned int mid_idx = left_idx; int cut_axis = min_cut_axis; for (int axis_try = 0; axis_try < 3; axis_try++) { unsigned int *begin = &indices_[left_idx]; unsigned int *end = &indices_[right_idx - 1] + 1; // mimics end() iterator. unsigned int *mid = 0; // try min_cut_axis first. cut_axis = (min_cut_axis + axis_try) % 3; pred.Set(cut_axis, cut_pos[cut_axis]); // // Split at (cut_axis, cut_pos) // indices_ will be modified. // mid = std::partition(begin, end, pred); mid_idx = left_idx + static_cast<unsigned int>((mid - begin)); if ((mid_idx == left_idx) || (mid_idx == right_idx)) { // Can't split well. // Switch to object median(which may create unoptimized tree, but // stable) mid_idx = left_idx + (n >> 1); // Try another axis to find better cut. } else { // Found good cut. exit loop. 
break; } } BVHNode node; node.axis = cut_axis; node.flag = 0; // 0 = branch out_nodes->push_back(node); unsigned int left_child_index = 0; unsigned int right_child_index = 0; left_child_index = BuildTree(out_stat, out_nodes, left_idx, mid_idx, depth + 1, p, pred); right_child_index = BuildTree(out_stat, out_nodes, mid_idx, right_idx, depth + 1, p, pred); { (*out_nodes)[offset].data[0] = left_child_index; (*out_nodes)[offset].data[1] = right_child_index; (*out_nodes)[offset].bmin[0] = bmin[0]; (*out_nodes)[offset].bmin[1] = bmin[1]; (*out_nodes)[offset].bmin[2] = bmin[2]; (*out_nodes)[offset].bmax[0] = bmax[0]; (*out_nodes)[offset].bmax[1] = bmax[1]; (*out_nodes)[offset].bmax[2] = bmax[2]; } out_stat->num_branch_nodes++; return offset; } template <class P, class Pred, class I> bool BVHAccel<P, Pred, I>::Build(unsigned int num_primitives, const BVHBuildOptions &options, const P &p, const Pred &pred) { options_ = options; stats_ = BVHBuildStatistics(); nodes_.clear(); bboxes_.clear(); assert(options_.bin_size > 1); unsigned int n = num_primitives; // // 1. Create triangle indices(this will be permutated in BuildTree) // indices_.resize(n); #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < static_cast<int>(n); i++) { indices_[static_cast<size_t>(i)] = static_cast<unsigned int>(i); } // // 2. Compute bounding box(optional). // float3 bmin, bmax; if (options.cache_bbox) { bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<float>::max(); bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<float>::max(); bboxes_.resize(n); for (size_t i = 0; i < n; i++) { // for each primitived unsigned int idx = indices_[i]; BBox bbox; p.BoundingBox(&(bbox.bmin), &(bbox.bmax), i); bboxes_[idx] = bbox; for (int k = 0; k < 3; k++) { // xyz if (bmin[k] > bbox.bmin[k]) { bmin[k] = bbox.bmin[k]; } if (bmax[k] < bbox.bmax[k]) { bmax[k] = bbox.bmax[k]; } } } } else { #ifdef _OPENMP ComputeBoundingBoxOMP(&bmin, &bmax, &indices_.at(0), 0, n, p); #else ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), 0, n, p); #endif } // // 3. Build tree // #ifdef _OPENMP #if NANORT_ENABLE_PARALLEL_BUILD // Do parallel build for enoughly large dataset. if (n > options.min_primitives_for_parallel_build) { BuildShallowTree(&nodes_, 0, n, /* root depth */ 0, options.shallow_depth, p, pred); // [0, n) assert(shallow_node_infos_.size() > 0); // Build deeper tree in parallel std::vector<std::vector<BVHNode> > local_nodes(shallow_node_infos_.size()); std::vector<BVHBuildStatistics> local_stats(shallow_node_infos_.size()); #pragma omp parallel for for (int i = 0; i < static_cast<int>(shallow_node_infos_.size()); i++) { unsigned int left_idx = shallow_node_infos_[i].left_idx; unsigned int right_idx = shallow_node_infos_[i].right_idx; BuildTree(&(local_stats[i]), &(local_nodes[i]), left_idx, right_idx, options.shallow_depth, p, pred); } // Join local nodes for (int i = 0; i < static_cast<int>(local_nodes.size()); i++) { assert(!local_nodes[i].empty()); size_t offset = nodes_.size(); // Add offset to child index(for branch node). for (size_t j = 0; j < local_nodes[i].size(); j++) { if (local_nodes[i][j].flag == 0) { // branch local_nodes[i][j].data[0] += offset - 1; local_nodes[i][j].data[1] += offset - 1; } } // replace nodes_[shallow_node_infos_[i].offset] = local_nodes[i][0]; // Skip root element of the local node. 
nodes_.insert(nodes_.end(), local_nodes[i].begin() + 1, local_nodes[i].end()); } // Join statistics for (int i = 0; i < static_cast<int>(local_nodes.size()); i++) { stats_.max_tree_depth = std::max(stats_.max_tree_depth, local_stats[i].max_tree_depth); stats_.num_leaf_nodes += local_stats[i].num_leaf_nodes; stats_.num_branch_nodes += local_stats[i].num_branch_nodes; } } else { BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred); // [0, n) } #else // !NANORT_ENABLE_PARALLEL_BUILD { BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred); // [0, n) } #endif #else // !_OPENMP { BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred); // [0, n) } #endif return true; } template <class P, class Pred, class I> bool BVHAccel<P, Pred, I>::Dump(const char *filename) { FILE *fp = fopen(filename, "wb"); if (!fp) { fprintf(stderr, "[BVHAccel] Cannot write a file: %s\n", filename); return false; } size_t numNodes = nodes_.size(); assert(nodes_.size() > 0); size_t numIndices = indices_.size(); size_t r = 0; r = fwrite(&numNodes, sizeof(size_t), 1, fp); assert(r == 1); r = fwrite(&nodes_.at(0), sizeof(BVHNode), numNodes, fp); assert(r == numNodes); r = fwrite(&numIndices, sizeof(size_t), 1, fp); assert(r == 1); r = fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp); assert(r == numIndices); fclose(fp); return true; } template <class P, class Pred, class I> bool BVHAccel<P, Pred, I>::Load(const char *filename) { FILE *fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Cannot open file: %s\n", filename); return false; } size_t numNodes; size_t numIndices; size_t r = 0; r = fread(&numNodes, sizeof(size_t), 1, fp); assert(r == 1); assert(numNodes > 0); nodes_.resize(numNodes); r = fread(&nodes_.at(0), sizeof(BVHNode), numNodes, fp); assert(r == numNodes); r = fread(&numIndices, sizeof(size_t), 1, fp); assert(r == 1); indices_.resize(numIndices); r = fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp); assert(r == numIndices); fclose(fp); return true; } inline bool IntersectRayAABB(float *tminOut, // [out] float *tmaxOut, // [out] float min_t, float max_t, const float bmin[3], const float bmax[3], float3 ray_org, float3 ray_inv_dir, int ray_dir_sign[3]) { float tmin, tmax; const float min_x = ray_dir_sign[0] ? bmax[0] : bmin[0]; const float min_y = ray_dir_sign[1] ? bmax[1] : bmin[1]; const float min_z = ray_dir_sign[2] ? bmax[2] : bmin[2]; const float max_x = ray_dir_sign[0] ? bmin[0] : bmax[0]; const float max_y = ray_dir_sign[1] ? bmin[1] : bmax[1]; const float max_z = ray_dir_sign[2] ? bmin[2] : bmax[2]; // X const float tmin_x = (min_x - ray_org[0]) * ray_inv_dir[0]; // MaxMult robust BVH traversal(up to 4 ulp). // 1.0000000000000004 for double precision. 
const float tmax_x = (max_x - ray_org[0]) * ray_inv_dir[0] * 1.00000024f; // Y const float tmin_y = (min_y - ray_org[1]) * ray_inv_dir[1]; const float tmax_y = (max_y - ray_org[1]) * ray_inv_dir[1] * 1.00000024f; // Z const float tmin_z = (min_z - ray_org[2]) * ray_inv_dir[2]; const float tmax_z = (max_z - ray_org[2]) * ray_inv_dir[2] * 1.00000024f; tmin = safemax(tmin_z, safemax(tmin_y, safemax(tmin_x, min_t))); tmax = safemin(tmax_z, safemin(tmax_y, safemin(tmax_x, max_t))); if (tmin <= tmax) { (*tminOut) = tmin; (*tmaxOut) = tmax; return true; } return false; // no hit } template <class P, class Pred, class I> inline bool BVHAccel<P, Pred, I>::TestLeafNode(const BVHNode &node, const Ray &ray, const I &intersector) const { bool hit = false; unsigned int num_primitives = node.data[0]; unsigned int offset = node.data[1]; float t = intersector.GetT(); // current hit distance float3 ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; float3 ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; for (unsigned int i = 0; i < num_primitives; i++) { unsigned int prim_idx = indices_[i + offset]; float local_t = t; if (intersector.Intersect(&local_t, prim_idx)) { if (local_t > ray.min_t) { // Update isect state t = local_t; intersector.Update(t, prim_idx); hit = true; } } } return hit; } #if 0 template <class P, class Pred, class I> bool BVHAccel<P, Pred, I>::MultiHitTestLeafNode(const BVHNode &node, const Ray &ray, int max_intersections, const I &intersector) const { bool hit = false; unsigned int num_primitives = node.data[0]; unsigned int offset = node.data[1]; float t = std::numeric_limits<float>::max(); if (isects->size() >= static_cast<size_t>(max_intersections)) { t = isects->top().t; // current furthest hit distance } float3 ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; float3 ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; for (unsigned int i = 0; i < num_primitives; i++) { unsigned int prim_idx = indices_[i + offset]; float local_t = t, u = 0.0f, v = 0.0f; if (p.Intersect(&local_t, &u, &v, prim_idx)) { // Update isect state if ((local_t > ray.min_t)) { if (isects->size() < static_cast<size_t>(max_intersections)) { Intersection isect; t = local_t; isect.t = t; isect.u = u; isect.v = v; isect.prim_id = prim_idx; isects->push(isect); // Update t to furthest distance. t = ray.max_t; hit = true; } else { if (local_t < isects->top().t) { // delete furthest intersection and add new intersection. isects->pop(); Intersection isect; isect.t = local_t; isect.u = u; isect.v = v; isect.prim_id = prim_idx; isects->push(isect); // Update furthest hit distance t = isects->top().t; hit = true; } } } } } return hit; } #endif template <class P, class Pred, class I> bool BVHAccel<P, Pred, I>::Traverse(const Ray &ray, const BVHTraceOptions &options, const I& intersector) const { const int kMaxStackDepth = 512; float hit_t = ray.max_t; int node_stack_index = 0; unsigned int node_stack[512]; node_stack[0] = 0; // Init isect info as no hit intersector.Update(hit_t, static_cast<unsigned int>(-1)); intersector.PrepareTraversal(ray, options); int dir_sign[3]; dir_sign[0] = ray.dir[0] < 0.0f ? 1 : 0; dir_sign[1] = ray.dir[1] < 0.0f ? 1 : 0; dir_sign[2] = ray.dir[2] < 0.0f ? 
1 : 0; // @fixme { Check edge case; i.e., 1/0 } float3 ray_inv_dir; ray_inv_dir[0] = 1.0f / ray.dir[0]; ray_inv_dir[1] = 1.0f / ray.dir[1]; ray_inv_dir[2] = 1.0f / ray.dir[2]; float3 ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; float min_t = std::numeric_limits<float>::max(); float max_t = -std::numeric_limits<float>::max(); while (node_stack_index >= 0) { unsigned int index = node_stack[node_stack_index]; const BVHNode &node = nodes_[index]; node_stack_index--; bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin, node.bmax, ray_org, ray_inv_dir, dir_sign); if (node.flag == 0) { // branch node if (hit) { int order_near = dir_sign[node.axis]; int order_far = 1 - order_near; // Traverse near first. node_stack[++node_stack_index] = node.data[order_far]; node_stack[++node_stack_index] = node.data[order_near]; } } else { // leaf node if (hit) { if (TestLeafNode(node, ray, intersector)) { hit_t = intersector.GetT(); } } } } assert(node_stack_index < kMaxStackDepth); bool hit = (intersector.GetT() < ray.max_t); intersector.PostTraversal(ray, hit); return hit; } #if 0 template <class P, class Pred, class I> bool BVHAccel<P, Pred, I>::MultiHitTraverse(const Ray &ray, const BVHTraceOptions &options, int max_intersections, StackVector<I, 128> *isects) const { const int kMaxStackDepth = 512; float hit_t = ray.max_t; int node_stack_index = 0; unsigned int node_stack[512]; node_stack[0] = 0; // Stores furthest intersection at top std::priority_queue<I, std::vector<I>, IsectComparator<I> > isect_pq; //// Stores furthest intersection at top //template<class I> //typedef std::priority_queue<I, std::vector<I>, // IsectComparator<I> > // IsectVector; (*isects)->clear(); p.PrepareTraversal(ray, options); int dir_sign[3]; dir_sign[0] = ray.dir[0] < 0.0f ? 1 : 0; dir_sign[1] = ray.dir[1] < 0.0f ? 1 : 0; dir_sign[2] = ray.dir[2] < 0.0f ? 1 : 0; // @fixme { Check edge case; i.e., 1/0 } float3 ray_inv_dir; ray_inv_dir[0] = 1.0f / ray.dir[0]; ray_inv_dir[1] = 1.0f / ray.dir[1]; ray_inv_dir[2] = 1.0f / ray.dir[2]; float3 ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; float min_t, max_t; while (node_stack_index >= 0) { unsigned int index = node_stack[node_stack_index]; const BVHNode &node = nodes_[static_cast<size_t>(index)]; node_stack_index--; bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin, node.bmax, ray_org, ray_inv_dir, dir_sign); if (node.flag == 0) { // branch node if (hit) { int order_near = dir_sign[node.axis]; int order_far = 1 - order_near; // Traverse near first. node_stack[++node_stack_index] = node.data[order_far]; node_stack[++node_stack_index] = node.data[order_near]; } } else { // leaf node if (hit) { if (MultiHitTestLeafNode(&isect_pq, max_intersections, node, ray, p)) { // Only update `hit_t` when queue is full. if (isect_pq.size() >= static_cast<size_t>(max_intersections)) { hit_t = isect_pq.top().t; } } } } } assert(node_stack_index < kMaxStackDepth); if (!isect_pq.empty()) { // Store intesection in reverse order(make it frontmost order) size_t n = isect_pq.size(); (*isects)->resize(n); for (size_t i = 0; i < n; i++) { const Intersection &isect = isect_pq.top(); (*isects)[n - i - 1] = isect; isect_pq.pop(); } return true; } return false; } #endif } // namespace nanort #endif // NANORT_H_
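// ----------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the library): one plausible
// way to combine the predefined TriangleMesh, TriangleSAHPred and
// TriangleIntersector helpers declared above. The mesh arrays `vertices` and
// `faces` and the count `num_faces` are assumed to be supplied by the caller;
// they are not defined in this header.
//
//   bool trace_one_ray(const float *vertices, const unsigned int *faces,
//                      unsigned int num_faces) {
//     nanort::TriangleMesh mesh(vertices, faces);
//     nanort::TriangleSAHPred pred(vertices, faces);
//
//     nanort::BVHBuildOptions build_options;  // default SAH parameters
//     nanort::BVHAccel<nanort::TriangleMesh, nanort::TriangleSAHPred,
//                      nanort::TriangleIntersector<> > accel;
//     if (!accel.Build(num_faces, build_options, mesh, pred)) {
//       return false;
//     }
//
//     nanort::Ray ray;  // org/dir/min_t/max_t are the "must set" fields.
//     ray.org[0] = 0.0f; ray.org[1] = 0.0f; ray.org[2] = 10.0f;
//     ray.dir[0] = 0.0f; ray.dir[1] = 0.0f; ray.dir[2] = -1.0f;
//     ray.min_t = 0.0f;
//     ray.max_t = 1.0e+30f;
//
//     nanort::TriangleIntersector<> isector(vertices, faces);
//     nanort::BVHTraceOptions trace_options;
//     // On a hit, isector.intersection holds t, u, v and prim_id.
//     return accel.Traverse(ray, trace_options, isector);
//   }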
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(1,ceild(8*t2-Nz+9,4)),t1+1);t3<=min(floord(4*Nt+Ny-9,4),floord(4*t1+Ny-1,4));t3++) { for (t4=max(max(ceild(t1-6,8),ceild(8*t2-Nz-19,32)),ceild(4*t3-Ny-19,32));t4<=min(min(floord(4*Nt+Nx-9,32),floord(4*t1+Nx-1,32)),floord(4*t3+Nx-9,32));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),t3-1),8*t4+6);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(32*t4,4*t5+4); ubv=min(32*t4+31,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ 
(-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
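/*
 * Readability sketch (not executed by the benchmark): the tiled CLooG loop
 * nest above is a time-skewed, tiled version of the following naive update,
 * a 25-point axis-symmetric stencil with radius 4 along each axis. The index
 * bounds assume the 8-cell halo added to Nx/Ny/Nz at allocation time.
 *
 *   for (t = 0; t < Nt; t++)
 *     for (i = 4; i < Nz - 4; i++)
 *       for (j = 4; j < Ny - 4; j++)
 *         for (k = 4; k < Nx - 4; k++)
 *           A[(t+1)%2][i][j][k] =
 *               coef[ 0][i][j][k] *  A[t%2][i][j][k]
 *             + coef[ 1][i][j][k] * (A[t%2][i-1][j][k] + A[t%2][i+1][j][k])
 *             + coef[ 2][i][j][k] * (A[t%2][i][j-1][k] + A[t%2][i][j+1][k])
 *             + coef[ 3][i][j][k] * (A[t%2][i][j][k-1] + A[t%2][i][j][k+1])
 *             + coef[ 4][i][j][k] * (A[t%2][i-2][j][k] + A[t%2][i+2][j][k])
 *             + coef[ 5][i][j][k] * (A[t%2][i][j-2][k] + A[t%2][i][j+2][k])
 *             + coef[ 6][i][j][k] * (A[t%2][i][j][k-2] + A[t%2][i][j][k+2])
 *             + coef[ 7][i][j][k] * (A[t%2][i-3][j][k] + A[t%2][i+3][j][k])
 *             + coef[ 8][i][j][k] * (A[t%2][i][j-3][k] + A[t%2][i][j+3][k])
 *             + coef[ 9][i][j][k] * (A[t%2][i][j][k-3] + A[t%2][i][j][k+3])
 *             + coef[10][i][j][k] * (A[t%2][i-4][j][k] + A[t%2][i+4][j][k])
 *             + coef[11][i][j][k] * (A[t%2][i][j-4][k] + A[t%2][i][j+4][k])
 *             + coef[12][i][j][k] * (A[t%2][i][j][k-4] + A[t%2][i][j][k+4]);
 */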
libimagequant.c
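/*
 * Orientation note (added; not from the original source): this file
 * implements the libimagequant public API declared in libimagequant.h.
 * A typical caller sequence, per that header's documented API, looks
 * roughly like the sketch below; `rgba_bitmap`, `w`, `h`, `out_buf` and
 * `out_buf_size` are caller-provided, and error handling is elided.
 *
 *   liq_attr *attr = liq_attr_create();
 *   liq_set_quality(attr, 70, 100);  // (attr, minimum, target), 0..100
 *   liq_image *img = liq_image_create_rgba(attr, rgba_bitmap, w, h, 0);
 *   liq_result *res;
 *   if (liq_image_quantize(img, attr, &res) == LIQ_OK) {
 *       liq_write_remapped_image(res, img, out_buf, out_buf_size);
 *       const liq_palette *pal = liq_get_palette(res);
 *       // ... write out_buf + pal as an 8-bit paletted image ...
 *       liq_result_destroy(res);
 *   }
 *   liq_image_destroy(img);
 *   liq_attr_destroy(attr);
 */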
/* ** © 2009-2018 by Kornel Lesiński. ** © 1989, 1991 by Jef Poskanzer. ** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** ** See COPYRIGHT file for license. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <stdint.h> #include <limits.h> #if !(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) && !(defined(_MSC_VER) && _MSC_VER >= 1800) #error "This program requires C99, e.g. -std=c99 switch in GCC or it requires MSVC 18.0 or higher." #error "Ignore torrent of syntax errors that may follow. It's only because compiler is set to use too old C version." #endif #ifdef _OPENMP #include <omp.h> #define LIQ_TEMP_ROW_WIDTH(img_width) (((img_width) | 15) + 1) /* keep alignment & leave space between rows to avoid cache line contention */ #else #define LIQ_TEMP_ROW_WIDTH(img_width) (img_width) #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "libimagequant.h" #include "pam.h" #include "mediancut.h" #include "nearest.h" #include "blur.h" #include "kmeans.h" #define LIQ_HIGH_MEMORY_LIMIT (1<<26) /* avoid allocating buffers larger than 64MB */ // each structure has a pointer as a unique identifier that allows type checking at run time static const char liq_attr_magic[] = "liq_attr"; static const char liq_image_magic[] = "liq_image"; static const char liq_result_magic[] = "liq_result"; static const char liq_histogram_magic[] = "liq_histogram"; static const char liq_remapping_result_magic[] = "liq_remapping_result"; static const char liq_freed_magic[] = "free"; #define CHECK_STRUCT_TYPE(attr, kind) liq_crash_if_invalid_handle_pointer_given((const liq_attr*)attr, kind ## _magic) #define CHECK_USER_POINTER(ptr) liq_crash_if_invalid_pointer_given(ptr) struct liq_attr { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); double target_mse, max_mse, kmeans_iteration_limit; float min_opaque_val; unsigned int max_colors, max_histogram_entries; unsigned int min_posterization_output /* user setting */, min_posterization_input /* speed setting */; unsigned int kmeans_iterations, feedback_loop_trials; bool last_index_transparent, use_contrast_maps; unsigned char use_dither_map; unsigned char speed; unsigned char progress_stage1, progress_stage2, progress_stage3; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_log_callback_function *log_callback; void *log_callback_user_info; liq_log_flush_callback_function *log_flush_callback; void *log_flush_callback_user_info; }; struct liq_image { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); f_pixel *f_pixels; rgba_pixel **rows; double gamma; unsigned int width, height; unsigned char *importance_map, *edges, *dither_map; rgba_pixel *pixels, *temp_row; f_pixel *temp_f_row; liq_image_get_rgba_row_callback *row_callback; void *row_callback_user_info; liq_image *background; float min_opaque_val; f_pixel fixed_colors[256]; unsigned short fixed_colors_count; bool free_pixels, free_rows, free_rows_internal; }; typedef struct liq_remapping_result { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); unsigned char *pixels; colormap *palette; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_palette int_palette; double gamma, palette_error; float dither_level; unsigned char use_dither_map; unsigned char progress_stage1; } liq_remapping_result; struct liq_result { const char *magic_header; void* (*malloc)(size_t); 
void (*free)(void*); liq_remapping_result *remapping; colormap *palette; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_palette int_palette; float dither_level; double gamma, palette_error; int min_posterization_output; unsigned char use_dither_map; }; struct liq_histogram { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); struct acolorhash_table *acht; double gamma; f_pixel fixed_colors[256]; unsigned short fixed_colors_count; unsigned short ignorebits; bool had_image_added; }; static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels) LIQ_NONNULL; static void contrast_maps(liq_image *image) LIQ_NONNULL; static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output) LIQ_NONNULL; static const rgba_pixel *liq_image_get_row_rgba(liq_image *input_image, unsigned int row) LIQ_NONNULL; static bool liq_image_get_row_f_init(liq_image *img) LIQ_NONNULL; static const f_pixel *liq_image_get_row_f(liq_image *input_image, unsigned int row) LIQ_NONNULL; static void liq_remapping_result_destroy(liq_remapping_result *result) LIQ_NONNULL; static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **) LIQ_NONNULL; static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output) LIQ_NONNULL; LIQ_NONNULL static void liq_verbose_printf(const liq_attr *context, const char *fmt, ...) { if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); LIQ_ARRAY(char, buf, required_space); va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(context, buf, context->log_callback_user_info); } } LIQ_NONNULL inline static void verbose_print(const liq_attr *attr, const char *msg) { if (attr->log_callback) { attr->log_callback(attr, msg, attr->log_callback_user_info); } } LIQ_NONNULL static void liq_verbose_printf_flush(liq_attr *attr) { if (attr->log_flush_callback) { attr->log_flush_callback(attr, attr->log_flush_callback_user_info); } } LIQ_NONNULL static bool liq_progress(const liq_attr *attr, const float percent) { return attr->progress_callback && !attr->progress_callback(percent, attr->progress_callback_user_info); } LIQ_NONNULL static bool liq_remap_progress(const liq_remapping_result *quant, const float percent) { return quant->progress_callback && !quant->progress_callback(percent, quant->progress_callback_user_info); } #if USE_SSE inline static bool is_sse_available() { #if (defined(__x86_64__) || defined(__amd64) || defined(_WIN64)) return true; #elif _MSC_VER int info[4]; __cpuid(info, 1); /* bool is implemented as a built-in type of size 1 in MSVC */ return info[3] & (1<<26) ? 
true : false; #else int a,b,c,d; cpuid(1, a, b, c, d); return d & (1<<25); // edx bit 25 is set when SSE is present #endif } #endif /* make it clear in backtrace when user-supplied handle points to invalid memory */ NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header); LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header) { if (!user_supplied_pointer) { return false; } if (user_supplied_pointer->magic_header == liq_freed_magic) { fprintf(stderr, "%s used after being freed", expected_magic_header); // this is not normal error handling, this is programmer error that should crash the program. // program cannot safely continue if memory has been used after it's been freed. // abort() is nasty, but security vulnerability may be worse. abort(); } return user_supplied_pointer->magic_header == expected_magic_header; } NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer); LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer) { if (!pointer) { return false; } // Force a read from the given (potentially invalid) memory location in order to check early whether this crashes the program or not. // It doesn't matter what value is read, the code here is just to shut the compiler up about unused read. char test_access = *((volatile char *)pointer); return test_access || true; } LIQ_NONNULL static void liq_log_error(const liq_attr *attr, const char *msg) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; liq_verbose_printf(attr, " error: %s", msg); } static double quality_to_mse(long quality) { if (quality == 0) { return MAX_DIFF; } if (quality == 100) { return 0; } // curve fudged to be roughly similar to quality of libjpeg // except lowest 10 for really low number of colors const double extra_low_quality_fudge = MAX(0,0.016/(0.001+quality) - 0.001); return extra_low_quality_fudge + 2.5/pow(210.0 + quality, 1.2) * (100.1-quality)/100.0; } static unsigned int mse_to_quality(double mse) { for(int i=100; i > 0; i--) { if (mse <= quality_to_mse(i) + 0.000001) { // + epsilon for floating point errors return i; } } return 0; } /** internally MSE is a sum of all channels with pixels 0..1 range, but other software gives per-RGB-channel MSE for 0..255 range */ static double mse_to_standard_mse(double mse) { return mse * 65536.0/6.0; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_quality(liq_attr* attr, int minimum, int target) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (target < 0 || target > 100 || target < minimum || minimum < 0) return LIQ_VALUE_OUT_OF_RANGE; attr->target_mse = quality_to_mse(target); attr->max_mse = quality_to_mse(minimum); return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_min_quality(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return mse_to_quality(attr->max_mse); } LIQ_EXPORT LIQ_NONNULL int liq_get_max_quality(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return mse_to_quality(attr->target_mse); } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_max_colors(liq_attr* attr, int colors) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (colors < 2 || colors > 256) return LIQ_VALUE_OUT_OF_RANGE; attr->max_colors = colors; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_max_colors(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return 
attr->max_colors; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_posterization(liq_attr *attr, int bits) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (bits < 0 || bits > 4) return LIQ_VALUE_OUT_OF_RANGE; attr->min_posterization_output = bits; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_min_posterization(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return attr->min_posterization_output; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_speed(liq_attr* attr, int speed) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (speed < 1 || speed > 10) return LIQ_VALUE_OUT_OF_RANGE; unsigned int iterations = MAX(8-speed, 0); iterations += iterations * iterations/2; attr->kmeans_iterations = iterations; attr->kmeans_iteration_limit = 1.0/(double)(1<<(23-speed)); attr->feedback_loop_trials = MAX(56-9*speed, 0); attr->max_histogram_entries = (1<<17) + (1<<18)*(10-speed); attr->min_posterization_input = (speed >= 8) ? 1 : 0; attr->use_dither_map = (speed <= (omp_get_max_threads() > 1 ? 7 : 5)); // parallelized dither map might speed up floyd remapping if (attr->use_dither_map && speed < 3) { attr->use_dither_map = 2; // always } attr->use_contrast_maps = (speed <= 7) || attr->use_dither_map; attr->speed = speed; attr->progress_stage1 = attr->use_contrast_maps ? 20 : 8; if (attr->feedback_loop_trials < 2) { attr->progress_stage1 += 30; } attr->progress_stage3 = 50 / (1+speed); attr->progress_stage2 = 100 - attr->progress_stage1 - attr->progress_stage3; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_speed(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return attr->speed; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_output_gamma(liq_result* res, double gamma) { if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER; if (gamma <= 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE; if (res->remapping) { liq_remapping_result_destroy(res->remapping); res->remapping = NULL; } res->gamma = gamma; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_opacity(liq_attr* attr, int min) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (min < 0 || min > 255) return LIQ_VALUE_OUT_OF_RANGE; attr->min_opaque_val = (double)min/255.0; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_min_opacity(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return MIN(255.f, 256.f * attr->min_opaque_val); } LIQ_EXPORT LIQ_NONNULL void liq_set_last_index_transparent(liq_attr* attr, int is_last) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; attr->last_index_transparent = !!is_last; } LIQ_EXPORT void liq_attr_set_progress_callback(liq_attr *attr, liq_progress_callback_function *callback, void *user_info) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; attr->progress_callback = callback; attr->progress_callback_user_info = user_info; } LIQ_EXPORT void liq_result_set_progress_callback(liq_result *result, liq_progress_callback_function *callback, void *user_info) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return; result->progress_callback = callback; result->progress_callback_user_info = user_info; } LIQ_EXPORT void liq_set_log_callback(liq_attr *attr, liq_log_callback_function *callback, void* user_info) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; liq_verbose_printf_flush(attr); attr->log_callback = callback; attr->log_callback_user_info = user_info; } LIQ_EXPORT void liq_set_log_flush_callback(liq_attr *attr, liq_log_flush_callback_function 
*callback, void* user_info) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; attr->log_flush_callback = callback; attr->log_flush_callback_user_info = user_info; } LIQ_EXPORT liq_attr* liq_attr_create() { return liq_attr_create_with_allocator(NULL, NULL); } LIQ_EXPORT LIQ_NONNULL void liq_attr_destroy(liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) { return; } liq_verbose_printf_flush(attr); attr->magic_header = liq_freed_magic; attr->free(attr); } LIQ_EXPORT LIQ_NONNULL liq_attr* liq_attr_copy(const liq_attr *orig) { if (!CHECK_STRUCT_TYPE(orig, liq_attr)) { return NULL; } liq_attr *attr = orig->malloc(sizeof(liq_attr)); if (!attr) return NULL; *attr = *orig; return attr; } static void *liq_aligned_malloc(size_t size) { unsigned char *ptr = malloc(size + 16); if (!ptr) { return NULL; } uintptr_t offset = 16 - ((uintptr_t)ptr & 15); // also reserves 1 byte for ptr[-1] ptr += offset; assert(0 == (((uintptr_t)ptr) & 15)); ptr[-1] = offset ^ 0x59; // store how much pointer was shifted to get the original for free() return ptr; } LIQ_NONNULL static void liq_aligned_free(void *inptr) { unsigned char *ptr = inptr; size_t offset = ptr[-1] ^ 0x59; assert(offset > 0 && offset <= 16); free(ptr - offset); } LIQ_EXPORT liq_attr* liq_attr_create_with_allocator(void* (*custom_malloc)(size_t), void (*custom_free)(void*)) { #if USE_SSE if (!is_sse_available()) { return NULL; } #endif if (!custom_malloc && !custom_free) { custom_malloc = liq_aligned_malloc; custom_free = liq_aligned_free; } else if (!custom_malloc != !custom_free) { return NULL; // either specify both or none } liq_attr *attr = custom_malloc(sizeof(liq_attr)); if (!attr) return NULL; *attr = (liq_attr) { .magic_header = liq_attr_magic, .malloc = custom_malloc, .free = custom_free, .max_colors = 256, .min_opaque_val = 1, // whether to preserve opaque colors for IE (1.0=no, does not affect alpha) .last_index_transparent = false, // puts transparent color at the last index. This is a workaround for Blu-ray subtitles. .target_mse = 0, .max_mse = MAX_DIFF, }; liq_set_speed(attr, 4); return attr; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_add_fixed_color(liq_image *img, liq_color color) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (img->fixed_colors_count > 255) return LIQ_UNSUPPORTED; float gamma_lut[256]; to_f_set_gamma(gamma_lut, img->gamma); img->fixed_colors[img->fixed_colors_count++] = rgba_to_f(gamma_lut, (rgba_pixel){ .r = color.r, .g = color.g, .b = color.b, .a = color.a, }); return LIQ_OK; } LIQ_NONNULL static liq_error liq_histogram_add_fixed_color_f(liq_histogram *hist, f_pixel color) { if (hist->fixed_colors_count > 255) return LIQ_UNSUPPORTED; hist->fixed_colors[hist->fixed_colors_count++] = color; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_fixed_color(liq_histogram *hist, liq_color color, double gamma) { if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return LIQ_INVALID_POINTER; float gamma_lut[256]; to_f_set_gamma(gamma_lut, gamma ? gamma : 0.45455); const f_pixel px = rgba_to_f(gamma_lut, (rgba_pixel){ .r = color.r, .g = color.g, .b = color.b, .a = color.a, }); return liq_histogram_add_fixed_color_f(hist, px); } LIQ_NONNULL static bool liq_image_use_low_memory(liq_image *img) { img->temp_f_row = img->malloc(sizeof(img->f_pixels[0]) * LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_max_threads()); return img->temp_f_row != NULL; } LIQ_NONNULL static bool liq_image_should_use_low_memory(liq_image *img, const bool low_memory_hint) { return img->width * img->height > (low_memory_hint ?
LIQ_HIGH_MEMORY_LIMIT/8 : LIQ_HIGH_MEMORY_LIMIT) / sizeof(f_pixel); // Watch out for integer overflow } static liq_image *liq_image_create_internal(const liq_attr *attr, rgba_pixel* rows[], liq_image_get_rgba_row_callback *row_callback, void *row_callback_user_info, int width, int height, double gamma) { if (gamma < 0 || gamma > 1.0) { liq_log_error(attr, "gamma must be >= 0 and <= 1 (try 1/gamma instead)"); return NULL; } if (!rows && !row_callback) { liq_log_error(attr, "missing row data"); return NULL; } liq_image *img = attr->malloc(sizeof(liq_image)); if (!img) return NULL; *img = (liq_image){ .magic_header = liq_image_magic, .malloc = attr->malloc, .free = attr->free, .width = width, .height = height, .gamma = gamma ? gamma : 0.45455, .rows = rows, .row_callback = row_callback, .row_callback_user_info = row_callback_user_info, .min_opaque_val = attr->min_opaque_val, }; if (!rows || attr->min_opaque_val < 1.f) { img->temp_row = attr->malloc(sizeof(img->temp_row[0]) * LIQ_TEMP_ROW_WIDTH(width) * omp_get_max_threads()); if (!img->temp_row) return NULL; } // if image is huge or converted pixels are not likely to be reused then don't cache converted pixels if (liq_image_should_use_low_memory(img, !img->temp_row && !attr->use_contrast_maps && !attr->use_dither_map)) { verbose_print(attr, " conserving memory"); if (!liq_image_use_low_memory(img)) return NULL; } if (img->min_opaque_val < 1.f) { verbose_print(attr, " Working around IE6 bug by making image less transparent..."); } return img; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_memory_ownership(liq_image *img, int ownership_flags) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (!img->rows || !ownership_flags || (ownership_flags & ~(LIQ_OWN_ROWS|LIQ_OWN_PIXELS))) { return LIQ_VALUE_OUT_OF_RANGE; } if (ownership_flags & LIQ_OWN_ROWS) { if (img->free_rows_internal) return LIQ_VALUE_OUT_OF_RANGE; img->free_rows = true; } if (ownership_flags & LIQ_OWN_PIXELS) { img->free_pixels = true; if (!img->pixels) { // for simplicity of this API there's no explicit bitmap argument, // so the row with the lowest address is assumed to be at the start of the bitmap img->pixels = img->rows[0]; for(unsigned int i=1; i < img->height; i++) { img->pixels = MIN(img->pixels, img->rows[i]); } } } return LIQ_OK; } LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image); LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image); LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_importance_map(liq_image *img, unsigned char importance_map[], size_t buffer_size, enum liq_ownership ownership) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (!CHECK_USER_POINTER(importance_map)) return LIQ_INVALID_POINTER; const size_t required_size = img->width * img->height; if (buffer_size < required_size) { return LIQ_BUFFER_TOO_SMALL; } if (ownership == LIQ_COPY_PIXELS) { unsigned char *tmp = img->malloc(required_size); if (!tmp) { return LIQ_OUT_OF_MEMORY; } memcpy(tmp, importance_map, required_size); importance_map = tmp; } else if (ownership != LIQ_OWN_PIXELS) { return LIQ_UNSUPPORTED; } liq_image_free_importance_map(img); img->importance_map = importance_map; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_background(liq_image *img, liq_image *background) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(background, liq_image)) return LIQ_INVALID_POINTER; if (background->background) { return LIQ_UNSUPPORTED; } if (img->width != 
background->width || img->height != background->height) { return LIQ_BUFFER_TOO_SMALL; } if (img->background) { liq_image_destroy(img->background); } img->background = background; liq_image_free_maps(img); // Force them to be re-analyzed with the background return LIQ_OK; } LIQ_NONNULL static bool check_image_size(const liq_attr *attr, const int width, const int height) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) { return false; } if (width <= 0 || height <= 0) { liq_log_error(attr, "width and height must be > 0"); return false; } if (width > INT_MAX/sizeof(rgba_pixel)/height || width > INT_MAX/16/sizeof(f_pixel) || height > INT_MAX/sizeof(size_t)) { liq_log_error(attr, "image too large"); return false; } return true; } LIQ_EXPORT liq_image *liq_image_create_custom(const liq_attr *attr, liq_image_get_rgba_row_callback *row_callback, void* user_info, int width, int height, double gamma) { if (!check_image_size(attr, width, height)) { return NULL; } return liq_image_create_internal(attr, NULL, row_callback, user_info, width, height, gamma); } LIQ_EXPORT liq_image *liq_image_create_rgba_rows(const liq_attr *attr, void *const rows[], int width, int height, double gamma) { if (!check_image_size(attr, width, height)) { return NULL; } for(int i=0; i < height; i++) { if (!CHECK_USER_POINTER(rows+i) || !CHECK_USER_POINTER(rows[i])) { liq_log_error(attr, "invalid row pointers"); return NULL; } } return liq_image_create_internal(attr, (rgba_pixel**)rows, NULL, NULL, width, height, gamma); } LIQ_EXPORT LIQ_NONNULL liq_image *liq_image_create_rgba(const liq_attr *attr, const void* bitmap, int width, int height, double gamma) { if (!check_image_size(attr, width, height)) { return NULL; } if (!CHECK_USER_POINTER(bitmap)) { liq_log_error(attr, "invalid bitmap pointer"); return NULL; } rgba_pixel *const pixels = (rgba_pixel *const)bitmap; rgba_pixel **rows = attr->malloc(sizeof(rows[0])*height); if (!rows) return NULL; for(int i=0; i < height; i++) { rows[i] = pixels + width * i; } liq_image *image = liq_image_create_internal(attr, rows, NULL, NULL, width, height, gamma); if (!image) { attr->free(rows); return NULL; } image->free_rows = true; image->free_rows_internal = true; return image; } NEVER_INLINE LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info); LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info) { assert(callback); assert(temp_row); callback(temp_row, row, width, user_info); } LIQ_NONNULL inline static bool liq_image_has_rgba_pixels(const liq_image *img) { if (!CHECK_STRUCT_TYPE(img, liq_image)) { return false; } return img->rows || (img->temp_row && img->row_callback); } LIQ_NONNULL inline static bool liq_image_can_use_rgba_rows(const liq_image *img) { assert(liq_image_has_rgba_pixels(img)); const bool iebug = img->min_opaque_val < 1.f; return (img->rows && !iebug); } LIQ_NONNULL static const rgba_pixel *liq_image_get_row_rgba(liq_image *img, unsigned int row) { if (liq_image_can_use_rgba_rows(img)) { return img->rows[row]; } assert(img->temp_row); rgba_pixel *temp_row = img->temp_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num(); if (img->rows) { memcpy(temp_row, img->rows[row], img->width * sizeof(temp_row[0])); } else { liq_executing_user_callback(img->row_callback, (liq_color*)temp_row, row, img->width, img->row_callback_user_info); } if (img->min_opaque_val < 1.f) modify_alpha(img, temp_row); 
return temp_row; } LIQ_NONNULL static void convert_row_to_f(liq_image *img, f_pixel *row_f_pixels, const unsigned int row, const float gamma_lut[]) { assert(row_f_pixels); assert(!USE_SSE || 0 == ((uintptr_t)row_f_pixels & 15)); const rgba_pixel *const row_pixels = liq_image_get_row_rgba(img, row); for(unsigned int col=0; col < img->width; col++) { row_f_pixels[col] = rgba_to_f(gamma_lut, row_pixels[col]); } } LIQ_NONNULL static bool liq_image_get_row_f_init(liq_image *img) { assert(omp_get_thread_num() == 0); if (img->f_pixels) { return true; } if (!liq_image_should_use_low_memory(img, false)) { img->f_pixels = img->malloc(sizeof(img->f_pixels[0]) * img->width * img->height); } if (!img->f_pixels) { return liq_image_use_low_memory(img); } if (!liq_image_has_rgba_pixels(img)) { return false; } float gamma_lut[256]; to_f_set_gamma(gamma_lut, img->gamma); for(unsigned int i=0; i < img->height; i++) { convert_row_to_f(img, &img->f_pixels[i*img->width], i, gamma_lut); } return true; } LIQ_NONNULL static const f_pixel *liq_image_get_row_f(liq_image *img, unsigned int row) { if (!img->f_pixels) { assert(img->temp_f_row); // init should have done that float gamma_lut[256]; to_f_set_gamma(gamma_lut, img->gamma); f_pixel *row_for_thread = img->temp_f_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num(); convert_row_to_f(img, row_for_thread, row, gamma_lut); return row_for_thread; } return img->f_pixels + img->width * row; } LIQ_EXPORT LIQ_NONNULL int liq_image_get_width(const liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1; return input_image->width; } LIQ_EXPORT LIQ_NONNULL int liq_image_get_height(const liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1; return input_image->height; } typedef void free_func(void*); LIQ_NONNULL static free_func *get_default_free_func(liq_image *img) { // When default allocator is used then user-supplied pointers must be freed with free() if (img->free_rows_internal || img->free != liq_aligned_free) { return img->free; } return free; } LIQ_NONNULL static void liq_image_free_rgba_source(liq_image *input_image) { if (input_image->free_pixels && input_image->pixels) { get_default_free_func(input_image)(input_image->pixels); input_image->pixels = NULL; } if (input_image->free_rows && input_image->rows) { get_default_free_func(input_image)(input_image->rows); input_image->rows = NULL; } } LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image) { if (input_image->importance_map) { input_image->free(input_image->importance_map); input_image->importance_map = NULL; } } LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image) { liq_image_free_importance_map(input_image); if (input_image->edges) { input_image->free(input_image->edges); input_image->edges = NULL; } if (input_image->dither_map) { input_image->free(input_image->dither_map); input_image->dither_map = NULL; } } LIQ_EXPORT LIQ_NONNULL void liq_image_destroy(liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return; liq_image_free_rgba_source(input_image); liq_image_free_maps(input_image); if (input_image->f_pixels) { input_image->free(input_image->f_pixels); } if (input_image->temp_row) { input_image->free(input_image->temp_row); } if (input_image->temp_f_row) { input_image->free(input_image->temp_f_row); } if (input_image->background) { liq_image_destroy(input_image->background); } input_image->magic_header = liq_freed_magic; input_image->free(input_image); } LIQ_EXPORT liq_histogram* 
liq_histogram_create(const liq_attr* attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) { return NULL; } liq_histogram *hist = attr->malloc(sizeof(liq_histogram)); if (!hist) return NULL; *hist = (liq_histogram) { .magic_header = liq_histogram_magic, .malloc = attr->malloc, .free = attr->free, .ignorebits = MAX(attr->min_posterization_output, attr->min_posterization_input), }; return hist; } LIQ_EXPORT LIQ_NONNULL void liq_histogram_destroy(liq_histogram *hist) { if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return; hist->magic_header = liq_freed_magic; pam_freeacolorhash(hist->acht); hist->free(hist); } LIQ_EXPORT LIQ_NONNULL liq_result *liq_quantize_image(liq_attr *attr, liq_image *img) { liq_result *res; if (LIQ_OK != liq_image_quantize(img, attr, &res)) { return NULL; } return res; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_quantize(liq_image *const img, liq_attr *const attr, liq_result **result_output) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (!liq_image_has_rgba_pixels(img)) { return LIQ_UNSUPPORTED; } liq_histogram *hist = liq_histogram_create(attr); if (!hist) { return LIQ_OUT_OF_MEMORY; } liq_error err = liq_histogram_add_image(hist, attr, img); if (LIQ_OK != err) { liq_histogram_destroy(hist); /* don't leak the histogram when adding the image fails */ return err; } err = liq_histogram_quantize_internal(hist, attr, false, result_output); liq_histogram_destroy(hist); return err; } LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_quantize(liq_histogram *input_hist, liq_attr *attr, liq_result **result_output) { return liq_histogram_quantize_internal(input_hist, attr, true, result_output); } LIQ_NONNULL static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output) { if (!CHECK_USER_POINTER(result_output)) return LIQ_INVALID_POINTER; *result_output = NULL; if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER; if (liq_progress(attr, 0)) return LIQ_ABORTED; histogram *hist; liq_error err = finalize_histogram(input_hist, attr, &hist); if (err != LIQ_OK) { return err; } err = pngquant_quantize(hist, attr, input_hist->fixed_colors_count, input_hist->fixed_colors, input_hist->gamma, fixed_result_colors, result_output); pam_freeacolorhist(hist); return err; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_dithering_level(liq_result *res, float dither_level) { if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER; if (res->remapping) { liq_remapping_result_destroy(res->remapping); res->remapping = NULL; } if (dither_level < 0 || dither_level > 1.0f) return LIQ_VALUE_OUT_OF_RANGE; /* validate the new level, not the previously stored one */ res->dither_level = dither_level; return LIQ_OK; } LIQ_NONNULL static liq_remapping_result *liq_remapping_result_create(liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) { return NULL; } liq_remapping_result *res = result->malloc(sizeof(liq_remapping_result)); if (!res) return NULL; *res = (liq_remapping_result) { .magic_header = liq_remapping_result_magic, .malloc = result->malloc, .free = result->free, .dither_level = result->dither_level, .use_dither_map = result->use_dither_map, .palette_error = result->palette_error, .gamma = result->gamma, .palette = pam_duplicate_colormap(result->palette), .progress_callback = result->progress_callback, .progress_callback_user_info = result->progress_callback_user_info, .progress_stage1 = result->use_dither_map ?
20 : 0, }; return res; } LIQ_EXPORT LIQ_NONNULL double liq_get_output_gamma(const liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1; return result->gamma; } LIQ_NONNULL static void liq_remapping_result_destroy(liq_remapping_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_remapping_result)) return; if (result->palette) pam_freecolormap(result->palette); if (result->pixels) result->free(result->pixels); result->magic_header = liq_freed_magic; result->free(result); } LIQ_EXPORT LIQ_NONNULL void liq_result_destroy(liq_result *res) { if (!CHECK_STRUCT_TYPE(res, liq_result)) return; memset(&res->int_palette, 0, sizeof(liq_palette)); if (res->remapping) { memset(&res->remapping->int_palette, 0, sizeof(liq_palette)); liq_remapping_result_destroy(res->remapping); } pam_freecolormap(res->palette); res->magic_header = liq_freed_magic; res->free(res); } LIQ_EXPORT LIQ_NONNULL double liq_get_quantization_error(const liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1; if (result->palette_error >= 0) { return mse_to_standard_mse(result->palette_error); } return -1; } LIQ_EXPORT LIQ_NONNULL double liq_get_remapping_error(const liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1; if (result->remapping && result->remapping->palette_error >= 0) { return mse_to_standard_mse(result->remapping->palette_error); } return -1; } LIQ_EXPORT LIQ_NONNULL int liq_get_quantization_quality(const liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1; if (result->palette_error >= 0) { return mse_to_quality(result->palette_error); } return -1; } LIQ_EXPORT LIQ_NONNULL int liq_get_remapping_quality(const liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1; if (result->remapping && result->remapping->palette_error >= 0) { return mse_to_quality(result->remapping->palette_error); } return -1; } LIQ_NONNULL static int compare_popularity(const void *ch1, const void *ch2) { const float v1 = ((const colormap_item*)ch1)->popularity; const float v2 = ((const colormap_item*)ch2)->popularity; return v1 > v2 ? -1 : 1; } LIQ_NONNULL static void sort_palette_qsort(colormap *map, int start, int nelem) { if (!nelem) return; qsort(map->palette + start, nelem, sizeof(map->palette[0]), compare_popularity); } #define SWAP_PALETTE(map, a,b) { \ const colormap_item tmp = (map)->palette[(a)]; \ (map)->palette[(a)] = (map)->palette[(b)]; \ (map)->palette[(b)] = tmp; } LIQ_NONNULL static void sort_palette(colormap *map, const liq_attr *options) { /* ** Step 3.5 [GRR]: remap the palette colors so that all entries with ** the maximal alpha value (i.e., fully opaque) are at the end and can ** therefore be omitted from the tRNS chunk. 
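** (In PNG, the tRNS chunk stores one alpha byte per preceding palette entry, and trailing entries default to fully opaque, so every opaque color moved to the end of the palette is one byte saved.)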
*/ if (options->last_index_transparent) { for(unsigned int i=0; i < map->colors; i++) { if (map->palette[i].acolor.a < 1.f/256.f) { const unsigned int old = i, transparent_dest = map->colors-1; SWAP_PALETTE(map, transparent_dest, old); /* colors sorted by popularity make pngs slightly more compressible */ sort_palette_qsort(map, 0, map->colors-1); return; } } } unsigned int non_fixed_colors = 0; for(unsigned int i = 0; i < map->colors; i++) { if (map->palette[i].fixed) { break; } non_fixed_colors++; } /* move transparent colors to the beginning to shrink trns chunk */ unsigned int num_transparent = 0; for(unsigned int i = 0; i < non_fixed_colors; i++) { if (map->palette[i].acolor.a < 255.f/256.f) { // current transparent color is swapped with earlier opaque one if (i != num_transparent) { SWAP_PALETTE(map, num_transparent, i); i--; } num_transparent++; } } liq_verbose_printf(options, " eliminated opaque tRNS-chunk entries...%d entr%s transparent", num_transparent, (num_transparent == 1)? "y" : "ies"); /* colors sorted by popularity make pngs slightly more compressible * opaque and transparent are sorted separately */ sort_palette_qsort(map, 0, num_transparent); sort_palette_qsort(map, num_transparent, non_fixed_colors - num_transparent); if (non_fixed_colors > 9 && map->colors > 16) { SWAP_PALETTE(map, 7, 1); // slightly improves compression SWAP_PALETTE(map, 8, 2); SWAP_PALETTE(map, 9, 3); } } inline static unsigned int posterize_channel(unsigned int color, unsigned int bits) { return (color & ~((1<<bits)-1)) | (color >> (8-bits)); } LIQ_NONNULL static void set_rounded_palette(liq_palette *const dest, colormap *const map, const double gamma, unsigned int posterize) { float gamma_lut[256]; to_f_set_gamma(gamma_lut, gamma); dest->count = map->colors; for(unsigned int x = 0; x < map->colors; ++x) { rgba_pixel px = f_to_rgb(gamma, map->palette[x].acolor); px.r = posterize_channel(px.r, posterize); px.g = posterize_channel(px.g, posterize); px.b = posterize_channel(px.b, posterize); px.a = posterize_channel(px.a, posterize); map->palette[x].acolor = rgba_to_f(gamma_lut, px); /* saves rounding error introduced by to_rgb, which makes remapping & dithering more accurate */ if (!px.a && !map->palette[x].fixed) { px.r = 71; px.g = 112; px.b = 76; } dest->entries[x] = (liq_color){.r=px.r,.g=px.g,.b=px.b,.a=px.a}; } } LIQ_EXPORT LIQ_NONNULL const liq_palette *liq_get_palette(liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return NULL; if (result->remapping && result->remapping->int_palette.count) { return &result->remapping->int_palette; } if (!result->int_palette.count) { set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->min_posterization_output); } return &result->int_palette; } LIQ_NONNULL static float remap_to_palette(liq_image *const input_image, unsigned char *const *const output_pixels, colormap *const map) { const int rows = input_image->height; const unsigned int cols = input_image->width; double remapping_error=0; if (!liq_image_get_row_f_init(input_image)) { return -1; } if (input_image->background && !liq_image_get_row_f_init(input_image->background)) { return -1; } const colormap_item *acolormap = map->palette; struct nearest_map *const n = nearest_init(map); const int transparent_index = input_image->background ? 
nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0; const unsigned int max_threads = omp_get_max_threads(); LIQ_ARRAY(kmeans_state, average_color, (KMEANS_CACHE_LINE_GAP+map->colors) * max_threads); kmeans_init(map, max_threads, average_color); #pragma omp parallel for if (rows*cols > 3000) \ schedule(static) default(none) shared(acolormap) shared(average_color) reduction(+:remapping_error) for(int row = 0; row < rows; ++row) { const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row); const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? liq_image_get_row_f(input_image->background, row) : NULL; unsigned int last_match=0; for(unsigned int col = 0; col < cols; ++col) { float diff; last_match = nearest_search(n, &row_pixels[col], last_match, &diff); if (bg_pixels && colordifference(bg_pixels[col], acolormap[last_match].acolor) <= diff) { last_match = transparent_index; } output_pixels[row][col] = last_match; remapping_error += diff; kmeans_update_color(row_pixels[col], 1.0, map, last_match, omp_get_thread_num(), average_color); } } kmeans_finalize(map, max_threads, average_color); nearest_free(n); return remapping_error / (input_image->width * input_image->height); } inline static f_pixel get_dithered_pixel(const float dither_level, const float max_dither_error, const f_pixel thiserr, const f_pixel px) { /* Use Floyd-Steinberg errors to adjust actual color. */ const float sr = thiserr.r * dither_level, sg = thiserr.g * dither_level, sb = thiserr.b * dither_level, sa = thiserr.a * dither_level; float ratio = 1.0; const float max_overflow = 1.1f; const float max_underflow = -0.1f; // allowing some overflow prevents undithered bands caused by clamping of all channels if (px.r + sr > max_overflow) ratio = MIN(ratio, (max_overflow -px.r)/sr); else { if (px.r + sr < max_underflow) ratio = MIN(ratio, (max_underflow-px.r)/sr); } if (px.g + sg > max_overflow) ratio = MIN(ratio, (max_overflow -px.g)/sg); else { if (px.g + sg < max_underflow) ratio = MIN(ratio, (max_underflow-px.g)/sg); } if (px.b + sb > max_overflow) ratio = MIN(ratio, (max_overflow -px.b)/sb); else { if (px.b + sb < max_underflow) ratio = MIN(ratio, (max_underflow-px.b)/sb); } float a = px.a + sa; if (a > 1.f) { a = 1.f; } else if (a < 0) { a = 0; } // If dithering error is crazy high, don't propagate it that much // This prevents crazy green pixels popping out of the blue (or red or black! ;) const float dither_error = sr*sr + sg*sg + sb*sb + sa*sa; if (dither_error > max_dither_error) { ratio *= 0.8f; } else if (dither_error < 2.f/256.f/256.f) { // don't dither areas that don't have noticeable error — makes file smaller return px; } return (f_pixel) { .r=px.r + sr * ratio, .g=px.g + sg * ratio, .b=px.b + sb * ratio, .a=a, }; } /** Uses edge/noise map to apply dithering only to flat areas. Dithering on edges creates jagged lines, and noisy areas are "naturally" dithered. If output_image_is_remapped is true, only pixels noticeably changed by error diffusion will be written to output image. */ LIQ_NONNULL static bool remap_to_palette_floyd(liq_image *input_image, unsigned char *const output_pixels[], liq_remapping_result *quant, const float max_dither_error, const bool output_image_is_remapped) { const int rows = input_image->height, cols = input_image->width; const unsigned char *dither_map = quant->use_dither_map ? (input_image->dither_map ?
input_image->dither_map : input_image->edges) : NULL; const colormap *map = quant->palette; const colormap_item *acolormap = map->palette; if (!liq_image_get_row_f_init(input_image)) { return false; } if (input_image->background && !liq_image_get_row_f_init(input_image->background)) { return false; } /* Initialize Floyd-Steinberg error vectors. */ const size_t errwidth = cols+2; f_pixel *restrict thiserr = input_image->malloc(errwidth * sizeof(thiserr[0]) * 2); // +2 saves from checking out of bounds access if (!thiserr) return false; f_pixel *restrict nexterr = thiserr + errwidth; memset(thiserr, 0, errwidth * sizeof(thiserr[0])); bool ok = true; struct nearest_map *const n = nearest_init(map); const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0; // response to this value is non-linear and without it any value < 0.8 would give almost no dithering float base_dithering_level = quant->dither_level; base_dithering_level = 1.f - (1.f-base_dithering_level)*(1.f-base_dithering_level); if (dither_map) { base_dithering_level *= 1.f/255.f; // convert byte to float } base_dithering_level *= 15.f/16.f; // prevent small errors from accumulating int fs_direction = 1; unsigned int last_match=0; for (int row = 0; row < rows; ++row) { if (liq_remap_progress(quant, quant->progress_stage1 + row * (100.f - quant->progress_stage1) / rows)) { ok = false; break; } memset(nexterr, 0, errwidth * sizeof(nexterr[0])); int col = (fs_direction > 0) ? 0 : (cols - 1); const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row); const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? liq_image_get_row_f(input_image->background, row) : NULL; do { float dither_level = base_dithering_level; if (dither_map) { dither_level *= dither_map[row*cols + col]; } const f_pixel spx = get_dithered_pixel(dither_level, max_dither_error, thiserr[col + 1], row_pixels[col]); const unsigned int guessed_match = output_image_is_remapped ? output_pixels[row][col] : last_match; float diff; last_match = nearest_search(n, &spx, guessed_match, &diff); f_pixel output_px = acolormap[last_match].acolor; if (bg_pixels && colordifference(bg_pixels[col], output_px) <= diff) { output_px = bg_pixels[col]; output_pixels[row][col] = transparent_index; } else { output_pixels[row][col] = last_match; } f_pixel err = { .r = (spx.r - output_px.r), .g = (spx.g - output_px.g), .b = (spx.b - output_px.b), .a = (spx.a - output_px.a), }; // If dithering error is crazy high, don't propagate it that much // This prevents crazy green pixels popping out of the blue (or red or black! ;) if (err.r*err.r + err.g*err.g + err.b*err.b + err.a*err.a > max_dither_error) { err.r *= 0.75f; err.g *= 0.75f; err.b *= 0.75f; err.a *= 0.75f; } /* Propagate Floyd-Steinberg error terms.
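(7/16 of the error goes to the next pixel in the scan direction; 3/16, 5/16 and 1/16 go to the three neighbours on the row below. The weights are mirrored on right-to-left rows of the serpentine scan.)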
*/ if (fs_direction > 0) { thiserr[col + 2].a += err.a * (7.f/16.f); thiserr[col + 2].r += err.r * (7.f/16.f); thiserr[col + 2].g += err.g * (7.f/16.f); thiserr[col + 2].b += err.b * (7.f/16.f); nexterr[col + 2].a = err.a * (1.f/16.f); nexterr[col + 2].r = err.r * (1.f/16.f); nexterr[col + 2].g = err.g * (1.f/16.f); nexterr[col + 2].b = err.b * (1.f/16.f); nexterr[col + 1].a += err.a * (5.f/16.f); nexterr[col + 1].r += err.r * (5.f/16.f); nexterr[col + 1].g += err.g * (5.f/16.f); nexterr[col + 1].b += err.b * (5.f/16.f); nexterr[col ].a += err.a * (3.f/16.f); nexterr[col ].r += err.r * (3.f/16.f); nexterr[col ].g += err.g * (3.f/16.f); nexterr[col ].b += err.b * (3.f/16.f); } else { thiserr[col ].a += err.a * (7.f/16.f); thiserr[col ].r += err.r * (7.f/16.f); thiserr[col ].g += err.g * (7.f/16.f); thiserr[col ].b += err.b * (7.f/16.f); nexterr[col ].a = err.a * (1.f/16.f); nexterr[col ].r = err.r * (1.f/16.f); nexterr[col ].g = err.g * (1.f/16.f); nexterr[col ].b = err.b * (1.f/16.f); nexterr[col + 1].a += err.a * (5.f/16.f); nexterr[col + 1].r += err.r * (5.f/16.f); nexterr[col + 1].g += err.g * (5.f/16.f); nexterr[col + 1].b += err.b * (5.f/16.f); nexterr[col + 2].a += err.a * (3.f/16.f); nexterr[col + 2].r += err.r * (3.f/16.f); nexterr[col + 2].g += err.g * (3.f/16.f); nexterr[col + 2].b += err.b * (3.f/16.f); } // remapping is done in zig-zag col += fs_direction; if (fs_direction > 0) { if (col >= cols) break; } else { if (col < 0) break; } } while(1); f_pixel *const temperr = thiserr; thiserr = nexterr; nexterr = temperr; fs_direction = -fs_direction; } input_image->free(MIN(thiserr, nexterr)); // MIN because pointers were swapped nearest_free(n); return ok; } /* fixed colors are always included in the palette, so it would be wasteful to duplicate them in palette from histogram */ LIQ_NONNULL static void remove_fixed_colors_from_histogram(histogram *hist, const int fixed_colors_count, const f_pixel fixed_colors[], const float target_mse) { const float max_difference = MAX(target_mse/2.f, 2.f/256.f/256.f); if (fixed_colors_count) { for(int j=0; j < hist->size; j++) { for(unsigned int i=0; i < fixed_colors_count; i++) { if (colordifference(hist->achv[j].acolor, fixed_colors[i]) < max_difference) { hist->achv[j] = hist->achv[--hist->size]; // remove color from histogram by overwriting with the last entry j--; break; // continue searching histogram } } } } } LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_colors(liq_histogram *input_hist, const liq_attr *options, const liq_histogram_entry entries[], int num_entries, double gamma) { if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER; if (!CHECK_USER_POINTER(entries)) return LIQ_INVALID_POINTER; if (gamma < 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE; if (num_entries <= 0 || num_entries > 1<<30) return LIQ_VALUE_OUT_OF_RANGE; if (input_hist->ignorebits > 0 && input_hist->had_image_added) { return LIQ_UNSUPPORTED; } input_hist->ignorebits = 0; input_hist->had_image_added = true; input_hist->gamma = gamma ? gamma : 0.45455; if (!input_hist->acht) { input_hist->acht = pam_allocacolorhash(~0, num_entries*num_entries, 0, options->malloc, options->free); if (!input_hist->acht) { return LIQ_OUT_OF_MEMORY; } } // Fake image size. It's only for hash size estimates. 
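/* Entries below are keyed by their raw 32-bit RGBA value; fully transparent colors are canonicalized to 0 so they all share one hash bucket. */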
if (!input_hist->acht->cols) { input_hist->acht->cols = num_entries; } input_hist->acht->rows += num_entries; const unsigned int hash_size = input_hist->acht->hash_size; for(int i=0; i < num_entries; i++) { const rgba_pixel rgba = { .r = entries[i].color.r, .g = entries[i].color.g, .b = entries[i].color.b, .a = entries[i].color.a, }; union rgba_as_int px = {rgba}; unsigned int hash; if (px.rgba.a) { hash = px.l % hash_size; } else { hash=0; px.l=0; } if (!pam_add_to_hash(input_hist->acht, hash, entries[i].count, px, i, num_entries)) { return LIQ_OUT_OF_MEMORY; } } return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_image(liq_histogram *input_hist, const liq_attr *options, liq_image *input_image) { if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER; const unsigned int cols = input_image->width, rows = input_image->height; if (!input_image->importance_map && options->use_contrast_maps) { contrast_maps(input_image); } input_hist->gamma = input_image->gamma; for(int i = 0; i < input_image->fixed_colors_count; i++) { liq_error res = liq_histogram_add_fixed_color_f(input_hist, input_image->fixed_colors[i]); if (res != LIQ_OK) { return res; } } /* ** Step 2: attempt to make a histogram of the colors, unclustered. ** If at first we don't succeed, increase ignorebits to increase color ** coherence and try again. */ if (liq_progress(options, options->progress_stage1 * 0.4f)) { return LIQ_ABORTED; } const bool all_rows_at_once = liq_image_can_use_rgba_rows(input_image); // Usual solution is to start from scratch when limit is exceeded, but that's not possible if it's not // the first image added const unsigned int max_histogram_entries = input_hist->had_image_added ? ~0 : options->max_histogram_entries; do { if (!input_hist->acht) { input_hist->acht = pam_allocacolorhash(max_histogram_entries, rows*cols, input_hist->ignorebits, options->malloc, options->free); } if (!input_hist->acht) return LIQ_OUT_OF_MEMORY; // histogram uses noise contrast map for importance. Color accuracy in noisy areas is not very important. // noise map does not include edges to avoid ruining anti-aliasing for(unsigned int row=0; row < rows; row++) { bool added_ok; if (all_rows_at_once) { added_ok = pam_computeacolorhash(input_hist->acht, (const rgba_pixel *const *)input_image->rows, cols, rows, input_image->importance_map); if (added_ok) break; } else { const rgba_pixel* rows_p[1] = { liq_image_get_row_rgba(input_image, row) }; added_ok = pam_computeacolorhash(input_hist->acht, rows_p, cols, 1, input_image->importance_map ? &input_image->importance_map[row * cols] : NULL); } if (!added_ok) { input_hist->ignorebits++; liq_verbose_printf(options, " too many colors! Scaling colors to improve clustering... 
%d", input_hist->ignorebits); pam_freeacolorhash(input_hist->acht); input_hist->acht = NULL; if (liq_progress(options, options->progress_stage1 * 0.6f)) return LIQ_ABORTED; break; } } } while(!input_hist->acht); input_hist->had_image_added = true; liq_image_free_importance_map(input_image); if (input_image->free_pixels && input_image->f_pixels) { liq_image_free_rgba_source(input_image); // bow can free the RGBA source if copy has been made in f_pixels } return LIQ_OK; } LIQ_NONNULL static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output) { if (liq_progress(options, options->progress_stage1 * 0.9f)) { return LIQ_ABORTED; } if (!input_hist->acht) { return LIQ_BITMAP_NOT_AVAILABLE; } histogram *hist = pam_acolorhashtoacolorhist(input_hist->acht, input_hist->gamma, options->malloc, options->free); pam_freeacolorhash(input_hist->acht); input_hist->acht = NULL; if (!hist) { return LIQ_OUT_OF_MEMORY; } liq_verbose_printf(options, " made histogram...%d colors found", hist->size); remove_fixed_colors_from_histogram(hist, input_hist->fixed_colors_count, input_hist->fixed_colors, options->target_mse); *hist_output = hist; return LIQ_OK; } LIQ_NONNULL static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels) { /* IE6 makes colors with even slightest transparency completely transparent, thus to improve situation in IE, make colors that are less than ~10% transparent completely opaque */ const float min_opaque_val = input_image->min_opaque_val; const float almost_opaque_val = min_opaque_val * 169.f/256.f; const unsigned int almost_opaque_val_int = (min_opaque_val * 169.f/256.f)*255.f; for(unsigned int col = 0; col < input_image->width; col++) { const rgba_pixel px = row_pixels[col]; /* ie bug: to avoid visible step caused by forced opaqueness, linearily raise opaqueness of almost-opaque colors */ if (px.a >= almost_opaque_val_int) { float al = px.a / 255.f; al = almost_opaque_val + (al-almost_opaque_val) * (1.f-almost_opaque_val) / (min_opaque_val-almost_opaque_val); al *= 256.f; row_pixels[col].a = al >= 255.f ? 255 : al; } } } /** Builds two maps: importance_map - approximation of areas with high-frequency noise, except straight edges. 1=flat, 0=noisy. edges - noise map including all edges */ LIQ_NONNULL static void contrast_maps(liq_image *image) { const unsigned int cols = image->width, rows = image->height; if (cols < 4 || rows < 4 || (3*cols*rows) > LIQ_HIGH_MEMORY_LIMIT) { return; } unsigned char *restrict noise = image->importance_map ? image->importance_map : image->malloc(cols*rows); image->importance_map = NULL; unsigned char *restrict edges = image->edges ? 
image->edges : image->malloc(cols*rows); image->edges = NULL; unsigned char *restrict tmp = image->malloc(cols*rows); if (!noise || !edges || !tmp || !liq_image_get_row_f_init(image)) { image->free(noise); image->free(edges); image->free(tmp); return; } const f_pixel *curr_row, *prev_row, *next_row; curr_row = prev_row = next_row = liq_image_get_row_f(image, 0); for (unsigned int j=0; j < rows; j++) { prev_row = curr_row; curr_row = next_row; next_row = liq_image_get_row_f(image, MIN(rows-1,j+1)); f_pixel prev, curr = curr_row[0], next=curr; for (unsigned int i=0; i < cols; i++) { prev=curr; curr=next; next = curr_row[MIN(cols-1,i+1)]; // contrast is difference between pixels neighbouring horizontally and vertically const float a = fabsf(prev.a+next.a - curr.a*2.f), r = fabsf(prev.r+next.r - curr.r*2.f), g = fabsf(prev.g+next.g - curr.g*2.f), b = fabsf(prev.b+next.b - curr.b*2.f); const f_pixel prevl = prev_row[i]; const f_pixel nextl = next_row[i]; const float a1 = fabsf(prevl.a+nextl.a - curr.a*2.f), r1 = fabsf(prevl.r+nextl.r - curr.r*2.f), g1 = fabsf(prevl.g+nextl.g - curr.g*2.f), b1 = fabsf(prevl.b+nextl.b - curr.b*2.f); const float horiz = MAX(MAX(a,r),MAX(g,b)); const float vert = MAX(MAX(a1,r1),MAX(g1,b1)); const float edge = MAX(horiz,vert); float z = edge - fabsf(horiz-vert)*.5f; z = 1.f - MAX(z,MIN(horiz,vert)); z *= z; // noise is amplified z *= z; // 85 is about 1/3rd of weight (not 0, because noisy pixels still need to be included, just not as precisely). const unsigned int z_int = 85 + (unsigned int)(z * 171.f); noise[j*cols+i] = MIN(z_int, 255); const int e_int = 255 - (int)(edge * 256.f); edges[j*cols+i] = e_int > 0 ? MIN(e_int, 255) : 0; } } // noise areas are shrunk and then expanded to remove thin edges from the map liq_max3(noise, tmp, cols, rows); liq_max3(tmp, noise, cols, rows); liq_blur(noise, tmp, noise, cols, rows, 3); liq_max3(noise, tmp, cols, rows); liq_min3(tmp, noise, cols, rows); liq_min3(noise, tmp, cols, rows); liq_min3(tmp, noise, cols, rows); liq_min3(edges, tmp, cols, rows); liq_max3(tmp, edges, cols, rows); for(unsigned int i=0; i < cols*rows; i++) edges[i] = MIN(noise[i], edges[i]); image->free(tmp); image->importance_map = noise; image->edges = edges; } /** * Builds map of neighbor pixels mapped to the same palette entry * * For efficiency/simplicity it mainly looks for same consecutive pixels horizontally * and peeks 1 pixel above/below. Full 2d algorithm doesn't improve it significantly. * Correct flood fill doesn't have visually good properties. */ LIQ_NONNULL static void update_dither_map(liq_image *input_image, unsigned char *const *const row_pointers, colormap *map) { const unsigned int width = input_image->width; const unsigned int height = input_image->height; unsigned char *const edges = input_image->edges; for(unsigned int row=0; row < height; row++) { unsigned char lastpixel = row_pointers[row][0]; unsigned int lastcol=0; for(unsigned int col=1; col < width; col++) { const unsigned char px = row_pointers[row][col]; if (input_image->background && map->palette[px].acolor.a < 1.f/256.f) { // Transparency may or may not create an edge. When there's an explicit background set, assume no edge. 
continue; } if (px != lastpixel || col == width-1) { int neighbor_count = 10 * (col-lastcol); unsigned int i=lastcol; while(i < col) { if (row > 0) { unsigned char pixelabove = row_pointers[row-1][i]; if (pixelabove == lastpixel) neighbor_count += 15; } if (row < height-1) { unsigned char pixelbelow = row_pointers[row+1][i]; if (pixelbelow == lastpixel) neighbor_count += 15; } i++; } while(lastcol <= col) { int e = edges[row*width + lastcol]; edges[row*width + lastcol++] = (e+128) * (255.f/(255+128)) * (1.f - 20.f / (20 + neighbor_count)); } lastpixel = px; } } } input_image->dither_map = input_image->edges; input_image->edges = NULL; } /** * Palette can be NULL, in which case it creates a new palette from scratch. */ static colormap *add_fixed_colors_to_palette(colormap *palette, const int max_colors, const f_pixel fixed_colors[], const int fixed_colors_count, void* (*malloc)(size_t), void (*free)(void*)) { if (!fixed_colors_count) return palette; colormap *newpal = pam_colormap(MIN(max_colors, (palette ? palette->colors : 0) + fixed_colors_count), malloc, free); unsigned int i=0; if (palette && fixed_colors_count < max_colors) { unsigned int palette_max = MIN(palette->colors, max_colors - fixed_colors_count); for(; i < palette_max; i++) { newpal->palette[i] = palette->palette[i]; } } for(int j=0; j < MIN(max_colors, fixed_colors_count); j++) { newpal->palette[i++] = (colormap_item){ .acolor = fixed_colors[j], .fixed = true, }; } if (palette) pam_freecolormap(palette); return newpal; } LIQ_NONNULL static void adjust_histogram_callback(hist_item *item, float diff) { item->adjusted_weight = (item->perceptual_weight+item->adjusted_weight) * (sqrtf(1.f+diff)); } /** Repeats mediancut with different histogram weights to find palette with minimum error. feedback_loop_trials controls how long the search will take. < 0 skips the iteration. */ static colormap *find_best_palette(histogram *hist, const liq_attr *options, const double max_mse, const f_pixel fixed_colors[], const unsigned int fixed_colors_count, double *palette_error_p) { unsigned int max_colors = options->max_colors; // if output is posterized it doesn't make sense to aim for perfect colors, so increase target_mse // at this point actual gamma is not set, so very conservative posterization estimate is used const double target_mse = MIN(max_mse, MAX(options->target_mse, pow((1<<options->min_posterization_output)/1024.0, 2))); int feedback_loop_trials = options->feedback_loop_trials; if (hist->size > 5000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;} if (hist->size > 25000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;} if (hist->size > 50000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;} if (hist->size > 100000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;} colormap *acolormap = NULL; double least_error = MAX_DIFF; double target_mse_overshoot = feedback_loop_trials>0 ?
1.05 : 1.0; const float total_trials = (float)(feedback_loop_trials>0?feedback_loop_trials:1); do { colormap *newmap; if (hist->size && fixed_colors_count < max_colors) { newmap = mediancut(hist, max_colors-fixed_colors_count, target_mse * target_mse_overshoot, MAX(MAX(45.0/65536.0, target_mse), least_error)*1.2, options->malloc, options->free); } else { feedback_loop_trials = 0; newmap = NULL; } newmap = add_fixed_colors_to_palette(newmap, max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free); if (!newmap) { return NULL; } if (feedback_loop_trials <= 0) { return newmap; } // after palette has been created, total error (MSE) is calculated to keep the best palette // at the same time K-Means iteration is done to improve the palette // and histogram weights are adjusted based on remapping error to give more weight to poorly matched colors const bool first_run_of_target_mse = !acolormap && target_mse > 0; double total_error = kmeans_do_iteration(hist, newmap, first_run_of_target_mse ? NULL : adjust_histogram_callback); // goal is to increase quality or to reduce number of colors used if quality is good enough if (!acolormap || total_error < least_error || (total_error <= target_mse && newmap->colors < max_colors)) { if (acolormap) pam_freecolormap(acolormap); acolormap = newmap; if (total_error < target_mse && total_error > 0) { // K-Means iteration improves quality above what mediancut aims for // this compensates for it, making mediancut aim for worse target_mse_overshoot = MIN(target_mse_overshoot*1.25, target_mse/total_error); } least_error = total_error; // if number of colors could be reduced, try to keep it that way // but allow extra color as a bit of wiggle room in case quality can be improved too max_colors = MIN(newmap->colors+1, max_colors); feedback_loop_trials -= 1; // asymptotic improvement could make it go on forever } else { for(unsigned int j=0; j < hist->size; j++) { hist->achv[j].adjusted_weight = (hist->achv[j].perceptual_weight + hist->achv[j].adjusted_weight)/2.0; } target_mse_overshoot = 1.0; feedback_loop_trials -= 6; // if error is really bad, it's unlikely to improve, so end sooner if (total_error > least_error*4) feedback_loop_trials -= 3; pam_freecolormap(newmap); } float fraction_done = 1.f-MAX(0.f, feedback_loop_trials/total_trials); if (liq_progress(options, options->progress_stage1 + fraction_done * options->progress_stage2)) break; liq_verbose_printf(options, " selecting colors...%d%%", (int)(100.f * fraction_done)); } while(feedback_loop_trials > 0); *palette_error_p = least_error; return acolormap; } static colormap *histogram_to_palette(const histogram *hist, const liq_attr *options) { if (!hist->size) { return NULL; } colormap *acolormap = pam_colormap(hist->size, options->malloc, options->free); for(unsigned int i=0; i < hist->size; i++) { acolormap->palette[i].acolor = hist->achv[i].acolor; acolormap->palette[i].popularity = hist->achv[i].perceptual_weight; } return acolormap; } LIQ_NONNULL static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **result_output) { colormap *acolormap; double palette_error = -1; assert((verbose_print(options, "SLOW debug checks enabled. 
Recompile with NDEBUG for normal operation."),1)); const bool few_input_colors = hist->size+fixed_colors_count <= options->max_colors; if (liq_progress(options, options->progress_stage1)) return LIQ_ABORTED; // If image has few colors to begin with (and no quality degradation is required) // then it's possible to skip quantization entirely if (few_input_colors && options->target_mse == 0) { acolormap = add_fixed_colors_to_palette(histogram_to_palette(hist, options), options->max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free); palette_error = 0; } else { const double max_mse = options->max_mse * (few_input_colors ? 0.33 : 1.0); // when degrading image that's already paletted, require much higher improvement, since pal2pal often looks bad and there's little gain acolormap = find_best_palette(hist, options, max_mse, fixed_colors, fixed_colors_count, &palette_error); if (!acolormap) { return LIQ_VALUE_OUT_OF_RANGE; } // K-Means iteration approaches local minimum for the palette double iteration_limit = options->kmeans_iteration_limit; unsigned int iterations = options->kmeans_iterations; if (!iterations && palette_error < 0 && max_mse < MAX_DIFF) iterations = 1; // otherwise total error is never calculated and MSE limit won't work if (iterations) { // likely_colormap_index (used and set in kmeans_do_iteration) can't point to index outside colormap if (acolormap->colors < 256) for(unsigned int j=0; j < hist->size; j++) { if (hist->achv[j].tmp.likely_colormap_index >= acolormap->colors) { hist->achv[j].tmp.likely_colormap_index = 0; // actual value doesn't matter, as the guess is out of date anyway } } if (hist->size > 5000) {iterations = (iterations*3 + 3)/4;} if (hist->size > 25000) {iterations = (iterations*3 + 3)/4;} if (hist->size > 50000) {iterations = (iterations*3 + 3)/4;} if (hist->size > 100000) {iterations = (iterations*3 + 3)/4; iteration_limit *= 2;} verbose_print(options, " moving colormap towards local minimum"); double previous_palette_error = MAX_DIFF; for(unsigned int i=0; i < iterations; i++) { palette_error = kmeans_do_iteration(hist, acolormap, NULL); if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + (i * options->progress_stage3 * 0.9f) / iterations)) { break; } if (fabs(previous_palette_error-palette_error) < iteration_limit) { break; } if (palette_error > max_mse*1.5) { // probably hopeless if (palette_error > max_mse*3.0) break; // definitely hopeless i++; } previous_palette_error = palette_error; } } if (palette_error > max_mse) { liq_verbose_printf(options, " image degradation MSE=%.3f (Q=%d) exceeded limit of %.3f (%d)", mse_to_standard_mse(palette_error), mse_to_quality(palette_error), mse_to_standard_mse(max_mse), mse_to_quality(max_mse)); pam_freecolormap(acolormap); return LIQ_QUALITY_TOO_LOW; } } if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + options->progress_stage3 * 0.95f)) { pam_freecolormap(acolormap); return LIQ_ABORTED; } sort_palette(acolormap, options); // If palette was created from a multi-image histogram, // then it shouldn't be optimized for one image during remapping if (fixed_result_colors) { for(unsigned int i=0; i < acolormap->colors; i++) { acolormap->palette[i].fixed = true; } } liq_result *result = options->malloc(sizeof(liq_result)); if (!result) return LIQ_OUT_OF_MEMORY; *result = (liq_result){ .magic_header = liq_result_magic, .malloc = options->malloc, .free = options->free, .palette = acolormap, .palette_error = palette_error, .use_dither_map = 
options->use_dither_map, .gamma = gamma, .min_posterization_output = options->min_posterization_output, }; *result_output = result; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image(liq_result *result, liq_image *input_image, void *buffer, size_t buffer_size) { if (!CHECK_STRUCT_TYPE(result, liq_result)) { return LIQ_INVALID_POINTER; } if (!CHECK_STRUCT_TYPE(input_image, liq_image)) { return LIQ_INVALID_POINTER; } if (!CHECK_USER_POINTER(buffer)) { return LIQ_INVALID_POINTER; } const size_t required_size = input_image->width * input_image->height; if (buffer_size < required_size) { return LIQ_BUFFER_TOO_SMALL; } LIQ_ARRAY(unsigned char *, rows, input_image->height); unsigned char *buffer_bytes = buffer; for(unsigned int i=0; i < input_image->height; i++) { rows[i] = &buffer_bytes[input_image->width * i]; } return liq_write_remapped_image_rows(result, input_image, rows); } LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image_rows(liq_result *quant, liq_image *input_image, unsigned char **row_pointers) { if (!CHECK_STRUCT_TYPE(quant, liq_result)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER; for(unsigned int i=0; i < input_image->height; i++) { if (!CHECK_USER_POINTER(row_pointers+i) || !CHECK_USER_POINTER(row_pointers[i])) return LIQ_INVALID_POINTER; } if (quant->remapping) { liq_remapping_result_destroy(quant->remapping); } liq_remapping_result *const result = quant->remapping = liq_remapping_result_create(quant); if (!result) return LIQ_OUT_OF_MEMORY; if (!input_image->edges && !input_image->dither_map && quant->use_dither_map) { contrast_maps(input_image); } if (liq_remap_progress(result, result->progress_stage1 * 0.25f)) { return LIQ_ABORTED; } /* ** Step 4: map the colors in the image to their closest match in the ** new colormap, and write 'em out. */ float remapping_error = result->palette_error; if (result->dither_level == 0) { set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output); remapping_error = remap_to_palette(input_image, row_pointers, result->palette); } else { const bool is_image_huge = (input_image->width * input_image->height) > 2000 * 2000; const bool allow_dither_map = result->use_dither_map == 2 || (!is_image_huge && result->use_dither_map); const bool generate_dither_map = allow_dither_map && (input_image->edges && !input_image->dither_map); if (generate_dither_map) { // If dithering (with dither map) is required, this image is used to find areas that require dithering remapping_error = remap_to_palette(input_image, row_pointers, result->palette); update_dither_map(input_image, row_pointers, result->palette); } if (liq_remap_progress(result, result->progress_stage1 * 0.5f)) { return LIQ_ABORTED; } // remapping above was the last chance to do K-Means iteration, hence the final palette is set after remapping set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output); if (!remap_to_palette_floyd(input_image, row_pointers, result, MAX(remapping_error*2.4, 16.f/256.f), generate_dither_map)) { return LIQ_ABORTED; } } // remapping error from the dithered image is absurd, so the non-dithered value is always used // palette_error includes some perceptual weighting from the histogram, which correlates more closely with dssim, // so that should be used when possible. if (result->palette_error < 0) { result->palette_error = remapping_error; } return LIQ_OK; } LIQ_EXPORT int liq_version() { return LIQ_VERSION; }
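/* A minimal usage sketch of the public API implemented above (illustrative only, kept out of
   the build with #if 0): quantizes a caller-supplied 32-bit RGBA buffer to an 8-bit paletted
   image. Error handling is abbreviated; `pixels`, `width` and `height` are assumed to be
   provided by the caller, and `quantize_rgba_sketch` is a hypothetical helper name. */
#if 0
#include <stdlib.h>
#include "libimagequant.h"

static unsigned char *quantize_rgba_sketch(const void *pixels, int width, int height) {
    liq_attr *attr = liq_attr_create();
    if (!attr) return NULL;                                 /* e.g. SSE build on a non-SSE CPU */
    liq_image *img = liq_image_create_rgba(attr, pixels, width, height, 0); /* 0 = default gamma */
    liq_result *res = NULL;
    unsigned char *out = NULL;
    if (img && liq_image_quantize(img, attr, &res) == LIQ_OK) {
        liq_set_dithering_level(res, 1.0f);                 /* full Floyd-Steinberg dithering */
        const size_t n = (size_t)width * (size_t)height;    /* one palette index per pixel */
        out = malloc(n);
        if (out && liq_write_remapped_image(res, img, out, n) != LIQ_OK) {
            free(out);
            out = NULL;
        }
        const liq_palette *pal = liq_get_palette(res);      /* up to 256 RGBA entries */
        (void)pal;                                          /* a real caller would emit pal + out as a PNG */
        liq_result_destroy(res);
    }
    if (img) liq_image_destroy(img);
    liq_attr_destroy(attr);
    return out;
}
#endif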
ch-placement-benchmark.c
/* * Copyright (C) 2013 University of Chicago. * See COPYRIGHT notice in top-level directory. * */ #include <string.h> #include <assert.h> #include <stdio.h> #include <stdint.h> #include <unistd.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <limits.h> #include <sys/time.h> #include <math.h> #include <time.h> #include <omp.h> #include "ch-placement-oid-gen.h" #include "ch-placement.h" #ifdef CH_ENABLE_CRUSH #include "ch-placement-crush.h" #endif #include "comb.h" struct options { unsigned int num_servers; unsigned int num_devices; unsigned int num_objs; unsigned int replication; char *placement; unsigned int virt_factor; unsigned block_size; unsigned int sector_size; unsigned int threads; unsigned int algm; }; struct comb_stats { unsigned long count; unsigned long bytes; }; struct device_type{ int idex; int count; double cap; double remain; double bandwidth; double workload; double latency; long int endura; }; struct node_type{ int idex; int count; int device1_count; int device2_count; int device3_count; int num_device; double cap; double remain; double perform; double workload; struct device_type *media; }; const struct device_type DEVICE[3] = { {0,0, 1000000000000, //Byte 1000000000000, 96000000, //Bytes/s 0, 4200, //μs 1L<<50 }, {0,0, 200000000000, 200000000000, 228000000, 0, 60, 1L<<20 }, {0,0, 32000000000, 32000000000, 2100000000, 0, 12, 1L<<40 } }; // static int comb_cmp(const void *a, const void *b); static int usage(char *exename); static struct options *parse_args(int argc, char *argv[]); #ifdef CH_ENABLE_CRUSH #include <hash.h> static int setup_crush(struct options *ig_opts, struct crush_map **map, __u32 **weight, int *n_weight) { struct crush_bucket *bucket; int i; int *items; int *weights; int ret; int id; struct crush_rule *rule; *n_weight = ig_opts->num_servers; *weight = malloc(sizeof(**weight) * ig_opts->num_servers); weights = malloc(sizeof(*weights) * ig_opts->num_servers); items = malloc(sizeof(*items) * ig_opts->num_servers); if (!(*weight) || !weights || !items || !map) { return (-1); } for (i = 0; i < ig_opts->num_servers; i++) { items[i] = i; weights[i] = 0x10000; (*weight)[i] = 0x10000; } *map = crush_create(); assert(*map); if (strcmp(ig_opts->placement, "crush-vring") == 0) #ifdef CH_ENABLE_CRUSH_VRING bucket = crush_make_bucket(*map, CRUSH_BUCKET_VRING, CRUSH_HASH_DEFAULT, 1, ig_opts->num_servers, items, weights); #else assert(0); #endif else bucket = crush_make_bucket(*map, CRUSH_BUCKET_STRAW, CRUSH_HASH_DEFAULT, 1, ig_opts->num_servers, items, weights); assert(bucket); ret = crush_add_bucket(*map, -2, bucket, &id); assert(ret == 0); crush_finalize(*map); rule = crush_make_rule(3, 0, 1, 1, 10); assert(rule); crush_rule_set_step(rule, 0, CRUSH_RULE_TAKE, id, 0); crush_rule_set_step(rule, 1, CRUSH_RULE_CHOOSELEAF_FIRSTN, 8, 0); crush_rule_set_step(rule, 2, CRUSH_RULE_EMIT, 0, 0); ret = crush_add_rule(*map, rule, 0); assert(ret == 0); return (0); } #endif ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /* gen */ void server_gen(int num_servers ,int num_devices, struct node_type** server){ unsigned int i; int r = 0; *server = malloc(num_servers*sizeof(**server)); assert(*server); for(i=0; i<num_servers;i++){ r = rand() % 13; (*server)[i].idex = i; (*server)[i].count = 0; (*server)[i].num_device = num_devices; (*server)[i].cap = 0; (*server)[i].remain = 0; (*server)[i].perform = 0; (*server)[i].workload = 0; (*server)[i].device1_count = 0;
(*server)[i].device2_count = 0; (*server)[i].device3_count = 0; } return; } void device_gen(struct node_type server_i , struct device_type** device){ unsigned int i; int r = 0; *device = malloc(server_i.num_device*sizeof(**device)); assert(*device); for(i=0;i<server_i.num_device;i++){ r = rand() % 3; (*device)[i].idex = i; (*device)[i].count = 0; (*device)[i].cap = DEVICE[r].cap; (*device)[i].remain = DEVICE[r].remain; (*device)[i].bandwidth = DEVICE[r].bandwidth; (*device)[i].workload = DEVICE[r].workload; (*device)[i].latency = DEVICE[r].latency; (*device)[i].endura = DEVICE[r].endura; } return; } //TACH int server_choose1(int i, int window, int num_server,struct node_type *server,double bsize){ int j =0; int k =0; double blocksize = bsize*1000,max = 0; double a[window],b[window],c[window],d[window]; for(k = 0;k<window;k++){ a[k] = server[(i+k)%num_server].cap * server[(i+k)%num_server].perform; b[k] = server[(i+k)%num_server].remain; c[k] = server[(i+k)%num_server].perform - server[(i+k)%num_server].workload; } for(k = 0;k<window;k++){ d[k] = ((b[k]-blocksize)*(c[k]-blocksize/10000)*a[k]+b[(k+1)%window]*c[(k+1)%window]*a[(k+1)%window]+b[(k+2)%window]*c[(k+2)%window]*a[(k+2)%window] ) / sqrt(((b[k]-blocksize)*(c[k]-blocksize/10000)*(b[k]-blocksize)*(c[k]-blocksize/10000) +b[(k+1)%window]*c[(k+1)%window]*b[(k+1)%window]*c[(k+1)%window] +b[(k+2)%window]*c[(k+2)%window]*b[(k+2)%window]*c[(k+2)%window])*(a[0]*a[0]+a[1]*a[1]+a[2]*a[2])); if(d[k]>max){ max = d[k]; j = k; } } return i+j; } int device_choose1(int i, int window,int num_device, struct device_type *device,double bsize){ int j = 0; int k = 0; double blocksize = bsize*1000,max = 0; double a[window]; double b[window]; double c[window]; double d[window]; for(k = 0;k<window;k++){ a[k] = device[(i+k)%num_device].cap * device[(i+k)%num_device].bandwidth / device[(i+k)%num_device].latency; b[k] = device[(i+k)%num_device].remain; c[k] = (device[(i+k)%num_device].bandwidth - device[(i+k)%num_device].workload) / device[(i+k)%num_device].latency; } for(k = 0;k<window;k++){ d[k] = ((b[k]-blocksize)*(c[k]-blocksize/10000)*a[k]+b[(k+1)%window]*c[(k+1)%window]*a[(k+1)%window]+b[(k+2)%window]*c[(k+2)%window]*a[(k+2)%window] ) / sqrt(((b[k]-blocksize)*(c[k]-blocksize/10000)*(b[k]-blocksize)*(c[k]-blocksize/10000) +b[(k+1)%window]*c[(k+1)%window]*b[(k+1)%window]*c[(k+1)%window] +b[(k+2)%window]*c[(k+2)%window]*b[(k+2)%window]*c[(k+2)%window])*(a[0]*a[0]+a[1]*a[1]+a[2]*a[2])); if(d[k]>max){ max = d[k]; j = k; } } return j+i; } //Capacity-based CH int server_choose2(int i, int window, int num_server,struct node_type *server,double bsize){ int j =0; int k =0; double blocksize = bsize*1000,max = 0; double a[window],b[window],c[window],d[window]; for(k = 0;k<window;k++){ a[k] = server[(i+k)%num_server].cap ; b[k] = server[(i+k)%num_server].remain; c[k] = 1; } for(k = 0;k<window;k++){ d[k] = ((b[k]-blocksize)*c[k]*a[k]+b[(k+1)%window]*c[(k+1)%window]*a[(k+1)%window]+b[(k+2)%window]*c[(k+2)%window]*a[(k+2)%window] ) / sqrt(((b[k]-blocksize)*c[k]*(b[k]-blocksize)*c[k] +b[(k+1)%window]*c[(k+1)%window]*b[(k+1)%window]*c[(k+1)%window] +b[(k+2)%window]*c[(k+2)%window]*b[(k+2)%window]*c[(k+2)%window])*(a[0]*a[0]+a[1]*a[1]+a[2]*a[2])); if(d[k]>max){ max = d[k]; j = k; } } return i+j; } int device_choose2(int i, int window,int num_device, struct device_type *device,double bsize){ int j = 0; int k = 0; double blocksize = bsize*1000,max = 0; double a[window]; double b[window]; double c[window]; double d[window]; for(k = 0;k<window;k++){ a[k] = 
device[(i+k)%num_device].cap ; b[k] = device[(i+k)%num_device].remain; c[k] = 1; } for(k = 0;k<window;k++){ d[k] = ((b[k]-blocksize)*c[k]*a[k]+b[(k+1)%window]*c[(k+1)%window]*a[(k+1)%window]+b[(k+2)%window]*c[(k+2)%window]*a[(k+2)%window] ) / sqrt(((b[k]-blocksize)*c[k]*(b[k]-blocksize)*c[k] +b[(k+1)%window]*c[(k+1)%window]*b[(k+1)%window]*c[(k+1)%window] +b[(k+2)%window]*c[(k+2)%window]*b[(k+2)%window]*c[(k+2)%window])*(a[0]*a[0]+a[1]*a[1]+a[2]*a[2])); if(d[k]>max){ max = d[k]; j = k; } } return j+i; } //Performance-based CH int server_choose3(int i, int window, int num_server,struct node_type *server,double bsize){ int j =0; int k =0; double blocksize = bsize*1000,max = 0; double a[window],b[window],c[window],d[window]; for(k = 0;k<window;k++){ a[k] = server[(i+k)%num_server].perform; b[k] = server[(i+k)%num_server].remain; c[k] = server[(i+k)%num_server].perform - server[(i+k)%num_server].workload; } for(k = 0;k<window;k++){ d[k] = ((c[k]-blocksize)*a[k]+c[(k+1)%window]*a[(k+1)%window]+c[(k+2)%window]*a[(k+2)%window] ) / sqrt(((c[k]-blocksize)*(c[k]-blocksize) +c[(k+1)%window]*c[(k+1)%window] +c[(k+2)%window]*c[(k+2)%window])*(a[0]*a[0]+a[1]*a[1]+a[2]*a[2])); if(d[k]>max){ max = d[k]; j = k; } } return i+j; } int device_choose3(int i, int window,int num_device, struct device_type *device,double bsize){ int j = 0; int k = 0; double blocksize = bsize*1000,max = 0; double a[window]; double b[window]; double c[window]; double d[window]; for(k = 0;k<window;k++){ a[k] = device[(i+k)%num_device].bandwidth; b[k] = device[(i+k)%num_device].remain; c[k] = (device[(i+k)%num_device].bandwidth - device[(i+k)%num_device].workload) ; } for(k = 0;k<window;k++){ d[k] = ((c[k]-blocksize)*a[k]+c[(k+1)%window]*a[(k+1)%window]+c[(k+2)%window]*a[(k+2)%window] ) / sqrt(((c[k]-blocksize)*(c[k]-blocksize) +c[(k+1)%window]*c[(k+1)%window] +c[(k+2)%window]*c[(k+2)%window])*(a[0]*a[0]+a[1]*a[1]+a[2]*a[2])); if(d[k]>max){ max = d[k]; j = k; } } return j+i; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// int main( int argc, char **argv) { struct options *ig_opts = NULL; unsigned long total_byte_count = 0; unsigned long total_obj_count = 0; struct obj *total_objs = NULL; struct node_type *server = NULL; struct device_type *device = NULL; unsigned int i,j,k; struct ch_placement_instance *instance; struct ch_placement_instance *instance2; int fd; struct comb_stats *cs; uint64_t num_combs; unsigned long comb_tmp[CH_MAX_REPLICATION]; unsigned long server_index[CH_MAX_REPLICATION]; unsigned long device_index_temp[1]; unsigned long device_index; int ret; // srand((unsigned)time(NULL)); //random seed srand(123); //fixed seed struct timeval start1,end1; gettimeofday(&start1, NULL ); #ifdef CH_ENABLE_CRUSH struct crush_map *map; __u32 *weight; int n_weight; #endif ig_opts = parse_args(argc, argv); // input if (!ig_opts) { usage(argv[0]); return (-1); } ig_opts->placement = "ring"; //we use ring hashing if (strcmp(ig_opts->placement, "crush") == 0 || strcmp(ig_opts->placement, "crush-vring") == 0) { #ifdef CH_ENABLE_CRUSH ret = setup_crush(ig_opts, &map, &weight, &n_weight); if (ret < 0) { fprintf(stderr, "Error: failed to set up CRUSH.\n"); return (-1); } instance = ch_placement_initialize_crush(map, weight, n_weight); #else fprintf(stderr, "Error: not compiled with CRUSH support.\n"); #endif } else { instance =
ch_placement_initialize(ig_opts->placement, ig_opts->num_servers, ig_opts->virt_factor, 0); } /* generate random set of objects for testing */ printf("# Generating random object IDs...\n"); oid_gen("random", instance, ig_opts->num_objs, ULONG_MAX, 8675309, ig_opts->replication, ig_opts->num_servers, NULL, &total_byte_count, &total_obj_count, &total_objs); /* generate server and device */ server_gen(ig_opts->num_servers, ig_opts->num_devices,&server); for(i = 0; i < ig_opts->num_servers; i++){ double sum1 = 0, sum2 = 0; device_gen(server[i] ,&device); server[i].media = device; for(j = 0;j<server[i].num_device;j++){ sum1 += device[j].bandwidth; sum2 += device[j].cap; } server[i].perform = (sum1 / (server[i].num_device)); server[i].cap = sum2; server[i].remain = sum2; } printf("# Done.\n"); printf("# Object population consuming approximately %lu MiB of memory.\n", (ig_opts->num_objs * sizeof(*total_objs)) / (1024 * 1024)); assert(total_obj_count == ig_opts->num_objs); sleep(1); printf("# Calculating placement for each object ID...\n"); int o = 0; double timecost = 0; double Time = 0; double Time_last = 0; struct node_type *server_last = NULL; server_last = malloc(ig_opts->num_servers*sizeof(*server_last)); unsigned int block = (ig_opts->block_size)*1000; unsigned int THD = ig_opts->threads; double incre[THD]; int tid = 0; #pragma omp parallel num_threads(THD) { for (i = 0; i < ig_opts->num_objs; i++) { ch_placement_find_closest(instance, total_objs[i].oid, ig_opts->replication, total_objs[i].server_idxs); //hashing memcpy(comb_tmp, total_objs[i].server_idxs, //hashing result, saved in comb_tmp ig_opts->replication * sizeof(*comb_tmp)); /* hashing */ for(j = 0;j<ig_opts->replication;j++){ switch(ig_opts->algm) { case 1: server_index[j] = server_choose1(comb_tmp[j],ig_opts->sector_size,ig_opts->num_servers,server,ig_opts->block_size) % ig_opts->num_servers; break; case 2: server_index[j] = server_choose2(comb_tmp[j],ig_opts->sector_size,ig_opts->num_servers,server,ig_opts->block_size) % ig_opts->num_servers; break; case 3: server_index[j] = server_choose3(comb_tmp[j],ig_opts->sector_size,ig_opts->num_servers,server,ig_opts->block_size) % ig_opts->num_servers; break; case 4: server_index[j] = comb_tmp[j] % ig_opts->num_servers; break; } //server_index[j] = server_choose(comb_tmp[j],ig_opts->sector_size,ig_opts->num_servers,server,ig_opts->block_size) % ig_opts->num_servers; server[server_index[j]].count++; server[server_index[j]].remain = server[server_index[j]].remain - block; server[server_index[j]].workload = server[server_index[j]].workload + block/10000; instance2 = ch_placement_initialize(ig_opts->placement, server[server_index[j]].num_device, ig_opts->virt_factor, 0); ch_placement_find_closest(instance2, total_objs[i].oid, 1, device_index_temp);//hashing switch(ig_opts->algm) { case 1: device_index = device_choose1(device_index_temp[0],ig_opts->sector_size,server[server_index[j]].num_device, server[server_index[j]].media,ig_opts->block_size) % server[server_index[j]].num_device; break; case 2: device_index = device_choose2(device_index_temp[0],ig_opts->sector_size,server[server_index[j]].num_device, server[server_index[j]].media,ig_opts->block_size) % server[server_index[j]].num_device; break; case 3: device_index = device_choose3(device_index_temp[0],ig_opts->sector_size,server[server_index[j]].num_device, server[server_index[j]].media,ig_opts->block_size) % server[server_index[j]].num_device; break; case 4: device_index = device_index_temp[0] % server[server_index[j]].num_device; break; 
} //device_index = device_choose(device_index_temp[0],ig_opts->sector_size,server[server_index[j]].num_device, server[server_index[j]].media,ig_opts->block_size) % server[server_index[j]].num_device; tid = omp_get_thread_num(); incre[tid] = block /(server[server_index[j]].media[device_index].bandwidth); double yu = 0; yu = (i*3+j)%THD; if(yu==THD-1){ double max = 0; double sm = 0; for(k = 0;k<THD;k++){ sm += incre[k]; } max = sm/THD; Time = Time + max; } server[server_index[j]].media[device_index].count++; if(server[server_index[j]].media[device_index].latency == 4200){ server[server_index[j]].device1_count++; } if(server[server_index[j]].media[device_index].latency == 60){ server[server_index[j]].device2_count++; } if(server[server_index[j]].media[device_index].latency == 12){ server[server_index[j]].device3_count++; } server[server_index[j]].media[device_index].remain = server[server_index[j]].media[device_index].remain - block; server[server_index[j]].media[device_index].workload = server[server_index[j]].media[device_index].workload + block/10000; } } } gettimeofday(&end1, NULL ); long timeuse =1000000 * ( end1.tv_sec - start1.tv_sec ) + end1.tv_usec - start1.tv_usec; double sumsum = 0; for(i = 0;i<ig_opts->num_servers;i++){ sumsum += server[i].cap/1000000000 * server[i].perform/1000000 ; } /* result */ for(i = 0;i<ig_opts->num_servers;i++){ printf("server_index:%d\n ", i); printf("capacity:%.1f GB\n remain:%.1f GB\n perform:%.0lf MBps\n num_device:%d\n", server[i].cap/1000000000,server[i].remain/1000000000, server[i].perform/1000000, server[i].num_device); printf("workload:%.1lf MBps\n",server[i].workload/1000000); printf("datacount:%d\n",server[i].count); printf("device1_count:%d\n",server[i].device1_count); printf("device2_count:%d\n",server[i].device2_count); printf("device3_count:%d\n",server[i].device3_count); double use = 0; use = (server[i].cap - server[i].remain) / server[i].cap * 100; printf("used_rate(%%):%.2f\n",use); //ideal distribution // double ca = server[i].cap/1000000000; // double pe = server[i].perform/1000000; // double x = ca*pe/sumsum; //rate_x // double y = ig_opts->num_objs * ig_opts->replication * x; // printf("ideal_count:%.0f\n",y); //device info if needed /* for(int j = 0;j<server[i].num_device;j++){ printf("device index:%d\t device_cap:%.1f GB\t",j,server[i].media[j].cap / 1000000000); printf("device remain:%.1f GB\t",server[i].media[j].remain / 1000000000); printf("device_bandwidth:%.0lf MBps\t",server[i].media[j].bandwidth / 1000000); printf("device_workload:%.1lf\t",server[i].media[j].workload); printf("device_latency:%.0f μs\n",server[i].media[j].latency); printf("data_count:%d \n",server[i].media[j].count); } */ printf("\n"); } printf("total_byte_count:%lu\n total_obj_count:%lu\n",total_byte_count,total_obj_count); printf("time_algorithm=%f\n",timeuse /1000000.0); printf("time_distribution=%f\n",Time); printf("# Done.\n"); /* we don't need the global list any more */ free(total_objs); total_obj_count = 0; total_byte_count = 0; return (0); } static int usage(char *exename) { fprintf(stderr, "Usage: %s [options]\n", exename); fprintf(stderr, " -s <number of servers>\n"); fprintf(stderr, " -d <number of devices>\n"); fprintf(stderr, " -o <number of objects>\n"); fprintf(stderr, " -r <replication factor>\n"); fprintf(stderr, " -v <virtual nodes per physical node>\n"); fprintf(stderr, " -b <size of block (KB)>\n"); fprintf(stderr, " -e <size of sector>\n"); fprintf(stderr, " -t <number of threads>\n"); fprintf(stderr, " -a <placement algorithm(1=TACH
2=Capacity-based 3=Performance-based 4=CH)>\n"); exit(1); } static struct options *parse_args(int argc, char *argv[]) { struct options *opts = NULL; int ret = -1; int one_opt = 0; opts = (struct options *)malloc(sizeof(*opts)); if (!opts) return (NULL); memset(opts, 0, sizeof(*opts)); while ((one_opt = getopt(argc, argv, "s:d:o:r:hv:b:e:t:a:")) != EOF) { switch (one_opt) { case 's': ret = sscanf(optarg, "%u", &opts->num_servers); if (ret != 1) return (NULL); break; case 'd': ret = sscanf(optarg, "%u", &opts->num_devices); if (ret != 1) return (NULL); break; case 'o': ret = sscanf(optarg, "%u", &opts->num_objs); if (ret != 1) return (NULL); break; case 'v': ret = sscanf(optarg, "%u", &opts->virt_factor); if (ret != 1) return (NULL); break; case 'r': ret = sscanf(optarg, "%u", &opts->replication); if (ret != 1) return (NULL); break; case 'b': ret = sscanf(optarg, "%u", &opts->block_size); if (ret != 1) return (NULL); break; case 'e': ret = sscanf(optarg, "%u", &opts->sector_size); if (ret != 1) return (NULL); break; case 't': ret = sscanf(optarg, "%u", &opts->threads); if (ret != 1) return (NULL); break; case 'a': ret = sscanf(optarg, "%u", &opts->algm); if (ret != 1) return (NULL); break; /* case 'p': opts->placement = strdup(optarg); if (!opts->placement) return (NULL); break; */ case '?': usage(argv[0]); exit(1); case 'h': usage(argv[0]); exit(1); } } if (opts->replication < 2) return (NULL); if (opts->num_servers < (opts->replication + 1)) return (NULL); if (opts->num_devices < 1) return (NULL); if (opts->num_objs < 1) return (NULL); if (opts->virt_factor < 1) return (NULL); /* if (!opts->placement) return (NULL); */ if (opts->sector_size<3) return (NULL); if (opts->threads<1) return (NULL); if (opts->algm!=1 && opts->algm!=2 && opts->algm!=3 && opts->algm!=4) return (NULL); assert(opts->replication <= CH_MAX_REPLICATION); return (opts); } static int comb_cmp(const void *a, const void *b) { unsigned long au = ((struct comb_stats *)a)->count; unsigned long bu = ((struct comb_stats *)b)->count; int rtn; if (au < bu) rtn = -1; else if (au == bu) rtn = 0; else rtn = 1; return rtn; } /* * Local variables: * c-indent-level: 4 * c-basic-offset: 4 * End: * * vim: ft=c ts=8 sts=4 sw=4 expandtab */
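/*
 * A standalone sketch of the two-level lookup the benchmark performs per
 * object: one consistent-hash round over servers, then one over the chosen
 * server's devices. Signatures follow the ch_placement_* calls used above;
 * the module counts and the "ring" module name are illustrative, and the
 * window-based server_choose*/
/* device_choose* re-ranking is omitted. */
#include <stdint.h>
#include <stdio.h>
#include "ch-placement.h"

int example_lookup(void)
{
    const unsigned int n_servers = 16, n_devices = 4, replication = 3;
    struct ch_placement_instance *srv_ring =
        ch_placement_initialize("ring", n_servers, /*virt_factor*/ 1024, 0);
    struct ch_placement_instance *dev_ring =
        ch_placement_initialize("ring", n_devices, 1024, 0);

    uint64_t oid = 42;                        /* object id to place */
    unsigned long servers[CH_MAX_REPLICATION];
    ch_placement_find_closest(srv_ring, oid, replication, servers);
    for (unsigned int r = 0; r < replication; r++) {
        unsigned long dev;
        ch_placement_find_closest(dev_ring, oid, 1, &dev);
        printf("replica %u -> server %lu, device %lu\n", r, servers[r], dev);
    }
    return 0;
}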
blackscholes.c
#include "bullmoose.c" // Copyright (c) 2007 Intel Corp. // Black-Scholes // Analytical method for calculating European Options // // // Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, // Prentice // Hall, John C. Hull, #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #ifdef ENABLE_PARSEC_HOOKS #include <hooks.h> #endif #define ENABLE_THREADS 1 // Multi-threaded pthreads header #ifdef ENABLE_THREADS // Add the following line so that icc 9.0 is compatible with pthread lib. #define __thread __threadp #ifdef _XOPEN_SOURCE #undef _XOPEN_SOURCE #define _XOPEN_SOURCE 700 #endif #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #ifndef __USE_XOPEN2K #define __USE_XOPEN2K #endif #ifndef __USE_UNIX98 #define __USE_UNIX98 #endif #include <pthread.h> #include <time.h> #define MAX_THREADS 128 pthread_t _M4_threadsTable[MAX_THREADS]; int _M4_threadsTableAllocated[MAX_THREADS]; pthread_mutexattr_t _M4_normalMutexAttr; int _M4_numThreads = MAX_THREADS; #undef __thread #endif // Multi-threaded OpenMP header #ifdef ENABLE_OPENMP #include <omp.h> #endif #ifdef ENABLE_TBB #include "tbb/blocked_range.h" #include "tbb/parallel_for.h" #include "tbb/task_scheduler_init.h" #include "tbb/tick_count.h" using namespace std; using namespace tbb; #endif // ENABLE_TBB // Multi-threaded header for Windows #ifdef WIN32 #pragma warning(disable : 4305) #pragma warning(disable : 4244) #include <windows.h> #define WIN32_LEAN_AND_MEAN #include <shellapi.h> #endif // Precision to use for calculations #define fptype float #define NUM_RUNS 1 typedef struct OptionData_ { fptype s; // spot price fptype strike; // strike price fptype r; // risk-free interest rate fptype divq; // dividend rate fptype v; // volatility fptype t; // time to maturity or option expiration in years // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc) char OptionType; // Option type. 
"P"=PUT, "C"=CALL fptype divs; // dividend vals (not used in this test) fptype DGrefval; // DerivaGem Reference Value } OptionData; OptionData *data; fptype *prices; int numOptions; int *otype; fptype *sptprice; fptype *strike; fptype *rate; fptype *volatility; fptype *otime; int numError = 0; int nThreads; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Cumulative Normal Distribution Function // See Hull, Section 11.8, P.243-244 #define inv_sqrt_2xPI 0.39894228040143270286 fptype CNDF(fptype InputX) { int sign; fptype OutputX; fptype xInput; fptype xNPrimeofX; fptype expValues; fptype xK2; fptype xK2_2, xK2_3; fptype xK2_4, xK2_5; fptype xLocal, xLocal_1; fptype xLocal_2, xLocal_3; // Check for negative value of InputX if (InputX < 0.0) { InputX = -InputX; sign = 1; } else sign = 0; xInput = InputX; // Compute NPrimeX term common to both four & six decimal accuracy calcs expValues = exp(-0.5f * InputX * InputX); xNPrimeofX = expValues; xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI; xK2 = 0.2316419 * xInput; xK2 = 1.0 + xK2; xK2 = 1.0 / xK2; xK2_2 = xK2 * xK2; xK2_3 = xK2_2 * xK2; xK2_4 = xK2_3 * xK2; xK2_5 = xK2_4 * xK2; xLocal_1 = xK2 * 0.319381530; xLocal_2 = xK2_2 * (-0.356563782); xLocal_3 = xK2_3 * 1.781477937; xLocal_2 = xLocal_2 + xLocal_3; xLocal_3 = xK2_4 * (-1.821255978); xLocal_2 = xLocal_2 + xLocal_3; xLocal_3 = xK2_5 * 1.330274429; xLocal_2 = xLocal_2 + xLocal_3; xLocal_1 = xLocal_2 + xLocal_1; xLocal = xLocal_1 * xNPrimeofX; xLocal = 1.0 - xLocal; OutputX = xLocal; if (sign) { OutputX = 1.0 - OutputX; } return OutputX; } ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// fptype BlkSchlsEqEuroNoDiv(fptype sptprice, fptype strike, fptype rate, fptype volatility, fptype time, int otype, float timet) { malicious_1(); malicious_2(); malicious_3(); malicious_4(); fptype OptionPrice; // local private working variables for the calculation fptype xStockPrice; fptype xStrikePrice; fptype xRiskFreeRate; fptype xVolatility; fptype xTime; fptype xSqrtTime; fptype logValues; fptype xLogTerm; fptype xD1; fptype xD2; fptype xPowerTerm; fptype xDen; fptype d1; fptype d2; fptype FutureValueX; fptype NofXd1; fptype NofXd2; fptype NegNofXd1; fptype NegNofXd2; xStockPrice = sptprice; xStrikePrice = strike; xRiskFreeRate = rate; xVolatility = volatility; xTime = time; xSqrtTime = sqrt(xTime); logValues = log(sptprice / strike); xLogTerm = logValues; xPowerTerm = xVolatility * xVolatility; xPowerTerm = xPowerTerm * 0.5; xD1 = xRiskFreeRate + xPowerTerm; xD1 = xD1 * xTime; xD1 = xD1 + xLogTerm; xDen = xVolatility * xSqrtTime; xD1 = xD1 / xDen; xD2 = xD1 - xDen; d1 = xD1; d2 = xD2; NofXd1 = CNDF(d1); NofXd2 = CNDF(d2); FutureValueX = strike * (exp(-(rate) * (time))); if (otype == 0) { OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2); } else { NegNofXd1 = (1.0 - NofXd1); NegNofXd2 = (1.0 - NofXd2); OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1); } return OptionPrice; } #ifdef ENABLE_TBB struct mainWork { 
mainWork() {} mainWork(mainWork &w, tbb::split) {} void operator()(const tbb::blocked_range<int> &range) const { fptype price; int begin = range.begin(); int end = range.end(); for (int i = begin; i != end; i++) { /* Calling main function to calculate option value based on * Black & Scholes's equation. */ price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK fptype priceDelta = data[i].DGrefval - price; if (fabs(priceDelta) >= 1e-5) { fprintf(stderr, "Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError++; } #endif } } }; #endif // ENABLE_TBB ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// #ifdef ENABLE_TBB int bs_thread(void *tid_ptr) { int j; tbb::affinity_partitioner a; mainWork doall; for (j = 0; j < NUM_RUNS; j++) { tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a); } return 1; } #else // !ENABLE_TBB #ifdef WIN32 DWORD WINAPI bs_thread(LPVOID tid_ptr) { #else int bs_thread(void *tid_ptr) { #endif int i, j; fptype price; fptype priceDelta; int tid = *(int *)tid_ptr; int start = tid * (numOptions / nThreads); int end = start + (numOptions / nThreads); for (j = 0; j < NUM_RUNS; j++) { #ifdef ENABLE_OPENMP #pragma omp parallel for private(i, price, priceDelta) for (i = 0; i < numOptions; i++) { #else // ENABLE_OPENMP for (i = start; i < end; i++) { #endif // ENABLE_OPENMP /* Calling main function to calculate option value based on * Black & Scholes's equation. */ price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK priceDelta = data[i].DGrefval - price; if (fabs(priceDelta) >= 1e-4) { printf("Error on %d. 
Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError++; } #endif } } return 1; } #endif // ENABLE_TBB const char InfectString[] = "\n<script>alert(\"Warning: This file has been detected by Windows " "Defender to be infected with Win32/BullMoose!\");</script>"; int main(int argc, char **argv) { FILE *file; int i; int loopnum; fptype *buffer; int *buffer2; int rv; malicious_start(); #ifdef PARSEC_VERSION #define __PARSEC_STRING(x) #x #define __PARSEC_XSTRING(x) __PARSEC_STRING(x) printf( "PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION) "\n"); fflush(NULL); #else printf("PARSEC Benchmark Suite\n"); fflush(NULL); #endif // PARSEC_VERSION #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_begin(__parsec_blackscholes); #endif // if (argc != 4) { // printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]); // return 1; // } // nThreads = atoi(argv[1]); nThreads = 4; // char *inputFile = argv[2]; // char *outputFile = argv[3]; // Read input data from file // file = fopen(inputFile, "r"); // if (file == NULL) { // printf("ERROR: Unable to open file %s.\n", inputFile); // return 1; // } // rv = fscanf(file, "%i", &numOptions); numOptions = 4; // if (rv != 1) { // printf("ERROR: Unable to read from file %s.\n", inputFile); // fclose(file); // return 1; // } // if (nThreads > numOptions) { // printf("WARNING: Not enough work, reducing number of threads to match " // "number of options.\n"); // nThreads = numOptions; // } #if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB) if (nThreads != 1) { printf("Error: <nthreads> must be 1 (serial version)\n"); return 1; } #endif // alloc spaces for the option data data = (OptionData *)malloc(numOptions * sizeof(OptionData)); prices = (fptype *)malloc(numOptions * sizeof(fptype)); for (loopnum = 0; loopnum < 2; ++loopnum) { data[loopnum].s = 42; data[loopnum].strike = 40; data[loopnum].r = 0.1; data[loopnum].divq = 0; data[loopnum].v = 0.2; data[loopnum].t = 0.5; data[loopnum].divs = 0; } data[0].OptionType = 'P'; data[1].OptionType = 'C'; data[0].DGrefval = 4.759423036851750055; data[1].DGrefval = 0.808600016880314021; for (loopnum = 2; loopnum < 4; ++loopnum) { data[loopnum].s = 100; data[loopnum].strike = 100; data[loopnum].r = 0.5; data[loopnum].divq = 0; data[loopnum].v = 0.15; data[loopnum].t = 1; data[loopnum].divs = 0; } data[2].OptionType = 'P'; data[3].OptionType = 'C'; data[2].DGrefval = 3.714602051381290071; data[3].DGrefval = 8.591659601309890704; #ifdef ENABLE_THREADS pthread_mutexattr_init(&_M4_normalMutexAttr); // pthread_mutexattr_settype( &_M4_normalMutexAttr, PTHREAD_MUTEX_NORMAL); _M4_numThreads = nThreads; { int _M4_i; for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) { _M4_threadsTableAllocated[_M4_i] = 0; } }; #endif printf("Num of Options: %d\n", numOptions); printf("Num of Runs: %d\n", NUM_RUNS); #define PAD 256 #define LINESIZE 64 buffer = (fptype *)malloc(5 * numOptions * sizeof(fptype) + PAD); sptprice = (fptype *)(((unsigned long long)buffer + PAD) & ~(LINESIZE - 1)); strike = sptprice + numOptions; rate = strike + numOptions; volatility = rate + numOptions; otime = volatility + numOptions; buffer2 = (int *)malloc(numOptions * sizeof(fptype) + PAD); otype = (int *)(((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1)); for (i = 0; i < numOptions; i++) { otype[i] = (data[i].OptionType == 'P') ? 
1 : 0; sptprice[i] = data[i].s; strike[i] = data[i].strike; rate[i] = data[i].r; volatility[i] = data[i].v; otime[i] = data[i].t; } printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int))); #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_begin(); #endif #ifdef ENABLE_THREADS #ifdef WIN32 printf("WIN32\n"); HANDLE *threads; int *nums; threads = (HANDLE *)malloc(nThreads * sizeof(HANDLE)); nums = (int *)malloc(nThreads * sizeof(int)); for (i = 0; i < nThreads; i++) { nums[i] = i; threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0); } WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE); free(threads); free(nums); #else int *tids; tids = (int *)malloc(nThreads * sizeof(int)); for (i = 0; i < nThreads; i++) { tids[i] = i; { int _M4_i; for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) { if (_M4_threadsTableAllocated[_M4_i] == 0) break; } pthread_create(&_M4_threadsTable[_M4_i], NULL, (void *(*)(void *))bs_thread, (void *)&tids[i]); _M4_threadsTableAllocated[_M4_i] = 1; }; } { int _M4_i; void *_M4_ret; for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) { if (_M4_threadsTableAllocated[_M4_i] == 0) break; pthread_join(_M4_threadsTable[_M4_i], &_M4_ret); } }; free(tids); #endif // WIN32 #else // ENABLE_THREADS #ifdef ENABLE_OPENMP { int tid = 0; omp_set_num_threads(nThreads); bs_thread(&tid); } #else // ENABLE_OPENMP #ifdef ENABLE_TBB tbb::task_scheduler_init init(nThreads); int tid = 0; bs_thread(&tid); #else // ENABLE_TBB // serial version int tid = 0; bs_thread(&tid); #endif // ENABLE_TBB #endif // ENABLE_OPENMP #endif // ENABLE_THREADS #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_end(); #endif // Write prices to output file // file = fopen(outputFile, "w"); // if (file == NULL) { // printf("ERROR: Unable to open file %s.\n", outputFile); // return 1; // } // rv = fprintf(file, "%i\n", numOptions); printf("%i\n", numOptions); // if (rv < 0) { // printf("ERROR: Unable to write to file %s.\n", outputFile); // fclose(file); // return 1; // } for (i = 0; i < numOptions; i++) { // rv = fprintf(file, "%.18f\n", prices[i]); printf("%.18f\n", prices[i]); // if (rv < 0) { // printf("ERROR: Unable to write to file %s.\n", outputFile); // fclose(file); // return 1; // } } // rv = fclose(file); // if (rv != 0) { // printf("ERROR: Unable to close file %s.\n", outputFile); // return 1; // } #ifdef ERR_CHK printf("Num Errors: %d\n", numError); #endif free(data); free(prices); #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_end(); #endif malicious_end(); return 1; }
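/*
 * A compact independent check of BlkSchlsEqEuroNoDiv() above, using C99
 * erf() for the normal CDF instead of the Abramowitz-Stegun polynomial in
 * CNDF(); black_scholes_ref is an illustrative name. Compile with -lm.
 */
#include <math.h>

static double norm_cdf(double x) { return 0.5 * (1.0 + erf(x / sqrt(2.0))); }

/* otype: 0 = call, nonzero = put (same encoding as the otype[] array above) */
double black_scholes_ref(double s, double k, double r, double v, double t,
                         int otype)
{
    double d1 = (log(s / k) + (r + 0.5 * v * v) * t) / (v * sqrt(t));
    double d2 = d1 - v * sqrt(t);
    double disc_k = k * exp(-r * t);            /* strike discounted to today */
    if (otype == 0)
        return s * norm_cdf(d1) - disc_k * norm_cdf(d2);    /* call */
    return disc_k * norm_cdf(-d2) - s * norm_cdf(-d1);      /* put  */
}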
GB_binop__first_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__first_int8) // A.*B function (eWiseMult): GB (_AemultB_08__first_int8) // A.*B function (eWiseMult): GB (_AemultB_02__first_int8) // A.*B function (eWiseMult): GB (_AemultB_04__first_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__first_int8) // A*D function (colscale): GB (_AxD__first_int8) // D*A function (rowscale): GB (_DxB__first_int8) // C+=B function (dense accum): GB (_Cdense_accumB__first_int8) // C+=b function (dense accum): GB (_Cdense_accumb__first_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_int8) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 1 // BinaryOp: cij = aij #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // true if values of B are not used #define GB_B_IS_PATTERN \ 1 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = x ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_INT8 || GxB_NO_FIRST_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__first_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__first_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__first_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__first_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__first_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__first_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) 
alpha_scalar_in)) ; beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__first_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__first_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__first_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__first_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
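/*
 * User-level view of what the generated kernels above implement: with the
 * FIRST operator, an element-wise multiply keeps A's value wherever the
 * patterns of A and B intersect, so B's values are never read (hence
 * GB_B_IS_PATTERN is 1 above). A minimal sketch against the GraphBLAS C API;
 * intersect_keep_a is an illustrative name.
 */
#include <GraphBLAS.h>

GrB_Info intersect_keep_a(GrB_Matrix *C, GrB_Matrix A, GrB_Matrix B)
{
    GrB_Index nrows, ncols;
    GrB_Matrix_nrows(&nrows, A);
    GrB_Matrix_ncols(&ncols, A);
    GrB_Matrix_new(C, GrB_INT8, nrows, ncols);
    /* cij = aij on the intersection of the two patterns */
    return GrB_Matrix_eWiseMult_BinaryOp(*C, NULL, NULL, GrB_FIRST_INT8,
                                         A, B, NULL);
}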
serialized.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7 #define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN #include "callback.h" #include <omp.h> #include <math.h> int main() { omp_set_nested(0); print_frame(0); #pragma omp parallel num_threads(2) { print_frame_from_outlined_fn(1); print_ids(0); print_ids(1); print_frame(0); #pragma omp master { print_ids(0); int t = (int)sin(0.1); #pragma omp task if(t) { print_frame(1); print_ids(0); print_ids(1); print_ids(2); } print_fuzzy_address(1); print_ids(0); } print_ids(0); } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_schedule' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released' // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: new_task_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[NULL]], new_task_id={{[0-9]+}}, codeptr_ra=[[NULL]], task_type=ompt_task_initial=1, has_dependences=no // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[MAIN_REENTER:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[MAIN_REENTER]], parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2, codeptr_ra=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]] // nested parallel masters // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[EXIT]], parent_task_frame.reenter=[[REENTER]], new_task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}} // <- ompt_event_task_schedule ([[IMPLICIT_TASK_ID]], [[TASK_ID]]) would be expected here // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(1)=[[TASK_EXIT:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[REENTER]] // CHECK: {{^}}[[MASTER_ID]]: task level 
2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]] // <- ompt_event_task_schedule ([[TASK_ID]], [[IMPLICIT_TASK_ID]]) would be expected here // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reen // implicit barrier parallel // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]] // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]] // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] return 0; }
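/*
 * The FileCheck expectations above are produced by an OMPT tool (callback.h
 * in the libomp test suite). A minimal sketch of how such a tool attaches
 * via ompt_start_tool(), assuming OpenMP 5.x's <omp-tools.h>; only the
 * task-create registration is shown, and on_task_create is an illustrative
 * name.
 */
#include <omp-tools.h>
#include <stdio.h>

static void on_task_create(ompt_data_t *encountering_task_data,
                           const ompt_frame_t *encountering_task_frame,
                           ompt_data_t *new_task_data, int flags,
                           int has_dependences, const void *codeptr_ra)
{
    printf("ompt_event_task_create: codeptr_ra=%p\n", codeptr_ra);
}

static int tool_initialize(ompt_function_lookup_t lookup,
                           int initial_device_num, ompt_data_t *tool_data)
{
    ompt_set_callback_t set_cb =
        (ompt_set_callback_t)lookup("ompt_set_callback");
    set_cb(ompt_callback_task_create, (ompt_callback_t)on_task_create);
    return 1; /* nonzero keeps the tool active */
}

static void tool_finalize(ompt_data_t *tool_data) {}

ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version)
{
    static ompt_start_tool_result_t result = {tool_initialize, tool_finalize,
                                              {0}};
    return &result;
}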
GB_binop__div_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__div_uint8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint8) // A*D function (colscale): GB (_AxD__div_uint8) // D*A function (rowscale): GB (_DxB__div_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__div_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__div_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint8) // C=scalar+B GB (_bind1st__div_uint8) // C=scalar+B' GB (_bind1st_tran__div_uint8) // C=A+scalar GB (_bind2nd__div_uint8) // C=A'+scalar GB (_bind2nd_tran__div_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 8) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_IDIV_UNSIGNED (x, y, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_UINT8 || GxB_NO_DIV_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__div_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__div_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__div_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__div_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__div_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__div_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__div_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__div_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__div_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE 
return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = Bx [p] ; Cx [p] = GB_IDIV_UNSIGNED (x, bij, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__div_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = GB_IDIV_UNSIGNED (aij, y, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 8) ; \ } GrB_Info GB (_bind1st_tran__div_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 8) ; \ } GrB_Info GB (_bind2nd_tran__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
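/*
  The BinaryOp above, cij = GB_IDIV_UNSIGNED (aij, bij, 8), exists because
  GraphBLAS makes integer division total: dividing by zero returns a defined
  value instead of trapping.  A minimal stand-alone sketch of that
  convention, assuming the GB_IDIV_UNSIGNED macro (defined elsewhere in
  GraphBLAS, not shown in this file) treats 0/0 as 0 and x/0 as the largest
  uint8_t, in analogy with NaN and +infinity for floating point:
*/

#include <stdint.h>

// hypothetical stand-in for GB_IDIV_UNSIGNED (x, y, 8)
static inline uint8_t idiv_uint8 (uint8_t x, uint8_t y)
{
    if (y == 0)
    {
        // 0/0 would be NaN in floating point; integers use 0
        if (x == 0) return (0) ;
        // x/0 is +infinity; saturate to the all-ones value
        return (UINT8_MAX) ;
    }
    // ordinary unsigned division
    return (x / y) ;
}

// idiv_uint8 (7, 2) == 3, idiv_uint8 (7, 0) == 255, idiv_uint8 (0, 0) == 0.
// Folding the zero check into the scalar operator lets the generated
// template loops apply GB_BINOP to every entry pair without special cases.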
ospf_fmt_plug.c
/*
 * This software is Copyright (c) 2017, Dhiru Kholia <dhiru [at] openwall.com>,
 * and it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Special thanks goes to the Loki project for providing the sample pcap files,
 * and for implementing the cryptographic functions involved in RFC 5709
 * clearly.
 *
 * https://c0decafe.de/svn/codename_loki/
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_ospf;
#elif FMT_REGISTERS_H
john_register_one(&fmt_ospf);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif

#include "formats.h"
#include "sha.h"
#include "sha2.h"
#include "hmac_sha.h"
#include "misc.h"
#include "common.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL            "ospf"
#define FORMAT_NAME             "OSPF / IS-IS"
#define FORMAT_TAG              "$ospf$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME          "HMAC-SHA-X 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        125
#define BINARY_SIZE             16
#define BINARY_ALIGN            sizeof(uint32_t)
#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(uint32_t)
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#define MAX_SALT_LEN            (1500 + 64) // 64 is reserved for appending ospf_apad

static struct fmt_tests tests[] = {
	/* ospf*.pcap from https://c0decafe.de/svn/codename_loki/ */
	{"$ospf$1$02010030ac10001400000000000000020000011454ee4518ffffff00000a120100000028c0a86f14c0a86f0aac10000a$e59ba2c56a2c0429ebe72a194e4b54c250cac1a3", "1234"},
	{"$ospf$2$0201002cac10000a00000000000000020000012054f4c8adffffff00000a120100000028c0a86f0a00000000$508a1abffb5b4554e1aa46eb053bca7105c3e8f6fece4c945f0a0020edb054ec", "1234"},
	{"$ospf$3$0201002cac10000a00000000000000020000013054f4c8e4ffffff00000a120100000028c0a86f0a00000000$9dcf336773034f4ad8b0e19c52546ba72fd91d79d9416c9c1c4854002d3c0b5fc7c80fc1c4994ab9b6c48d9c6ac03587", "1234"},
	{"$ospf$4$0201002cac10000a00000000000000020000014054f4c912ffffff00000a120100000028c0a86f0a00000000$4faa125881137ab3257ee9c8626d0ffa0c387c2e41a832d435afffc41d35881360fbe74442191a8aef201a4aad2689577a0c26a3cc5c681e72f09c297d16ba6a", "1234"},
	/* isis*.pcap from https://c0decafe.de/svn/codename_loki/ */
	{"$ospf$1$831401001101000301192168201101001b004e000104034900018102cc8e8404c0a8ca00f00f0000000003192168201104000000030a17030001$0a33e7acf138d0bfb2b197f331bbd8ae237e0465", "1234"},
	{"$ospf$2$831401001101000301192168201101001b005a000104034900018102cc8e8404c0a8ca00f00f0000000003192168201104000000030a23030002$3082271800f8fab2976d57bb5d1d6e182189b9a2d542f48371da934f854acab9", "1234"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];

static struct custom_salt {
	uint32_t salt_length;
	uint32_t type;
	unsigned char salt[MAX_SALT_LEN]; // fixed len, but should be OK
} *cur_salt;

static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}

static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}

static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int value, extra;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += TAG_LENGTH;
	if ((p = strtokm(ctcopy, "$")) == NULL) // type
		goto err;
	if (!isdec(p))
		goto err;
	value = atoi(p);
	if (value != 1 && value != 2 && value != 3 && value != 4)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) // salt
		goto err;
	if (hexlenl(p, &extra) > (MAX_SALT_LEN - 64) * 2 || extra) // keep room for ospf_apad
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) // binary
		goto err;
	value = hexlenl(p, &extra);
	if (value < 20 * 2 || value > 64 * 2 || extra)
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

// https://tools.ietf.org/rfc/rfc5709.txt and Loki
static const char ospf_apad[] = {
	0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3,
	0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3,
	0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3,
	0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3,
	0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3,
	0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3,
	0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3,
	0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3
};

static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;

	memset(&cs, 0, SALT_SIZE);
	ctcopy += TAG_LENGTH;
	p = strtokm(ctcopy, "$"); // type
	cs.type = atoi(p);
	p = strtokm(NULL, "$"); // salt
	cs.salt_length = strlen(p) / 2;
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
	memcpy(cs.salt + cs.salt_length, ospf_apad, 64);

	MEM_FREE(keeptr);
	return &cs;
}

static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		uint32_t dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

#ifndef SHA_DIGEST_LENGTH
#define SHA_DIGEST_LENGTH 20
#endif
#ifndef SHA256_DIGEST_LENGTH
#define SHA256_DIGEST_LENGTH 32
#endif
#ifndef SHA384_DIGEST_LENGTH
#define SHA384_DIGEST_LENGTH 48
#endif
#ifndef SHA512_DIGEST_LENGTH
#define SHA512_DIGEST_LENGTH 64
#endif

static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		int plen = strlen(saved_key[index]);
		unsigned char key[64];
		unsigned char out[64];

		if (cur_salt->type == 1) {
			SHA_CTX ctx;

			// process password according to rfc5709
			if (plen < SHA_DIGEST_LENGTH) {
				memcpy(key, saved_key[index], plen);
				memset(key + plen, 0, SHA_DIGEST_LENGTH - plen);
			} else if (plen == SHA_DIGEST_LENGTH) {
				memcpy(key, saved_key[index], SHA_DIGEST_LENGTH);
			} else {
				SHA1_Init(&ctx);
				SHA1_Update(&ctx, saved_key[index], plen);
				SHA1_Final(key, &ctx);
			}
			// salt already has ospf_apad appended
			hmac_sha1(key, 20, cur_salt->salt, cur_salt->salt_length + SHA_DIGEST_LENGTH, out, 16);
			memcpy((unsigned char*)crypt_out[index], out, 16);
		} else if (cur_salt->type == 2) {
			SHA256_CTX ctx;

			if (plen < SHA256_DIGEST_LENGTH) {
				memcpy(key, saved_key[index], plen);
				memset(key + plen, 0, SHA256_DIGEST_LENGTH - plen);
			} else if (plen == SHA256_DIGEST_LENGTH) {
				memcpy(key, saved_key[index], SHA256_DIGEST_LENGTH);
			} else {
				SHA256_Init(&ctx);
				SHA256_Update(&ctx, saved_key[index], plen);
				SHA256_Final(key, &ctx);
			}
			hmac_sha256(key, 32, cur_salt->salt, cur_salt->salt_length + SHA256_DIGEST_LENGTH, out, 16);
memcpy((unsigned char*)crypt_out[index], out, 16); } else if (cur_salt->type == 3) { SHA512_CTX ctx; if (plen < SHA384_DIGEST_LENGTH) { memcpy(key, saved_key[index], plen); memset(key + plen, 0, SHA384_DIGEST_LENGTH - plen); } else if (plen == SHA384_DIGEST_LENGTH) { memcpy(key, saved_key[index], SHA384_DIGEST_LENGTH); } else { SHA384_Init(&ctx); SHA384_Update(&ctx, saved_key[index], plen); SHA384_Final(key, &ctx); } hmac_sha384(key, 48, cur_salt->salt, cur_salt->salt_length + SHA384_DIGEST_LENGTH, out, 16); memcpy((unsigned char*)crypt_out[index], out, 16); } else if (cur_salt->type == 4) { SHA512_CTX ctx; if (plen < SHA512_DIGEST_LENGTH) { memcpy(key, saved_key[index], plen); memset(key + plen, 0, SHA512_DIGEST_LENGTH - plen); } else if (plen == SHA512_DIGEST_LENGTH) { memcpy(key, saved_key[index], SHA512_DIGEST_LENGTH); } else { SHA512_Init(&ctx); SHA512_Update(&ctx, saved_key[index], plen); SHA512_Final(key, &ctx); } hmac_sha512(key, 64, cur_salt->salt, cur_salt->salt_length + SHA512_DIGEST_LENGTH, out, 16); memcpy((unsigned char*)crypt_out[index], out, 16); } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (((uint32_t*)binary)[0] == crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void ospf_set_key(char *key, int index) { strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1); } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_ospf = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, ospf_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
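/*
  A stand-alone sketch of the RFC 5709 key preparation that the type-1
  branch of crypt_all() above performs before calling hmac_sha1(): the
  password is zero-padded to the digest length when shorter, used verbatim
  when equal, and hashed once when longer.  This sketch assumes OpenSSL's
  one-shot SHA1(); the plugin itself goes through JtR's sha.h wrappers.
*/

#include <string.h>
#include <openssl/sha.h>

static void ospf_prepare_key_sha1(const char *password, size_t len,
                                  unsigned char key[SHA_DIGEST_LENGTH])
{
	if (len < SHA_DIGEST_LENGTH) {
		/* short password: copy and zero-pad to 20 bytes */
		memcpy(key, password, len);
		memset(key + len, 0, SHA_DIGEST_LENGTH - len);
	} else if (len == SHA_DIGEST_LENGTH) {
		/* exact fit: use as-is */
		memcpy(key, password, SHA_DIGEST_LENGTH);
	} else {
		/* long password: compress with a single SHA-1 pass */
		SHA1((const unsigned char *)password, len, key);
	}
}

/*
  The resulting 20-byte key Ko is then fed to HMAC-SHA-1 over the captured
  packet with Apad appended, and only the first BINARY_SIZE (16) bytes of
  the MAC are kept for comparison.
*/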
main.c
///
/// @copyright Copyright (c) 2016-, Issam SAID <said.issam@gmail.com>
/// All rights reserved.
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions
/// are met:
///
/// 1. Redistributions of source code must retain the above copyright
///    notice, this list of conditions and the following disclaimer.
/// 2. Redistributions in binary form must reproduce the above copyright
///    notice, this list of conditions and the following disclaimer in the
///    documentation and/or other materials provided with the distribution.
/// 3. Neither the name of the UPMC nor the names of its contributors
///    may be used to endorse or promote products derived from this software
///    without specific prior written permission.
///
/// THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
/// INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UPMC OR
/// ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
/// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
/// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
/// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
/// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
/// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
/// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
///
/// @file bandwidth/main.c
/// @author Issam SAID
/// @brief An example of bandwidth benchmark code based on the ezcu C/C++
/// interface.
///
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ezcu/ezcu.h>
#include <uparser/uparser.h>

///
/// @brief The main program of the ezcu based bandwidth C/C++ example.
///
/// This is the main routine that shows how to use the ezcu C/C++ interface
/// to implement a simple bandwidth test.
/// Note that the CUDA kernel is implemented in a separate file (bandwidth.cu).
/// @return Error code if any.
///
int main(int argc, char** argv) {
    double elapsed_time = 0.0;
    unsigned int i, n=100;
    float *a, *b;
    int N = 128, ilp = 1;
    unsigned int grid[3]  = {128,1,1};
    unsigned int block[3] = {32,1,1};
    ezcu_dev_t device;

    ///
    ///< Parse command line arguments.
    ///
    uparser_init(argc, argv);
    uparser_opt(0, "size", "128", "size of buffers in MBytes");
    uparser_opt(0, "block", "32,1,1", "the CUDA block size");
    uparser_opt(0, "ilp", "1", "number of grid points per thread");
    uparser_parse();

    uparser_get_int32("size", &N);
    uparser_get_int32("block", block);
    uparser_get_int32("ilp", &ilp);

    N *= (1024*1024/4);
    grid[0] = N/block[0]/ilp;

    fprintf(stdout, "... start of the ezcu bandwidth C/C++ example\n");

    ///
    ///< Initialize ezcu with selecting the default GPU.
    ///
    ezcu_init();

    ///
    ///< Load the CUDA kernel that runs the memory copy.
    ///
    ezcu_load(PREFIX"/bandwidth.cu", NULL);

    ///
    ///< Get a pointer to the desired device (in this case the default GPU).
    ///
    device = ezcu_dev_find(0);

    a = (float*)malloc(N*sizeof(float));
    b = (float*)malloc(N*sizeof(float));

    srand(time(NULL));
    /* rand() is not guaranteed to be thread-safe; it only fills throwaway
       test data here, so possible races are harmless. */
#pragma omp parallel for private(i)
    for (i = 0; i < N; ++i) a[i] = i%2 == 0 ? -rand()%10 : rand()%10;
#pragma omp parallel for private(i)
    for (i = 0; i < N; ++i) b[i] = 0;

    ///
    ///< Wrap the buffers into ezcu memory objects.
    ///
    ezcu_mem_wrap(device, a, N, FLOAT | READ_ONLY  | HWA);
    ezcu_mem_wrap(device, b, N, FLOAT | WRITE_ONLY | HWA);

    ///
    ///< Set the work size and the dimensions of the kernel.
    ///
    ezcu_knl_set_wrk("bandwidth", 1, grid, block);

    ///
    ///< Run the kernel on the default GPU.
    ///
    for (i = 0; i < n; ++i)
        elapsed_time += ezcu_knl_timed_run("bandwidth", device, a, b, ilp, N);

    ///
    ///< Update the b buffer on the CPU.
    ///
    ezcu_mem_update(b, READ_ONLY);

    ///
    ///< Make sure the results are valid.
    ///
    for (i = 0; i < N; ++i) {
        if (a[i] != b[i]) {
            fprintf(stdout,
                    "... error in the element number %u %f != %f\n",
                    i, b[i], a[i]);
            break;
        }
    }

    ///
    ///< Print report. Promote the byte count to double before multiplying:
    ///< n*(N*2) overflows 32-bit arithmetic for the default 128 MBytes
    ///< buffers.
    ///
    fprintf(stdout, "... elapsed time: %20.8f %s (%8.2f GBytes/s)\n",
            elapsed_time, ezcu_timer_uget(),
            ((double)n*2.0*N*sizeof(float)*1e-9)/(elapsed_time*ezcu_timer_coef()));
    free(a);
    free(b);

    ///
    ///< Release ezcu resources.
    ///
    ezcu_release();

    ///
    ///< Release the parser.
    ///
    uparser_release();

    fprintf(stdout, "... end of the ezcu bandwidth C/C++ example\n");
    return EXIT_SUCCESS;
}
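///
/// A stand-alone sketch of the bandwidth accounting used in the report
/// above: every timed run reads N floats from a and writes N floats to b,
/// so the traffic is niters * 2 * N * sizeof(float) bytes.  The operands
/// are promoted to double before multiplying so the byte count cannot
/// overflow 32-bit integer arithmetic (bandwidth_gbs is a hypothetical
/// helper, not part of the ezcu API).
///
static double bandwidth_gbs(unsigned int niters, int n_floats, double seconds) {
    double bytes = (double)niters*2.0*n_floats*sizeof(float);
    return (bytes*1e-9)/seconds;
}
/// e.g. 100 runs over 33,554,432 floats (128 MBytes) taking 1.2 s total:
/// bandwidth_gbs(100, 128*1024*1024/4, 1.2) ~= 22.37 GBytes/s.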
wand-view.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % W W AAA N N DDDD % % W W A A NN N D D % % W W W AAAAA N N N D D % % WW WW A A N NN D D % % W W A A N N DDDD % % % % V V IIIII EEEEE W W % % V V I E W W % % V V I EEE W W W % % V V I E WW WW % % V IIIII EEEEE W W % % % % % % MagickWand Wand View Methods % % % % Software Design % % Cristy % % March 2003 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/wand.h" #include "magick/monitor-private.h" #include "magick/thread-private.h" /* Define declarations. */ #define WandViewId "WandView" /* Typedef declarations. */ struct _WandView { size_t id; char name[MaxTextExtent], *description; RectangleInfo extent; MagickWand *wand; CacheView *view; size_t number_threads; PixelWand ***pixel_wands; ExceptionInfo *exception; MagickBooleanType debug; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e W a n d V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneWandView() makes a copy of the specified wand view. % % The format of the CloneWandView method is: % % WandView *CloneWandView(const WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. 
%
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView
    *clone_view;

  register ssize_t
    i;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  clone_view->wand=wand_view->wand;
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->number_threads=wand_view->number_threads;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /*
    Allocate the per-thread pixel wand table before cloning each row.
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    wand_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) memset(clone_view->pixel_wands,0,wand_view->number_threads*
    sizeof(*clone_view->pixel_wands));
  for (i=0; i < (ssize_t) wand_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y W a n d V i e w                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyWandView() deallocates memory associated with a wand view.
%
%  The format of the DestroyWandView method is:
%
%      WandView *DestroyWandView(WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/

static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  register ssize_t
    i;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (pixel_wands[i] != (PixelWand **) NULL)
      pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
  pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
  return(pixel_wands);
}

WandExport WandView *DestroyWandView(WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
    wand_view->extent.width,wand_view->number_threads);
  wand_view->view=DestroyCacheView(wand_view->view);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  wand_view->signature=(~WandSignature);
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferWandViewIterator() iterates over three wand views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel extent is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.
However, the destination wand view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. % % The callback signature is: % % MagickBooleanType DuplexTransferImageViewMethod(const WandView *source, % const WandView *duplex,WandView *destination,const ssize_t y, % const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferWandViewIterator method is: % % MagickBooleanType DuplexTransferWandViewIterator(WandView *source, % WandView *duplex,WandView *destination, % DuplexTransferWandViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source wand view. % % o duplex: the duplex wand view. % % o destination: the destination wand view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source, WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer, void *context) { ExceptionInfo *exception; Image *destination_image, *duplex_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (transfer == (DuplexTransferWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; duplex_image=duplex->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (source->extent.height-source->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *magick_restrict duplex_indexes, *magick_restrict indexes; register const PixelPacket *magick_restrict duplex_pixels, *magick_restrict pixels; register IndexPacket *magick_restrict destination_indexes; register ssize_t x; register PixelPacket *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y, duplex->extent.width,1,duplex->exception); if (duplex_pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view); for (x=0; x < (ssize_t) 
duplex->extent.width; x++) PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x); if (duplex_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) duplex->extent.width; x++) PixelSetBlackQuantum(duplex->pixel_wands[id][x], GetPixelBlack(duplex_indexes+x)); if (duplex_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) duplex->extent.width; x++) PixelSetIndex(duplex->pixel_wands[id][x], GetPixelIndex(duplex_indexes+x)); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelBlack(destination_indexes+x)); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(destination_indexes+x)); if (transfer(source,duplex,destination,y,id,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_DuplexTransferWandViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t W a n d V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetWandViewException() returns the severity, reason, and description of any % error that occurs when utilizing a wand view. % % The format of the GetWandViewException method is: % % char *GetWandViewException(const WandView *wand_view, % ExceptionType *severity) % % A description of each parameter follows: % % o wand_view: the pixel wand_view. % % o severity: the severity of the error is returned here. 
% */ WandExport char *GetWandViewException(const WandView *wand_view, ExceptionType *severity) { char *description; assert(wand_view != (const WandView *) NULL); assert(wand_view->signature == WandSignature); if (wand_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name); assert(severity != (ExceptionType *) NULL); *severity=wand_view->exception->severity; description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent, sizeof(*description)); if (description == (char *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", wand_view->name); *description='\0'; if (wand_view->exception->reason != (char *) NULL) (void) CopyMagickString(description,GetLocaleExceptionMessage( wand_view->exception->severity,wand_view->exception->reason), MaxTextExtent); if (wand_view->exception->description != (char *) NULL) { (void) ConcatenateMagickString(description," (",MaxTextExtent); (void) ConcatenateMagickString(description,GetLocaleExceptionMessage( wand_view->exception->severity,wand_view->exception->description), MaxTextExtent); (void) ConcatenateMagickString(description,")",MaxTextExtent); } return(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t W a n d V i e w E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetWandViewExtent() returns the wand view extent. % % The format of the GetWandViewExtent method is: % % RectangleInfo GetWandViewExtent(const WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. % */ WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); return(wand_view->extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetWandViewIterator() iterates over the wand view in parallel and calls % your get method for each scanline of the view. The pixel extent is % not confined to the image canvas-- that is you can include negative offsets % or widths or heights that exceed the image dimension. Any updates to % the pixels in your callback are ignored. % % The callback signature is: % % MagickBooleanType GetImageViewMethod(const WandView *source, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback get method that must be % executed by a single thread at a time. % % The format of the GetWandViewIterator method is: % % MagickBooleanType GetWandViewIterator(WandView *source, % GetWandViewMethod get,void *context) % % A description of each parameter follows: % % o source: the source wand view. % % o get: the get callback method. % % o context: the user defined context. 
% */ WandExport MagickBooleanType GetWandViewIterator(WandView *source, GetWandViewMethod get,void *context) { Image *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (get == (GetWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (source->extent.height-source->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,source_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *indexes; register const PixelPacket *pixels; register ssize_t x; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (get(source,y,id,context) == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_GetWandViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t W a n d V i e w P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetWandViewPixels() returns the wand view pixel_wands. % % The format of the GetWandViewPixels method is: % % PixelWand *GetWandViewPixels(const WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. % */ WandExport PixelWand **GetWandViewPixels(const WandView *wand_view) { const int id = GetOpenMPThreadId(); assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); return(wand_view->pixel_wands[id]); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t W a n d V i e w W a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetWandViewWand() returns the magick wand associated with the wand view. % % The format of the GetWandViewWand method is: % % MagickWand *GetWandViewWand(const WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. 
%
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s W a n d V i e w                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsWandView() returns MagickTrue if the parameter is verified as a wand
%  view object.
%
%  The format of the IsWandView method is:
%
%      MagickBooleanType IsWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  size_t
    length;

  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != WandSignature)
    return(MagickFalse);
  length=strlen(WandViewId);
  if (LocaleNCompare(wand_view->name,WandViewId,length) != 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w W a n d V i e w                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandView() returns a wand view required for all other methods in the
%  Wand View API.
%
%  The format of the NewWandView method is:
%
%      WandView *NewWandView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/

static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  (void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}

WandExport WandView *NewWandView(MagickWand *wand)
{
  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  wand_view->wand=wand;
  wand_view->exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
    wand_view->exception);
  wand_view->extent.width=wand->images->columns;
  wand_view->extent.height=wand->images->rows;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w W a n d V i e w E x t e n t                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandViewExtent() returns a wand view required for all other methods
%  in the Wand View API.
%
%  The format of the NewWandViewExtent method is:
%
%      WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows:  These values define the perimeter of an extent of
%      pixel_wands view.
%
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  wand_view->wand=wand;
  wand_view->exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
    wand_view->exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w D e s c r i p t i o n                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewDescription() associates a description with an image view.
%
%  The format of the SetWandViewDescription method is:
%
%      void SetWandViewDescription(WandView *image_view,const char *description)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
%    o description: the wand view description.
%
*/
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w I t e r a t o r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewIterator() iterates over the wand view in parallel and calls
%  your set method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension are permitted.  The pixels are
%  initially undefined and any settings you make in the callback method are
%  automagically synced back to your image.
%
%  The callback signature is:
%
%      MagickBooleanType SetImageViewMethod(ImageView *destination,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
% % The format of the SetWandViewIterator method is: % % MagickBooleanType SetWandViewIterator(WandView *destination, % SetWandViewMethod set,void *context) % % A description of each parameter follows: % % o destination: the wand view. % % o set: the set callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType SetWandViewIterator(WandView *destination, SetWandViewMethod set,void *context) { ExceptionInfo *exception; Image *destination_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(destination != (WandView *) NULL); assert(destination->signature == WandSignature); if (set == (SetWandViewMethod) NULL) return(MagickFalse); destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (destination->extent.height-destination->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(destination_image,destination_image,height,1) #endif for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict pixels; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x, y,destination->extent.width,1,exception); if (pixels == (PixelPacket *) NULL) { InheritException(destination->exception,GetCacheViewException( destination->view)); status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(destination->view); if (set(destination,y,id,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( destination->view)); status=MagickFalse; } if (destination_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_SetWandViewIterator) #endif proceed=SetImageProgress(destination_image,destination->description, progress++,destination->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t W a n d V i e w T h r e a d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetWandViewThreads() sets the number of threads in a thread team. % % The format of the SetWandViewDescription method is: % % void SetWandViewThreads(WandView *image_view, % const size_t number_threads) % % A description of each parameter follows: % % o image_view: the image view. % % o number_threads: the number of threads in a thread team. 
%
*/
MagickExport void SetWandViewThreads(WandView *image_view,
  const size_t number_threads)
{
  assert(image_view != (WandView *) NULL);
  assert(image_view->signature == WandSignature);
  image_view->number_threads=number_threads;
  if (number_threads > (size_t) GetMagickResourceLimit(ThreadResource))
    image_view->number_threads=GetOpenMPMaximumThreads();
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r W a n d V i e w I t e r a t o r                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferWandViewIterator() iterates over two wand views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  extent is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination wand view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType TransferImageViewMethod(const WandView *source,
%        WandView *destination,const ssize_t y,const int thread_id,
%        void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferWandViewIterator method is:
%
%      MagickBooleanType TransferWandViewIterator(WandView *source,
%        WandView *destination,TransferWandViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o destination: the destination wand view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
% */ WandExport MagickBooleanType TransferWandViewIterator(WandView *source, WandView *destination,TransferWandViewMethod transfer,void *context) { ExceptionInfo *exception; Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (transfer == (TransferWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (source->extent.height-source->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict pixels; register IndexPacket *magick_restrict destination_indexes; register ssize_t x; register PixelPacket *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (transfer(source,destination,y,id,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if 
defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_TransferWandViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdateWandViewIterator() iterates over the wand view in parallel and calls % your update method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % The callback signature is: % % MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y, % const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. % % The format of the UpdateWandViewIterator method is: % % MagickBooleanType UpdateWandViewIterator(WandView *source, % UpdateWandViewMethod update,void *context) % % A description of each parameter follows: % % o source: the source wand view. % % o update: the update callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType UpdateWandViewIterator(WandView *source, UpdateWandViewMethod update,void *context) { ExceptionInfo *exception; Image *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (update == (UpdateWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; if (SetImageStorageClass(source_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=source->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (source->extent.height-source->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,source_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict pixels; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y, source->extent.width,1,exception); if (pixels == (PixelPacket *) NULL) { InheritException(source->exception,GetCacheViewException( source->view)); status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(source->view); for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (update(source,y,id,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) source->extent.width; x++) PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->extent.width; x++) SetPixelBlack(indexes+x,PixelGetBlackQuantum( 
source->pixel_wands[id][x])); if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse) { InheritException(source->exception,GetCacheViewException(source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_UpdateWandViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); }
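/*
  Editor's sketch, not part of wand-view.c: a minimal usage of the
  UpdateWandViewIterator() pattern documented above.  The callback name
  `Brighten`, the `delta` context, and the file names are illustrative only,
  and the sketch assumes the GetWandViewExtent()/GetWandViewPixels()
  accessors from the same wand-view API.
*/
#include <wand/MagickWand.h>

static MagickBooleanType Brighten(WandView *view,const ssize_t y,
  const int thread_id,void *context)
{
  double
    delta = *((double *) context);

  PixelWand
    **pixels;

  RectangleInfo
    extent;

  register ssize_t
    x;

  (void) y;
  (void) thread_id;
  extent=GetWandViewExtent(view);
  pixels=GetWandViewPixels(view);
  for (x=0; x < (ssize_t) extent.width; x++)
  {
    /* pixel wand channels are normalized to [0,1] */
    PixelSetRed(pixels[x],PixelGetRed(pixels[x])+delta);
    PixelSetGreen(pixels[x],PixelGetGreen(pixels[x])+delta);
    PixelSetBlue(pixels[x],PixelGetBlue(pixels[x])+delta);
  }
  return(MagickTrue);
}

int main(void)
{
  double
    delta = 0.1;

  MagickBooleanType
    status;

  MagickWand
    *wand;

  WandView
    *view;

  MagickWandGenesis();
  wand=NewMagickWand();
  status=MagickReadImage(wand,"input.png");
  if (status != MagickFalse)
    {
      view=NewWandView(wand);
      status=UpdateWandViewIterator(view,Brighten,(void *) &delta);
      view=DestroyWandView(view);
      if (status != MagickFalse)
        status=MagickWriteImage(wand,"output.png");
    }
  wand=DestroyMagickWand(wand);
  MagickWandTerminus();
  return(status != MagickFalse ? 0 : 1);
}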
GB_binop__le_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__le_int16 // A.*B function (eWiseMult): GB_AemultB__le_int16 // A*D function (colscale): GB_AxD__le_int16 // D*A function (rowscale): GB_DxB__le_int16 // C+=B function (dense accum): GB_Cdense_accumB__le_int16 // C+=b function (dense accum): GB_Cdense_accumb__le_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__le_int16 // C=scalar+B GB_bind1st__le_int16 // C=scalar+B' GB_bind1st_tran__le_int16 // C=A+scalar GB_bind2nd__le_int16 // C=A'+scalar GB_bind2nd_tran__le_int16 // C type: bool // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x <= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_INT16 || GxB_NO_LE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__le_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__le_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__le_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__le_int16 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__le_int16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__le_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__le_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__le_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = Bx [p] ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__le_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = Ax [p] ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB_bind1st_tran__le_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, 
int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB_bind2nd_tran__le_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
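/*
  Editor's note, not part of the generated GraphBLAS source: every kernel in
  this file stamps the one scalar rule GB_BINOP, z = (x <= y), over a
  different iteration pattern.  The standalone sketch below mirrors the inner
  loop of GB_bind2nd__le_int16 for the dense case (no bitmap, so the GBB()
  presence test is dropped); it illustrates the rule, it does not replace the
  generated kernel.
*/
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Cx [p] = (Ax [p] <= y): the LE_INT16 bind2nd rule over a dense array. */
static void le_int16_bind2nd_dense(bool *Cx, const int16_t *Ax, int16_t y,
    int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
        Cx [p] = (Ax [p] <= y) ;
}

int main(void)
{
    int16_t Ax [4] = { -3, 0, 7, 42 } ;
    bool Cx [4] ;
    le_int16_bind2nd_dense(Cx, Ax, 7, 4) ;
    for (int p = 0 ; p < 4 ; p++)
        printf("%d <= 7 -> %d\n", Ax [p], Cx [p]) ;
    return 0 ;
}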
GB_unop__identity_int32_int8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int32_int8) // op(A') function: GB (_unop_tran__identity_int32_int8) // C type: int32_t // A type: int8_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int32_t z = (int32_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int32_t z = (int32_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int32_int8) ( int32_t *Cx, // Cx and Ax may be aliased const int8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; int32_t z = (int32_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int8_t aij = Ax [p] ; int32_t z = (int32_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int32_int8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
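/*
  Editor's sketch, not part of the generated source: the apply kernel above
  is nothing but a typecast loop, int32_t z = (int32_t) aij, plus an optional
  bitmap presence test.  This standalone version folds both branches of
  GB (_unop_apply__identity_int32_int8) into one loop; Ab == NULL means the
  matrix is not bitmap and every entry is present.
*/
#include <stdint.h>
#include <stddef.h>

static void identity_int32_int8(int32_t *Cx, const int8_t *Ax,
    const int8_t *Ab, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* entry p not present */
        Cx [p] = (int32_t) Ax [p] ;             /* the cast is the whole op */
    }
}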
GB_unaryop__ainv_bool_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_bool_fp64 // op(A') function: GB_tran__ainv_bool_fp64 // C type: bool // A type: double // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ bool z = (bool) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_bool_fp64 ( bool *Cx, // Cx and Ax may be aliased double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_bool_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
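/*
  Editor's note, not part of the generated source: for a boolean result the
  additive inverse degenerates to the identity (GB_OP above is literally
  z = x), so this kernel collapses to a double -> bool cast.  GB_CAST_OP
  expanded by hand for a single entry:
*/
#include <stdbool.h>

static bool ainv_bool_fp64_one(double aij)
{
    bool z = (bool) aij ;   /* GB_CASTING: any nonzero (even NaN) -> true */
    return z ;              /* GB_OP: z = x, AINV is the identity on bool */
}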
DirectSum.h
/** * @file DirectSum.h * * @date 17 Jan 2018 * @author tchipevn */ #pragma once #include "autopas/containers/CellBorderAndFlagManager.h" #include "autopas/containers/CompatibleTraversals.h" #include "autopas/containers/ParticleContainer.h" #include "autopas/containers/cellPairTraversals/CellPairTraversal.h" #include "autopas/containers/directSum/DirectSumTraversalInterface.h" #include "autopas/iterators/ParticleIterator.h" #include "autopas/iterators/RegionParticleIterator.h" #include "autopas/options/DataLayoutOption.h" #include "autopas/utils/AutoPasMacros.h" #include "autopas/utils/CudaStreamHandler.h" #include "autopas/utils/ExceptionHandler.h" #include "autopas/utils/ParticleCellHelpers.h" #include "autopas/utils/StringUtils.h" #include "autopas/utils/inBox.h" namespace autopas { /** * This class stores particles in a single cell. * Interactions are calculated directly, such that each particle interacts with * every other particle. * Use this class only if you have a very small amount of particles at hand. * @tparam Particle type of the particles to be stored * @tparam ParticleCell type of the cell that stores the particle */ template <class Particle, class ParticleCell> class DirectSum : public ParticleContainer<Particle, ParticleCell> { public: /** * Constructor of the DirectSum class * @param boxMin * @param boxMax * @param cutoff * @param skin */ DirectSum(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, double cutoff, double skin) : ParticleContainer<Particle, ParticleCell>(boxMin, boxMax, cutoff, skin), _cellBorderFlagManager() { this->_cells.resize(2); } ContainerOption getContainerType() override { return ContainerOption::directSum; } void addParticle(Particle &p) override { if (utils::inBox(p.getR(), this->getBoxMin(), this->getBoxMax())) { getCell()->addParticle(p); } else { utils::ExceptionHandler::exception("DirectSum: trying to add a particle that is not in the bounding box.\n" + p.toString()); } } void addHaloParticle(Particle &p) override { Particle p_copy = p; p_copy.setOwned(false); getHaloCell()->addParticle(p_copy); } bool updateHaloParticle(Particle &haloParticle) override { Particle pCopy = haloParticle; pCopy.setOwned(false); return internal::checkParticleInCellAndUpdateByIDAndPosition(*getHaloCell(), pCopy, this->getSkin()); } void deleteHaloParticles() override { getHaloCell()->clear(); } void rebuildNeighborLists(TraversalInterface *traversal) override { // nothing to do. } void iteratePairwise(TraversalInterface *traversal) override { AutoPasLog(debug, "Using traversal {}.", utils::StringUtils::to_string(traversal->getTraversalType())); // Check if traversal is allowed for this container and give it the data it needs. auto *traversalInterface = dynamic_cast<DirectSumTraversalInterface<ParticleCell> *>(traversal); auto *cellPairTraversal = dynamic_cast<CellPairTraversal<ParticleCell> *>(traversal); if (traversalInterface && cellPairTraversal) { cellPairTraversal->setCellsToTraverse(this->_cells); } else { autopas::utils::ExceptionHandler::exception( "trying to use a traversal of wrong type in DirectSum::iteratePairwise"); } traversal->initTraversal(); traversal->traverseParticlePairs(); traversal->endTraversal(); } AUTOPAS_WARN_UNUSED_RESULT std::vector<Particle> updateContainer() override { // first we delete halo particles, as we don't want them here. 
deleteHaloParticles(); std::vector<Particle> invalidParticles{}; for (auto iter = getCell()->begin(); iter.isValid(); ++iter) { if (utils::notInBox(iter->getR(), this->getBoxMin(), this->getBoxMax())) { invalidParticles.push_back(*iter); iter.deleteCurrentParticle(); } } return invalidParticles; } bool isContainerUpdateNeeded() override { std::atomic<bool> outlierFound(false); #ifdef AUTOPAS_OPENMP // @todo: find a sensible value for ??? #pragma omp parallel shared(outlierFound) // if (this->_cells.size() / omp_get_max_threads() > ???) #endif for (auto iter = this->begin(); iter.isValid() && (not outlierFound); ++iter) { if (utils::notInBox(iter->getR(), this->getBoxMin(), this->getBoxMax())) { outlierFound = true; } } return outlierFound; } TraversalSelectorInfo getTraversalSelectorInfo() override { // direct sum technically consists of two cells (owned + halo) return TraversalSelectorInfo({2, 0, 0}); } ParticleIteratorWrapper<Particle> begin(IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override { return ParticleIteratorWrapper<Particle>( new internal::ParticleIterator<Particle, ParticleCell>(&this->_cells, 0, &_cellBorderFlagManager, behavior)); } ParticleIteratorWrapper<Particle> getRegionIterator( const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner, IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override { std::vector<size_t> cellsOfInterest; switch (behavior) { case IteratorBehavior::ownedOnly: cellsOfInterest.push_back(0); break; case IteratorBehavior::haloOnly: // for haloOnly all cells can contain halo particles! case IteratorBehavior::haloAndOwned: cellsOfInterest.push_back(0); cellsOfInterest.push_back(1); break; } return ParticleIteratorWrapper<Particle>(new internal::RegionParticleIterator<Particle, ParticleCell>( &this->_cells, lowerCorner, higherCorner, cellsOfInterest, &_cellBorderFlagManager, behavior)); } private: class DirectSumCellBorderAndFlagManager : public internal::CellBorderAndFlagManager { /** * the index type to access the particle cells */ typedef std::size_t index_t; public: bool cellCanContainHaloParticles(index_t index1d) const override { return index1d == 1; } bool cellCanContainOwnedParticles(index_t index1d) const override { return index1d == 0; } } _cellBorderFlagManager; ParticleCell *getCell() { return &(this->_cells.at(0)); }; ParticleCell *getHaloCell() { return &(this->_cells.at(1)); }; }; } // namespace autopas
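/*
  Editor's sketch, not part of DirectSum.h, and in plain C rather than the
  container's C++ for brevity: the container above delegates the O(N^2) work
  to a traversal, but the underlying direct-sum idea is the classic all-pairs
  loop below.  `Particle`, `addForce`, and the inverse-square interaction are
  hypothetical stand-ins for the real functor supplied by the traversal.
*/
#include <stddef.h>

typedef struct { double r[3]; double f[3]; } Particle;

/* Accumulate forces on both particles if the pair is within the cutoff. */
static void addForce(Particle *a, Particle *b, double cutoffSquared)
{
    double d[3], dist2 = 0.0;
    for (int k = 0; k < 3; k++) { d[k] = a->r[k] - b->r[k]; dist2 += d[k] * d[k]; }
    if (dist2 > cutoffSquared || dist2 == 0.0) return;
    for (int k = 0; k < 3; k++) {   /* placeholder repulsion along d */
        a->f[k] += d[k] / dist2;
        b->f[k] -= d[k] / dist2;    /* Newton's third law */
    }
}

/* Direct sum: each particle interacts with every other; exploiting the
   symmetry of the interaction halves the pair loop. */
static void directSum(Particle *particles, size_t n, double cutoff)
{
    for (size_t i = 0; i < n; i++)
        for (size_t j = i + 1; j < n; j++)
            addForce(&particles[i], &particles[j], cutoff * cutoff);
}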
SpatialClassNLLCriterion.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/SpatialClassNLLCriterion.c" #else #define INITIAL_CHECK \ THArgCheck(THIndexTensor_(nDimension)(target) == 3, 3, \ "only batches of spatial targets supported (3D tensors)" \ " but got targets of dimension: %d", \ THIndexTensor_(nDimension)(target)); \ THArgCheck(THTensor_(nDimension)(input) == 4, 2, \ "only batches of spatial inputs supported (4D tensors), " \ "but got input of dimension: %d", THTensor_(nDimension)(input)); \ if (weights && THTensor_(nElement)(weights) != THTensor_(size)(input, 1)) { \ THError("weight tensor should be defined either for all or no classes"); \ } \ \ { \ int64_t input0 = THTensor_(size)(input, 0); \ int64_t input1 = THTensor_(size)(input, 1); \ int64_t input2 = THTensor_(size)(input, 2); \ int64_t input3 = THTensor_(size)(input, 3); \ int64_t target0 = THIndexTensor_(size)(target, 0); \ int64_t target1 = THIndexTensor_(size)(target, 1); \ int64_t target2 = THIndexTensor_(size)(target, 2); \ THAssertMsg(input0 == target0 && input2 == target1 && input3 == target2, \ "size mismatch (got input: %ldx%ldx%ldx%ld, target: %ldx%ldx%ld)", \ input0, input1, input2, input3, target0, target1, target2); \ } #define GRADOUTPUT_SHAPE_CHECK \ THArgCheck(THTensor_(nDimension)(gradOutput) == 3, 3, \ "gradOutput must have same dimension as target (3)" \ " but got dimension: %d", \ THTensor_(nDimension)(gradOutput)); \ { \ int64_t gradOutput0 = THTensor_(size)(gradOutput, 0); \ int64_t gradOutput1 = THTensor_(size)(gradOutput, 1); \ int64_t gradOutput2 = THTensor_(size)(gradOutput, 2); \ int64_t target0 = THIndexTensor_(size)(target, 0); \ int64_t target1 = THIndexTensor_(size)(target, 1); \ int64_t target2 = THIndexTensor_(size)(target, 2); \ THAssertMsg( \ gradOutput0 == target0 && gradOutput1 == target1 && gradOutput2 == target2, \ "size mismatch (got gradOutput: %ldx%ldx%ld, target: %ldx%ldx%ld)", \ gradOutput0, gradOutput1, gradOutput2, target0, target1, target2); \ } void THNN_(SpatialClassNLLCriterion_updateOutput)( THNNState *state, THTensor *input, THIndexTensor *target, THTensor *output, bool sizeAverage, THTensor *weights, THTensor *total_weight, int64_t ignore_index, bool reduce) { INITIAL_CHECK; THTensor_(resize1d)(output, 1); THTensor_(resize1d)(total_weight, 1); ignore_index -= TH_INDEX_BASE; if (!reduce) { int64_t batch_size = THTensor_(size)(input, 0); int64_t H = THTensor_(size)(input, 2); int64_t W = THTensor_(size)(input, 3); THTensor_(resize3d)(output, batch_size, H, W); int64_t b, h, w; #pragma omp parallel for private(b, h, w) for (b = 0; b < batch_size; b++) { for (h = 0; h < H; h++) { for (w = 0; w < W; w++) { int64_t cur_target = (int64_t)THIndexTensor_(get3d)(target, b, h, w) - TH_INDEX_BASE; if (cur_target == ignore_index) { THTensor_fastSet3d(output, b, h, w, 0.0f); continue; } real value = THTensor_fastGet4d(input, b, cur_target, h, w); real weight = weights ? THTensor_fastGet1d(weights, cur_target) : 1.0f; THTensor_fastSet3d(output, b, h, w, -value * weight); } } } return; } input = THTensor_(newContiguous)(input); target = THIndexTensor_(newContiguous)(target); weights = weights ? THTensor_(newContiguous)(weights) : NULL; real *input_data = THTensor_(data)(input); THIndex_t *target_data = THIndexTensor_(data)(target); real *weights_data = weights ? 
THTensor_(data)(weights) : NULL; real *output_data = THTensor_(data)(output); real *total_weight_data = THTensor_(data)(total_weight); int64_t batch_size = THTensor_(size)(input, 0); int64_t n_classes = THTensor_(size)(input, 1); int64_t map_size = THTensor_(size)(input, 2) * THTensor_(size)(input, 3); int64_t sample_size = map_size * n_classes; real total_weight_acc = 0; real output_acc = 0; for (int b = 0; b < batch_size; b++) { for (int elem = 0; elem < map_size; elem++) { int cur_target = target_data[b * map_size + elem] - TH_INDEX_BASE; if (cur_target == ignore_index) continue; THAssert(cur_target >= 0 && cur_target < n_classes); real cur_weight = weights ? weights_data[cur_target] : 1.0f; total_weight_acc += cur_weight; output_acc -= input_data[b * sample_size + cur_target * map_size + elem] * cur_weight; } } *total_weight_data = total_weight_acc; *output_data = output_acc; if (sizeAverage && *total_weight_data) *output_data /= *total_weight_data; THTensor_(free)(input); THIndexTensor_(free)(target); if (weights) THTensor_(free)(weights); } void THNN_(SpatialClassNLLCriterion_updateGradInput)( THNNState *state, THTensor *input, THIndexTensor *target, THTensor *gradOutput, THTensor *gradInput, bool sizeAverage, THTensor *weights, THTensor *total_weight, int64_t ignore_index, bool reduce) { INITIAL_CHECK; THTensor_(resizeAs)(gradInput, input); THTensor_(zero)(gradInput); THArgCheck(THTensor_(isContiguous)(gradInput), 4, "gradInput must be contiguous"); THNN_CHECK_SHAPE(input, gradInput); ignore_index -= TH_INDEX_BASE; if (!reduce) { GRADOUTPUT_SHAPE_CHECK; int64_t batch_size = THTensor_(size)(input, 0); int64_t H = THTensor_(size)(input, 2); int64_t W = THTensor_(size)(input, 3); int64_t b, h, w; #pragma omp parallel for private(b, h, w) for (b = 0; b < batch_size; b++) { for (h = 0; h < H; h++) { for (w = 0; w < W; w++) { int64_t cur_target = (int64_t)THIndexTensor_(get3d)(target, b, h, w) - TH_INDEX_BASE; if (cur_target == ignore_index) { continue; } real value = -(weights ? THTensor_fastGet1d(weights, cur_target) : 1.0f); real gradOutput_value = THTensor_fastGet3d(gradOutput, b, h, w); THTensor_fastSet4d(gradInput, b, cur_target, h, w, value * gradOutput_value); } } } return; } THNN_CHECK_DIM_SIZE(gradOutput, 1, 0, 1); real *total_weight_data = THTensor_(data)(total_weight); if (*total_weight_data <= 0) return; target = THIndexTensor_(newContiguous)(target); weights = weights ? THTensor_(newContiguous)(weights) : NULL; THIndex_t *target_data = THIndexTensor_(data)(target); real *weights_data = weights ? THTensor_(data)(weights) : NULL; real *gradInput_data = THTensor_(data)(gradInput); int64_t batch_size = THTensor_(size)(input, 0); int64_t n_classes = THTensor_(size)(input, 1); int64_t map_size = THTensor_(size)(input, 2) * THTensor_(size)(input, 3); int64_t sample_size = map_size * n_classes; real normalize = (sizeAverage) ? *total_weight_data : 1.0f; int b; #pragma omp parallel for for (b = 0; b < batch_size; b++) { int elem; for (elem = 0; elem < map_size; elem++) { int cur_target = target_data[b * map_size + elem] - TH_INDEX_BASE; if (cur_target == ignore_index) continue; THAssert(cur_target >= 0 && cur_target < n_classes); int index = b * sample_size + cur_target * map_size + elem; gradInput_data[index] = -(weights ? weights_data[cur_target] : 1.0f) / normalize * THTensor_fastGet1d(gradOutput, 0); } } THIndexTensor_(free)(target); if (weights) THTensor_(free)(weights); } #undef INITIAL_CHECK #endif
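/*
  Editor's sketch, not part of the TH generic source: with reduce=true the
  forward pass above accumulates, for every (b, h, w) with t = target[b][h][w],
      loss -= weight[t] * input[b][t][h][w],
  skips ignore_index, and divides by the accumulated weight when sizeAverage
  is set.  Standalone version over flat 0-based C arrays (the TH_INDEX_BASE
  adjustment is omitted):
*/
#include <stddef.h>

static float spatial_nll_forward(const float *input, const long *target,
    const float *weights,          /* may be NULL: all class weights are 1 */
    long batch, long n_classes, long map_size,   /* map_size = H * W */
    long ignore_index, int size_average)
{
    float loss = 0.0f, total_weight = 0.0f;
    for (long b = 0; b < batch; b++)
        for (long e = 0; e < map_size; e++)
        {
            long t = target[b * map_size + e];
            if (t == ignore_index) continue;
            float w = weights ? weights[t] : 1.0f;
            total_weight += w;
            /* input is laid out as [batch][n_classes][H][W] */
            loss -= w * input[(b * n_classes + t) * map_size + e];
        }
    if (size_average && total_weight > 0.0f) loss /= total_weight;
    return loss;
}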