mkldnn_requantize-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file mkldnn_requantize-inl.h
 * \brief
 * \author Jin Huang, Xinyu Chen
 */

#ifndef MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_REQUANTIZE_INL_H_
#define MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_REQUANTIZE_INL_H_
#if MXNET_USE_ONEDNN == 1
#include <algorithm>
#include <string>
#include <vector>
#include "../../nn/mkldnn/mkldnn_base-inl.h"
#include "../requantize-inl.h"

namespace mxnet {
namespace op {

template <typename DstType>
static void MKLDNNRequantizeForwardKer(const nnvm::NodeAttrs& attrs,
                                       const OpContext& ctx,
                                       const std::vector<NDArray>& inputs,
                                       const std::vector<OpReqType>& req,
                                       const std::vector<NDArray>& outputs,
                                       const float real_range) {
  using namespace mshadow;
  using namespace mxnet_op;
  using red::limits::MaxValue;
  using red::limits::MinValue;
  typedef int32_t SrcDType;
  // check shapes
  size_t i_dim = inputs[0].shape().ndim();
  size_t o_dim = outputs[0].shape().ndim();
  CHECK_EQ(i_dim, o_dim);
  float first_quantized_range = MinAbs(MinValue<SrcDType>(), MaxValue<SrcDType>());
  float first_real_range =
      MaxAbs(*inputs[1].data().dptr<float>(), *inputs[2].data().dptr<float>());
  float first_scale        = first_real_range / first_quantized_range;
  float second_real_range  = real_range;
  float second_quantized_range = 0.f;
  if (std::is_same<DstType, int8_t>::value) {
    second_quantized_range           = MinAbs(MaxValue<DstType>(), MinValue<DstType>());
    *outputs[1].data().dptr<float>() = -second_real_range;
    *outputs[2].data().dptr<float>() = second_real_range;
  } else if (std::is_same<DstType, uint8_t>::value) {
    second_quantized_range           = MaxValue<DstType>();
    *outputs[1].data().dptr<float>() = 0.f;
    *outputs[2].data().dptr<float>() = second_real_range;
  } else {
    LOG(FATAL) << "Unsupported requantize output type";
  }
  float second_scale = second_quantized_range / second_real_range;
  float scale        = first_scale * second_scale;
  mkldnn::primitive_attr attr;
  const int mask = 0;
  std::vector<float> scales = {scale};
  attr.set_output_scales(mask, scales);
  mkldnn::engine cpu_engine = mxnet::CpuEngine::Get()->get_engine();

  NDArray in_buffer = inputs[0];
  if (inputs[0].IsView() && inputs[0].IsMKLDNNData())
    in_buffer = inputs[0].Reorder2Default();

  auto i_mem  = in_buffer.GetMKLDNNData();
  auto i_desc = i_mem->get_desc();
  auto o_desc = i_desc;
  o_desc.data.data_type = get_mkldnn_type_t<DstType>();
  auto reorder_pd = mkldnn::reorder::primitive_desc(cpu_engine, i_desc, cpu_engine, o_desc, attr);
  auto o_mem      = CreateMKLDNNMem(outputs[0], o_desc, req[0]);
  MKLDNNStream::Get()->RegisterPrimArgs(
      mkldnn::reorder(reorder_pd), {{MKLDNN_ARG_FROM, *i_mem}, {MKLDNN_ARG_TO, *o_mem.second}});
  CommitOutput(outputs[0], o_mem);
  MKLDNNStream::Get()->Submit();
}

static void MKLDNNRequantizeForward(const nnvm::NodeAttrs& attrs,
                                    const OpContext& ctx,
                                    const std::vector<NDArray>& inputs,
                                    const std::vector<OpReqType>& req,
                                    const std::vector<NDArray>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  using red::limits::MaxValue;
  using red::limits::MinValue;
  typedef int32_t SrcDType;
  typedef int8_t DstDType;
  const RequantizeParam& param = nnvm::get<RequantizeParam>(attrs.parsed);
  float real_range;
  if (param.min_calib_range.has_value() && param.max_calib_range.has_value()) {
    // Model is calibrated
    real_range = MaxAbs(param.min_calib_range.value(), param.max_calib_range.value());
  } else {
    // Model is not calibrated: compute the observed int32 range on the fly
    NDArray in_buffer = inputs[0].Reorder2Default();
    auto in_ptr       = in_buffer.data().dptr<SrcDType>();
    auto nthreads     = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    SrcDType data_min = MaxValue<SrcDType>();
    SrcDType data_max = MinValue<SrcDType>();
    std::vector<SrcDType> data_maxs(nthreads, data_max);
    std::vector<SrcDType> data_mins(nthreads, data_min);
#pragma omp parallel for num_threads(nthreads)
    for (index_t i = 0; i < static_cast<index_t>(in_buffer.shape().Size()); i++) {
      int tid = omp_get_thread_num();
      if (in_ptr[i] > data_maxs[tid])
        data_maxs[tid] = in_ptr[i];
      if (in_ptr[i] < data_mins[tid])
        data_mins[tid] = in_ptr[i];
    }
    for (index_t i = 0; i < nthreads; i++) {
      if (data_maxs[i] > data_max)
        data_max = data_maxs[i];
      if (data_mins[i] < data_min)
        data_min = data_mins[i];
    }
    float src_range    = MinAbs(MinValue<SrcDType>(), MaxValue<SrcDType>());
    SrcDType data_range = MaxAbs(data_min, data_max);
    float data_scale =
        MaxAbs(*inputs[1].data().dptr<float>(), *inputs[2].data().dptr<float>());
    real_range = data_range * data_scale / src_range;
  }
  auto out_type = GetQuantizeOutputType(param);
  if (out_type == mshadow::kUint8) {
    MKLDNNRequantizeForwardKer<uint8_t>(attrs, ctx, inputs, req, outputs, real_range);
  } else if (out_type == mshadow::kInt8) {
    MKLDNNRequantizeForwardKer<int8_t>(attrs, ctx, inputs, req, outputs, real_range);
  } else {
    LOG(FATAL) << "mkldnn requantize op only supports int8 and uint8 as output type";
  }
}

}  // namespace op
}  // namespace mxnet

#endif  // MXNET_USE_ONEDNN == 1
#endif  // MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_REQUANTIZE_INL_H_
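
A quick standalone illustration of the scale arithmetic above may help: requantization composes an int32-to-real scale with a real-to-int8 scale, and oneDNN applies the product in a single reorder. This is a minimal sketch in plain C with made-up calibration numbers (not MXNet API):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical calibrated ranges, for illustration only. */
    float first_real_range  = 800.0f;        /* |min|,|max| of the int32 data */
    float first_quantized   = 2147483647.0f; /* MinAbs(INT32_MIN, INT32_MAX)  */
    float second_real_range = 800.0f;        /* real range after requantizing */
    float second_quantized  = 127.0f;        /* MinAbs(INT8_MIN, INT8_MAX)    */

    /* scale = first_scale * second_scale, exactly as in the kernel above */
    float scale = (first_real_range / first_quantized) *
                  (second_quantized / second_real_range);

    int32_t q32 = 1000000000;                /* an int32-quantized value      */
    printf("requantized int8 value ~= %.1f\n", q32 * scale); /* ~59.1 */
    return 0;
}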
LA3.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#define N 500

long long int a[N][N], b[N][N], c[N][N], c1[N][N], c2[N][N],
              c3[N][N], c4[N][N], c5[N][N];

int main(int argc, char *argv[])
{
    omp_set_num_threads(8);
    int nthreads, i, j, k;
    double start, end, start1, end1, start2, end2, start3, end3,
           start4, end4, start5, end5;
    FILE *fptr, *fptr1;

    fptr = fopen("matA_500.txt", "r");
    if (fptr == NULL) { exit(1); }
    fptr1 = fopen("matB_500.txt", "r");
    if (fptr1 == NULL) { exit(1); }

    /* File input is inherently sequential, so read both matrices before the
     * parallel region: fscanf inside "#pragma omp for" races on the shared
     * file position and scrambles the element order. */
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++)
            fscanf(fptr, "%lld", &a[i][j]);
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++)
            fscanf(fptr1, "%lld", &b[i][j]);

    /*** Create a parallel region, explicitly scoping the loop indices ***/
    #pragma omp parallel private(i, j, k)
    {
        #pragma omp single
        {
            nthreads = omp_get_num_threads();
            printf("Starting matrix multiply example with %d threads\n", nthreads);
            printf("Initializing matrices...\n");
        }

        /*** Initialize the six result matrices ***/
        #pragma omp for
        for (i = 0; i < N; i++)
            for (j = 0; j < N; j++)
                c[i][j] = c1[i][j] = c2[i][j] = c3[i][j] = c4[i][j] = c5[i][j] = 0;

        /* Each kernel is timed through "#pragma omp single", whose implied
         * exit barrier guarantees the start time is taken before any thread
         * enters the loop and the end time only after all threads leave it;
         * the shared timing variables are written by exactly one thread. */

        /* i-j-k */
        #pragma omp single
        start = omp_get_wtime();
        #pragma omp for
        for (i = 0; i < N; i++)
            for (j = 0; j < N; j++)
                for (k = 0; k < N; k++)
                    c[i][j] += a[i][k] * b[k][j];
        #pragma omp single
        end = omp_get_wtime() - start;

        /* j-k-i */
        #pragma omp single
        start1 = omp_get_wtime();
        #pragma omp for
        for (j = 0; j < N; j++)
            for (k = 0; k < N; k++)
                for (i = 0; i < N; i++)
                    c1[i][j] += a[i][k] * b[k][j];
        #pragma omp single
        end1 = omp_get_wtime() - start1;

        /* k-j-i: the worksharing pragma goes on the j loop, because splitting
         * the k loop across threads would let two threads update the same
         * c2[i][j] concurrently (a data race). */
        #pragma omp single
        start2 = omp_get_wtime();
        for (k = 0; k < N; k++) {
            #pragma omp for
            for (j = 0; j < N; j++)
                for (i = 0; i < N; i++)
                    c2[i][j] += a[i][k] * b[k][j];
        }
        #pragma omp single
        end2 = omp_get_wtime() - start2;

        /* k-i-j: same reasoning, pragma on the i loop */
        #pragma omp single
        start3 = omp_get_wtime();
        for (k = 0; k < N; k++) {
            #pragma omp for
            for (i = 0; i < N; i++)
                for (j = 0; j < N; j++)
                    c3[i][j] += a[i][k] * b[k][j];
        }
        #pragma omp single
        end3 = omp_get_wtime() - start3;

        /* j-k-i again (kept from the original measurement set) */
        #pragma omp single
        start4 = omp_get_wtime();
        #pragma omp for
        for (j = 0; j < N; j++)
            for (k = 0; k < N; k++)
                for (i = 0; i < N; i++)
                    c4[i][j] += a[i][k] * b[k][j];
        #pragma omp single
        end4 = omp_get_wtime() - start4;

        /* i-k-j */
        #pragma omp single
        start5 = omp_get_wtime();
        #pragma omp for
        for (i = 0; i < N; i++)
            for (k = 0; k < N; k++)
                for (j = 0; j < N; j++)
                    c5[i][j] += a[i][k] * b[k][j];
        #pragma omp single
        end5 = omp_get_wtime() - start5;
    } /*** End of parallel region ***/

    /*printf("Result Matrix:\n");
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) printf("%lld ", c1[i][j]);
        printf("\n");
    }
    printf("******************************************************\n");*/

    printf("N : %d\n", N);
    printf("%.6g\n", end);
    printf("%.6g\n", end1);
    printf("%.6g\n", end2);
    printf("%.6g\n", end3);
    printf("%.6g\n", end4);
    printf("%.6g\n", end5);
    return 0;
}
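
A note on what LA3.c measures: with long long elements, each 500x500 matrix occupies 500*500*8 B = 2 MB, so the three matrices cannot stay resident in cache and the stride of the innermost loop dominates the run time. A minimal sketch of the cache-friendly i-k-j kernel (illustrative, row-major arrays as above):

#define N 500

/* Cache-friendly i-k-j kernel: for a fixed (i,k) pair the inner loop walks
 * c[i][*] and b[k][*] with unit stride while a[i][k] stays in a register. */
void matmul_ikj(const long long a[N][N], const long long b[N][N],
                long long c[N][N])
{
    for (int i = 0; i < N; i++)
        for (int k = 0; k < N; k++) {
            long long aik = a[i][k];       /* loop-invariant load   */
            for (int j = 0; j < N; j++)
                c[i][j] += aik * b[k][j];  /* two unit-stride reads */
        }
}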
GB_unaryop__abs_uint64_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint64_fp64
// op(A') function:  GB_tran__abs_uint64_fp64

// C type:   uint64_t
// A type:   double
// cast:     uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop:  cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z ; GB_CAST_UNSIGNED(z,x,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)            \
{                                    \
    /* aij = Ax [pA] */              \
    GB_GETA (aij, Ax, pA) ;          \
    /* Cx [pC] = op (cast (aij)) */  \
    GB_CASTING (x, aij) ;            \
    GB_OP (GB_CX (pC), x) ;          \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_uint64_fp64
(
    uint64_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_uint64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
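
For readers unfamiliar with the macro style, this is roughly what the generated Cx = op(cast(Ax)) loop expands to once the GB_* macros are substituted. A plain C cast stands in here for GraphBLAS's GB_CAST_UNSIGNED (an assumption; the real macro also guards against out-of-range doubles):

#include <stdint.h>

void unop_abs_uint64_fp64_sketch(uint64_t *Cx, const double *Ax,
                                 int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++) {
        double aij = Ax[p];          /* GB_GETA                     */
        uint64_t z = (uint64_t)aij;  /* GB_CASTING (approximation)  */
        Cx[p] = z;                   /* GB_OP: identity (abs is a   */
    }                                /* no-op for unsigned output)  */
}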
DRB068-restrictpointer2-orig-no.c
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory

Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)

LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the disclaimer (as noted below)
  in the documentation and/or other materials provided with the
  distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
  may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
The restrict type qualifier is an indication to the compiler that, if the
memory addressed by the restrict-qualified pointer is modified, no other
pointer will access that same memory. If a particular chunk of memory is
not modified, it can be aliased through more than one restricted pointer.

A C99 restrict feature. For gcc, you must use -std=c99 to compile this
program.
*/

#include <stdlib.h>
#include <stdio.h>

void foo(int n, int * restrict a, int * restrict b, int * restrict c)
{
    int i;
    #pragma omp parallel for schedule(dynamic)
    for (i = 0; i < n; i++)
        a[i] = b[i] + c[i];
}

int main()
{
    int n = 1000;
    int *a, *b, *c;

    a = (int*) malloc(n * sizeof(int));
    if (a == 0) {
        fprintf(stderr, "skip the execution due to malloc failures.\n");
        return 1;
    }
    b = (int*) malloc(n * sizeof(int));
    if (b == 0) {
        fprintf(stderr, "skip the execution due to malloc failures.\n");
        return 1;
    }
    c = (int*) malloc(n * sizeof(int));
    if (c == 0) {
        fprintf(stderr, "skip the execution due to malloc failures.\n");
        return 1;
    }

    foo(n, a, b, c);

    free(a);
    free(b);
    free(c);
    return 0;
}
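
As a sketch of why the qualifier matters (illustrative, not part of the benchmark): restrict promises the compiler that stores through a cannot alias the loads through b and c, so it may keep values in registers and vectorize freely without runtime alias checks.

/* Without restrict the compiler must assume a[i] = ... may overwrite b or c
 * and reload them on every iteration; with restrict it need not. */
void add_noalias(int n, int * restrict a,
                 const int * restrict b, const int * restrict c)
{
    for (int i = 0; i < n; i++)
        a[i] = b[i] + c[i];   /* safe to vectorize: no aliasing possible */
}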
hsrp_fmt_plug.c
/*
 * Cracker for MD5 authentication in HSRP, VRRP and GLBP.
 * http://www.rfc-editor.org/rfc/rfc1828.txt
 *
 * This is dedicated to Darya. You inspire me.
 *
 * This software is Copyright (c) 2014, Dhiru Kholia <dhiru [at] openwall.com>,
 * and it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_hsrp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_hsrp);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
// OMP_SCALE tuned on core i7 4-core HT
//   2048 -  8850k 6679k
//   4096 - 10642k 7278k
//   8192 - 10489k 7532k
//   16k  - 10413k 7694k
//   32k  - 12111k 7803k ** this value chosen
//   64k  - 12420k 6523k
//   128k - 12220k 6741k
#define OMP_SCALE 32768
#endif

#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL        "hsrp"
#define FORMAT_NAME         "\"MD5 authentication\" HSRP, VRRP, GLBP"
#define FORMAT_TAG          "$hsrp$"
#define TAG_LENGTH          (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME      "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT   ""
#define BENCHMARK_LENGTH    0
#define PLAINTEXT_LENGTH    55 // Must fit in a single MD5 block
#define BINARY_SIZE         16
#define BINARY_ALIGN        sizeof(ARCH_WORD_32)
#define SALT_SIZE           sizeof(struct custom_salt)
#define REAL_SALT_SIZE      50
#define SALT_ALIGN          sizeof(int)
#define MIN_KEYS_PER_CRYPT  1
#define MAX_KEYS_PER_CRYPT  1
#define HEXCHARS            "0123456789abcdef"

static struct fmt_tests tests[] = {
	{"$hsrp$000004030a64010000000000000000000a000064041c010000000a0000140000000000000000000000000000000000000000$52e1db09d18d695b8fefb3730ff8d9d6", "password12345"},
	{"$hsrp$000004030a5a01000000000000000000ac102801041c01000000ac1028140000000000000000000000000000000000000000$f15dfa631a0679e0801f8e6b0c0c17ac", "openwall"},
	{"$hsrp$000010030a64010000000000000000000a000064041c010000000a0000140000000000000000000000000000000000000000$f02fc41b1b516e2d1261d8800d39ccea", "openwall12345"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

static struct custom_salt {
	int length;
	unsigned char salt[2048]; // be safe ;)
} *cur_salt;

static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* omp_get_max_threads(), not omp_get_num_threads(): the latter
	 * returns 1 outside a parallel region, which disabled the scaling. */
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	saved_len = mem_calloc_tiny(sizeof(*saved_len) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q = NULL;
	int len;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	q = strrchr(ciphertext, '$');
	if (!q || q + 1 == p)
		return 0;
	q = q + 1;
	// if ((q - p - 1) > REAL_SALT_SIZE * 2)
	//	return 0;
	len = strspn(q, HEXCHARS);
	if (len != BINARY_SIZE * 2 || len != strlen(q))
		return 0;
	if (strspn(p, HEXCHARS) != q - p - 1)
		return 0;
	if (q - p > (sizeof(cur_salt->salt) - 1) * 2)
		return 0;
	return 1;
}

static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	int i, len;

	memset(&cs, 0, SALT_SIZE);
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	len = (strrchr(ciphertext, '$') - ciphertext) / 2;
	for (i = 0; i < len; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) |
			atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];
	cs.length = len;
	return &cs;
}

static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

// this place would normally contain "print_hex" but I do not want to piss off magnum (yet again)

#define PUTCHAR(buf, index, val) ((unsigned char*)(buf))[index] = (val)

static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		uint32_t block[16] = { 0 };
		int len = saved_len[index];
		MD5_CTX ctx;

		MD5_Init(&ctx);
		// key + keyfill
		memcpy(block, saved_key[index], len);
		PUTCHAR(block, len, 0x80);
		block[14] = len << 3;
#if (ARCH_LITTLE_ENDIAN==0)
		block[14] = JOHNSWAP(block[14]);
#endif
		MD5_Update(&ctx, (unsigned char*)block, 64);
		// data
		MD5_Update(&ctx, cur_salt->salt, cur_salt->length);
		// key (again)
		MD5_Update(&ctx, saved_key[index], len);
		MD5_Final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
	if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
		return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

static void hsrp_set_key(char *key, int index)
{
	saved_len[index] = strlen(key);
	/* strncpy will pad with zeros, which is needed */
	strncpy(saved_key[index], key, sizeof(saved_key[0]));
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_hsrp = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		hsrp_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif
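
The digest layout built in crypt_all can be restated as a standalone sketch: MD5 over a 64-byte block holding the key with MD5-style padding, followed by the packet bytes, followed by the key again. This version uses OpenSSL's legacy MD5 API instead of JtR's md5.h, and assumes a little-endian host and a key shorter than 56 bytes:

#include <openssl/md5.h>
#include <stdint.h>
#include <string.h>

void hsrp_digest(const char *key, const unsigned char *pkt, size_t pkt_len,
                 unsigned char out[16])
{
    unsigned char block[64] = { 0 };
    size_t len = strlen(key);          /* must fit in one MD5 block (< 56) */
    uint64_t bits = (uint64_t)len << 3;

    memcpy(block, key, len);
    block[len] = 0x80;                 /* MD5-style padding byte           */
    memcpy(block + 56, &bits, 8);      /* bit length; little-endian hosts  */

    MD5_CTX ctx;
    MD5_Init(&ctx);
    MD5_Update(&ctx, block, 64);       /* padded key block, fed as data    */
    MD5_Update(&ctx, pkt, pkt_len);    /* the captured HSRP packet bytes   */
    MD5_Update(&ctx, key, len);        /* key again                        */
    MD5_Final(out, &ctx);
}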
strip_fmt_plug.c
/* STRIP cracker patch for JtR. Hacked together during September of
 * 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted. */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_strip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_strip);
#else

#include <string.h>
#include <stdint.h>
#include <assert.h>
#include <errno.h>

#include "arch.h"
#include "aes.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "pbkdf2_hmac_sha1.h"

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 4 // tuned on core i7
#endif
#endif

#include "memdbg.h"

#define FORMAT_LABEL            "STRIP"
#define FORMAT_NAME             "Password Manager"
#define FORMAT_TAG              "$strip$*"
#define FORMAT_TAG_LEN          (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME          "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME          "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define BINARY_SIZE             0
#define PLAINTEXT_LENGTH        125
#define SALT_SIZE               sizeof(struct custom_salt)
#define BINARY_ALIGN            1
#define SALT_ALIGN              1
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

#define ITERATIONS              4000
#define FILE_HEADER_SZ          16
#define SQLITE_FILE_HEADER      "SQLite format 3"
#define HMAC_SALT_MASK          0x3a
#define FAST_PBKDF2_ITER        2
#define SQLITE_MAX_PAGE_SIZE    65536

static struct fmt_tests strip_tests[] = {
	/* test vector created by STRIP for Windows */
{"$strip$*66cd7a4ff7716f7b86cf587ce18eb39518e096eb152615ada8d007d9f035c20c711e62cbde96d8c3aad2a4658497a6119addc97ed3c970580cd666f301c63ce041a1748ee5c3861ada3cd6ee75b5d68891f731b3c2e3294b08e10ce3c23c2bfac158f8c45d0332791f64d1e3ad55e936d17a42fef5228e713b8188050c9a61c7f026af6203172cf2fc54c8b439e2260d7a00a4156713f92f8466de5c05cd8701e0d3d9cb3f392ae918e6900d5363886d4e1ed7e90da76b180ef9555c1cd358f6d1ee3755a208fee4d5aa1c776a0888200b21a3da6614d5fe2303e78c09563d862d19deecdc9f0ec7fbc015689a74f4eb477d9f22298b1b3f866ca4cb772d74821a1f8d03fd5fd0d020ffd41dd449b431ddf3bbfba3399311d9827be428202ee56e2c2a4e91f3415b4282c691f16cd447cf877b576ab963ea4ea3dc7d8c433febdc36607fd2372c4165abb59e3e75c28142f1f2575ecca6d97a9f782c3410151f8bbcbc65a42fdc59fdc4ecd8214a2bbd3a4562fac21c48f7fc69a4ecbcf664b4e435d7734fde5494e4d80019a0302e22565ed6a49b29cecf81077fd92f0105d18a421e04ee0deaca6389214abc7182db7003da7e267816531010b236eadfea20509718ff743ed5ad2828b6501dd84a371feed26f0514bbda69118a69048ebb71e3e2c54fb918422f1320724a353fe8d81a562197454d2c67443be8a4008a756aec0998386a5fd48e379befe966b42dfa6684ff049a61b51de5f874a12ab7d9ab33dc84738e036e294c22a07bebcc95be9999ab988a1fa1c944ab95be970045accb661249be8cc34fcc0680cb1aff8dfee21f586c571b1d09bf370c6fc131418201e0414acb2e4005b0b6fda1f3d73b7865823a008d1d3f45492a960dbdd6331d78d9e2e6a368f08ee3456b6d78df1d5630f825c536fff60bad23fb164d151d80a03b0c78edbfdee5c7183d7527e289428cf554ad05c9d75011f6b233744f12cd85fbb62f5d1ae22f43946f24a483a64377bf3fa16bf32cea1ab4363ef36206a5989e97ff847e5d645791571b9ecd1db194119b7663897b9175dd9cc123bcc7192eaf56d4a2779c502700e88c5c20b962943084bcdf024dc4f19ca649a860bdbd8f8f9b4a9d03027ae80f4a3168fc030859acb08a871950b024d27306cdc1a408b2b3799bb8c1f4b6ac3593aab42c962c979cd9e6f59d029f8d392315830cfcf4066bf03e0fc5c0f3630e9c796ddb38f51a2992b0a61d6ef115cb34d36c7d94b6c9d49dfe8d064d92b483f12c14fa10bf1170a575e4571836cef0a1fbf9f8b6968abda5e964bb16fd62fde1d1df0f5ee9c68ce568014f46f1717b6cd948b0da9a6f4128da338960dbbcbc9c9c3b486859c06e5e2338db3458646054ccd59bb940c7fc60cda34f633c26dde83bb717b75fefcbd09163f147d59a6524752a47cd94", "openwall"}, /* test vector created by STRIP Password Manager (for Android) */ 
{"$strip$*78adb0052203efa1bd1b02cac098cc9af1bf7e84ee2eaebaaba156bdcfe729ab12ee7ba8a84e79d11dbd67eee82bcb24be99dbd5db7f4c3a62f188ce4b48edf4ebf6cbf5a5869a61f83fbdb3cb4bf79b3c2c898f422d71eab31afdf3a8d4e97204dedbe7bd8b5e4c891f4880ca917c8b2f67ca06035e7f8db1fae91c45db6a08adf96ec5ddcb9e60b648acf883a7550ea5b67e2d27623e8de315f29cba48b8b1d1bde62283615ab88293b29ad73ae404a42b13e35a95770a504d81e335c00328a6290e411fa2708a697fab7c2d17ff5d0a3fe508118bb43c3d5e72ef563e0ffd337f559085a1373651ca2b8444f4437d8ac0c19aa0a24b248d1d283062afbc3b4ccc9b1861f59518eba771f1d9707affe0222ff946da7c014265ab4ba1f6417dd22d92e4adf5b7e462588f0a42e061a3dad041cbb312d8862aed3cf490df50b710a695517b0c8771a01f82db09231d392d825f5667012e349d2ed787edf8448bbb1ff548bee3a33392cd209e8b6c1de8202f6527d354c3858b5e93790c4807a8967b4c0321ed3a1d09280921650ac33308bd04f35fb72d12ff64a05300053358c5d018a62841290f600f7df0a7371b6fac9b41133e2509cb90f774d02e7202185b9641d063ed38535afb81590bfd5ad9a90107e4ff6d097ac8f35435f307a727f5021f190fc157956414bfce4818a1e5c6af187485683498dcc1d56c074c534a99125c6cfbf5242087c6b0ae10971b0ff6114a93616e1a346a22fcac4c8f6e5c4a19f049bbc7a02d2a31d39548f12440c36dbb253299a11b630e8fd88e7bfe58545d60dce5e8566a0a190d816cb775bd859b8623a7b076bce82c52e9cff6a2d221f9d3fd888ac30c7e3000ba8ed326881ffe911e27bb8982b56caa9a12065721269976517d2862e4a486b7ed143ee42c6566bba04c41c3371220f4843f26e328c33a5fb8450dadc466202ffc5c49cc95827916771e49e0602c3f8468537a81cf2fa1db34c090fccab6254436c05657cf29c3c415bb22a42adeac7870858bf96039b81c42c3d772509fdbe9a94eaf99ee9c59bac3ea97da31e9feac14ed53a0af5c5ebd2e81e40a5140da4f8a44048d5f414b0ba9bfb8024c7abaf5346fde6368162a045d1196f81d55ed746cc6cbd7a7c9cdbfa392279169626437da15a62730c2990772e106a5b84a60edaa6c5b8030e1840aa6361f39a12121a1e33b9e63fb2867d6241de1fb6e2cd1bd9a78c7122258d052ea53a4bff4e097ed49fc17b9ec196780f4c6506e74a5abb10c2545e6f7608d2eefad179d54ad31034576be517affeb3964c65562538dd6ea7566a52c75e4df593895539609a44097cb6d31f438e8f7717ce2bf777c76c22d60b15affeb89f08084e8f316be3f4aefa4fba8ec2cc1dc845c7affbc0ce5ebccdbfde5ebab080a285f02bdfb76c6dbd243e5ee1e5d", "p@$$w0rD"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked; static struct custom_salt { unsigned char salt[16]; unsigned char data[1024]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; int extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; /* skip over "$strip$" and first '*' */ if ((p = strtokm(ctcopy, "*")) == NULL) /* salt + data */ goto err; if (hexlenl(p, &extra) != 2048 || extra) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += FORMAT_TAG_LEN; /* skip over "$strip$" and first '*' */ p = strtokm(ctcopy, "*"); for (i = 0; i < 16; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; for (; i < 1024; i++) cs.data[i] = 
atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } /* verify validity of page */ static int verify_page(unsigned char *page1) { uint32_t pageSize; uint32_t usableSize; //if (memcmp(page1, SQLITE_FILE_HEADER, 16) != 0) { // return -1; //} if (page1[19] > 2) { return -1; } if (memcmp(&page1[21], "\100\040\040", 3) != 0) { return -1; } pageSize = (page1[16] << 8) | (page1[17] << 16); if (((pageSize - 1) & pageSize) != 0 || pageSize > SQLITE_MAX_PAGE_SIZE || pageSize <= 256) { return -1; } if ((pageSize & 7) != 0) { return -1; } usableSize = pageSize - page1[20]; if (usableSize < 480) { return -1; } return 0; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { unsigned char master[MAX_KEYS_PER_CRYPT][32]; unsigned char output[24]; unsigned char *iv_in; unsigned char iv_out[16]; int size,i; int page_sz = 1008; /* 1024 - strlen(SQLITE_FILE_HEADER) */ int reserve_sz = 16; /* for HMAC off case */ AES_KEY akey; #ifdef SIMD_COEF_32 int len[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { len[i] = strlen(saved_key[i+index]); pin[i] = (unsigned char*)saved_key[i+index]; pout[i] = master[i]; } pbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt, 16, ITERATIONS, pout, 32, 0); #else pbkdf2_sha1((unsigned char *)saved_key[index], strlen(saved_key[index]), cur_salt->salt, 16, ITERATIONS, master[0], 32, 0); #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { //memcpy(output, SQLITE_FILE_HEADER, FILE_HEADER_SZ); size = page_sz - reserve_sz; iv_in = cur_salt->data + size + 16; memcpy(iv_out, iv_in, 16); AES_set_decrypt_key(master[i], 256, &akey); /* * decrypting 8 bytes from offset 16 is enough since the * verify_page function looks at output[16..23] only. */ AES_cbc_encrypt(cur_salt->data + 16, output + 16, 8, &akey, iv_out, AES_DECRYPT); if (verify_page(output) == 0) { cracked[index+i] = 1; } else cracked[index+i] = 0; } } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } static void strip_set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_strip = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT, { NULL }, { FORMAT_TAG }, strip_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, strip_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
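
The key-setup step above (JtR's internal pbkdf2_sha1 / pbkdf2_sha1_sse) can be reproduced with OpenSSL for reference; the constants mirror the format's #defines (16-byte salt, ITERATIONS = 4000, 32-byte AES-256 key). A minimal sketch:

#include <openssl/evp.h>
#include <string.h>

/* Derive the 32-byte STRIP master key from a candidate password. */
int strip_derive_key(const char *password, const unsigned char salt[16],
                     unsigned char master[32])
{
    /* PBKDF2-HMAC-SHA1, 4000 iterations, as in the plugin above. */
    return PKCS5_PBKDF2_HMAC_SHA1(password, (int)strlen(password),
                                  salt, 16, 4000, 32, master);
}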
NETLM_fmt_plug.c
/*
 * NETLM_fmt.c -- LM Challenge/Response
 *
 * Written by JoMo-Kun <jmk at foofus.net> in 2007
 * and placed in the public domain.
 *
 * Performance and OMP fixes by magnum 2011
 *
 * This algorithm is designed for performing brute-force cracking of the LM
 * challenge/response pairs exchanged during network-based authentication
 * attempts [1]. The captured challenge/response pairs from these attempts
 * should be stored using the L0phtCrack 2.0 LC format, specifically:
 * username:unused:unused:lm response:ntlm response:challenge. For example:
 *
 * CORP\Administrator:::25B2B477CE101D83648BB087CE7A1C217F51C7FC64C0EBB1::
 * C8BD0C1630A9ECF7A95F494A8F0B2CB4A3F25B1225514304:1122334455667788
 *
 * It should be noted that an LM authentication response is not the same as an
 * LM password hash, which can be extracted using tools such as FgDump [2]. LM
 * responses can be gathered via normal network capture or via tools which
 * perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can
 * also be harvested using a modified Samba service [5] in conjunction with
 * some trickery to convince the user to connect to it. I leave what that
 * trickery may actually be as an exercise for the reader (HINT: Karma, NMB
 * broadcasts, IE, Outlook, social engineering, ...).
 *
 * [1] http://davenport.sourceforge.net/ntlm.html#theLmResponse
 * [2] http://www.foofus.net/~fizzgig/fgdump/
 * [3] http://ettercap.sourceforge.net/
 * [4] http://www.oxid.it/cain.html
 * [5] http://www.foofus.net/jmk/smbchallenge.html
 *
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_NETLM;
#elif FMT_REGISTERS_H
john_register_one(&fmt_NETLM);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 131072 // core i7 no HT
#endif
#endif // __MIC__
#endif

#include "misc.h"
#include "common.h"
#include "formats.h"
#include "memory.h"
#include "unicode.h"
#include <openssl/des.h>
#include "memdbg.h"

#ifndef uchar
#define uchar unsigned char
#endif

#define FORMAT_LABEL         "netlm"
#define FORMAT_NAME          "LM C/R"
#define FORMAT_TAG           "$NETLM$"
#define FORMAT_TAG_LEN       (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME       "DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT    ""
#define BENCHMARK_LENGTH     0
#define PLAINTEXT_LENGTH     14
#define PARTIAL_BINARY_SIZE  8
#define BINARY_SIZE          24
#define BINARY_ALIGN         4
#define SALT_SIZE            8
#define SALT_ALIGN           4
#define CIPHERTEXT_LENGTH    48
#define TOTAL_LENGTH         (8 + 2 * SALT_SIZE + CIPHERTEXT_LENGTH)
#define MIN_KEYS_PER_CRYPT   1
#define MAX_KEYS_PER_CRYPT   1

static struct fmt_tests tests[] = {
	{"", "G3RG3P00!", {"User", "", "", "6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "ntlm-hash", "1122334455667788"} },
	{"$NETLM$1122334455667788$16A7FDFE0CA109B937BFFB041F0E5B2D8B94A97D3FCA1A18", "hiyagerge"},
	{"$NETLM$1122334455667788$B3A1B87DBBD4DF3CFA296198DD390C2F4E2E93C5C07B1D8B", "MEDUSAFGDUMP12"},
	{"$NETLM$1122334455667788$0836F085B124F33895875FB1951905DD2F85252CC731BB25", "cory21"},
	{"$NETLM$1122334455667788$6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "G3RG3P00!"},
	{"", "HIYAGERGE", {"User", "", "", "16A7FDFE0CA109B937BFFB041F0E5B2D8B94A97D3FCA1A18", "ntlm-hash", "1122334455667788"} },
	{"", "MEDUSAFGDUMP12", {"User", "", "", "B3A1B87DBBD4DF3CFA296198DD390C2F4E2E93C5C07B1D8B", "ntlm-hash", "1122334455667788"} },
	{"", "CORY21", {"User", "", "", "0836F085B124F33895875FB1951905DD2F85252CC731BB25", "ntlm-hash", "1122334455667788"} },
	// repeat in exactly the same format that is used in john.pot (lower case hex)
	{"$NETLM$1122334455667788$0836f085b124f33895875fb1951905dd2f85252cc731bb25", "CORY21"},
	{NULL}
};

static uchar (*saved_key)[21];
static uchar (*saved_plain)[PLAINTEXT_LENGTH + 1];
static uchar (*output)[PARTIAL_BINARY_SIZE];
static uchar *challenge;

static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain));
	output = mem_calloc(self->params.max_keys_per_crypt, sizeof(*output));
}

static void done(void)
{
	MEM_FREE(output);
	MEM_FREE(saved_plain);
	MEM_FREE(saved_key);
}

static int valid(char *ciphertext, struct fmt_main *self)
{
	char *pos;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0;
	if (strlen(ciphertext) < TOTAL_LENGTH) return 0;
	if (ciphertext[23] != '$') return 0;
	if (strncmp(&ciphertext[24 + 2 * SALT_SIZE],
	            "00000000000000000000000000000000", 32) == 0)
		return 0; // This is NTLM ESS C/R

	for (pos = &ciphertext[24]; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++)
		;

	if (!*pos && pos - ciphertext - 24 == CIPHERTEXT_LENGTH)
		return 1;
	else
		return 0;
}

static char *prepare(char *split_fields[10], struct fmt_main *self)
{
	char *cp;

	if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN))
		return split_fields[1];
	if (!split_fields[3]||!split_fields[4]||!split_fields[5])
		return split_fields[1];
	if (strlen(split_fields[3]) != CIPHERTEXT_LENGTH)
		return split_fields[1];

	// if LMresp == NTresp then it's NTLM-only, not LM
	if (!strncmp(split_fields[3], split_fields[4], 48))
		return split_fields[1];

	// this string suggests we have an improperly formatted NTLMv2
	if (strlen(split_fields[4]) > 31) {
		if (!strncmp(&split_fields[4][32], "0101000000000000", 16))
			return split_fields[1];
	}
	cp = mem_alloc(7+strlen(split_fields[3])+1+strlen(split_fields[5])+1);
	sprintf(cp, "%s%s$%s", FORMAT_TAG, split_fields[5], split_fields[3]);
	if (valid(cp,self)) {
		char *cp2 = str_alloc_copy(cp);
		MEM_FREE(cp);
		return cp2;
	}
	MEM_FREE(cp);
	return split_fields[1];
}

static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TOTAL_LENGTH + 1];

	memset(out, 0, TOTAL_LENGTH + 1);
	memcpy(out, ciphertext, TOTAL_LENGTH);
	strlwr(&out[FORMAT_TAG_LEN]); /* Exclude: $NETLM$ */
	return out;
}

static void *get_binary(char *ciphertext)
{
	static uchar *binary;
	int i;

	if (!binary) binary = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	ciphertext+=24;
	for (i=0; i<BINARY_SIZE; i++) {
		binary[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4;
		binary[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]);
	}
	return binary;
}

static inline void setup_des_key(unsigned char key_56[], DES_key_schedule *ks)
{
	DES_cblock key;

	key[0] = key_56[0];
	key[1] = (key_56[0] << 7) | (key_56[1] >> 1);
	key[2] = (key_56[1] << 6) | (key_56[2] >> 2);
	key[3] = (key_56[2] << 5) | (key_56[3] >> 3);
	key[4] = (key_56[3] << 4) | (key_56[4] >> 4);
	key[5] = (key_56[4] << 3) | (key_56[5] >> 5);
	key[6] = (key_56[5] << 2) | (key_56[6] >> 6);
	key[7] = (key_56[6] << 1);

	DES_set_key(&key, ks);
}

static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	DES_key_schedule ks;
	int i = 0;

#ifdef _OPENMP
#pragma omp parallel for default(none) private(i, ks) shared(count, output, challenge, saved_key)
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for(i = 0; i < count; i++)
#endif
	{
		/* Just do a partial binary, the first DES operation */
		setup_des_key(saved_key[i], &ks);
		DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)output[i],
		                &ks, DES_ENCRYPT);
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;

#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for(index=0; index<count; index++)
#endif
	if (!memcmp(output[index], binary, PARTIAL_BINARY_SIZE))
		return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(output[index], binary, PARTIAL_BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	DES_key_schedule ks;
	uchar binary[BINARY_SIZE];

	/* NULL-pad 16-byte LM hash to 21-bytes (we postponed it until now) */
	memset(&saved_key[index][16], 0, 5);

	/* Split padded LM hash into three 7-byte thirds
	   DES-encrypt challenge using each third as a key
	   Concatenate three 8-byte resulting values to form 24-byte LM response */
	setup_des_key(saved_key[index], &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)binary, &ks, DES_ENCRYPT);
	setup_des_key(&saved_key[index][7], &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[8], &ks, DES_ENCRYPT);
	setup_des_key(&saved_key[index][14], &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[16], &ks, DES_ENCRYPT);

	return (!memcmp(binary, get_binary(source), BINARY_SIZE));
}

static void *get_salt(char *ciphertext)
{
	static unsigned char *binary_salt;
	int i;

	if (!binary_salt) binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);

	ciphertext += FORMAT_TAG_LEN;
	for (i = 0; i < SALT_SIZE; ++i)
		binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) +
			atoi16[ARCH_INDEX(ciphertext[i*2+1])];
	return (void*)binary_salt;
}

static void set_salt(void *salt)
{
	challenge = salt;
}

static void netlm_set_key(char *key, int index)
{
	const unsigned char magic[] = {0x4b, 0x47, 0x53, 0x21, 0x40, 0x23, 0x24, 0x25};
	DES_key_schedule ks;

	strncpy((char *)saved_plain[index], key, sizeof(saved_plain[index]));
	saved_plain[index][sizeof(saved_plain[index])-1] = 0;

	/* Upper-case password */
	enc_strupper((char*)saved_plain[index]);

	/* Generate 16-byte LM hash */
	setup_des_key(saved_plain[index], &ks);
	DES_ecb_encrypt((DES_cblock*)magic, (DES_cblock*)saved_key[index], &ks, DES_ENCRYPT);
	setup_des_key(&saved_plain[index][7], &ks);
	DES_ecb_encrypt((DES_cblock*)magic, (DES_cblock*)&saved_key[index][8], &ks, DES_ENCRYPT);

	/* NULL-padding the 16-byte LM hash to 21-bytes is done in cmp_exact */
}

static char *get_key(int index)
{
	return (char*)saved_plain[index];
}

static int salt_hash(void *salt)
{
	return *(ARCH_WORD_32 *)salt & (SALT_HASH_SIZE - 1);
}

static int get_hash_0(int index) { return *(ARCH_WORD_32 *)output[index] & PH_MASK_0; }
static int get_hash_1(int index) { return *(ARCH_WORD_32 *)output[index] & PH_MASK_1; }
static int get_hash_2(int index) { return *(ARCH_WORD_32 *)output[index] & PH_MASK_2; }
static int get_hash_3(int index) { return *(ARCH_WORD_32 *)output[index] & PH_MASK_3; }
static int get_hash_4(int index) { return *(ARCH_WORD_32 *)output[index] & PH_MASK_4; }
static int get_hash_5(int index) { return *(ARCH_WORD_32 *)output[index] & PH_MASK_5; }
static int get_hash_6(int index) { return *(ARCH_WORD_32 *)output[index] & PH_MASK_6; }

struct fmt_main fmt_NETLM = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_8_BIT | FMT_TRUNC | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_OMP_BAD,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		netlm_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
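
The header comment's description of the LM response, restated as self-contained code (this mirrors setup_des_key and cmp_exact above, using OpenSSL's DES; a sketch, not the plugin's hot path):

#include <openssl/des.h>
#include <string.h>

/* Expand a 7-byte (56-bit) chunk into an 8-byte DES key, parity bits last. */
static void expand_key(const unsigned char k56[7], DES_key_schedule *ks)
{
    DES_cblock k;
    k[0] = k56[0];
    k[1] = (k56[0] << 7) | (k56[1] >> 1);
    k[2] = (k56[1] << 6) | (k56[2] >> 2);
    k[3] = (k56[2] << 5) | (k56[3] >> 3);
    k[4] = (k56[3] << 4) | (k56[4] >> 4);
    k[5] = (k56[4] << 3) | (k56[5] >> 5);
    k[6] = (k56[5] << 2) | (k56[6] >> 6);
    k[7] = (k56[6] << 1);
    DES_set_key_unchecked(&k, ks);
}

/* 24-byte LM response: NULL-pad the 16-byte LM hash to 21 bytes, split into
 * three 7-byte DES keys, encrypt the challenge with each, concatenate. */
void lm_response(const unsigned char lm_hash[16],
                 const unsigned char challenge[8], unsigned char resp[24])
{
    unsigned char key21[21] = { 0 };
    DES_cblock ch;
    DES_key_schedule ks;
    int i;

    memcpy(key21, lm_hash, 16);
    memcpy(ch, challenge, 8);
    for (i = 0; i < 3; i++) {
        expand_key(key21 + 7 * i, &ks);
        DES_ecb_encrypt(&ch, (DES_cblock *)(resp + 8 * i), &ks, DES_ENCRYPT);
    }
}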
elemwise_binary_scalar_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file elemwise_binary_scalar_op.h * \brief Function definition of elementwise binary scalar operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_ #include <mxnet/operator_util.h> #include <limits> #include <vector> #include <utility> #include <string> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "elemwise_unary_op.h" namespace mxnet { namespace op { struct NumpyBinaryScalarParam : public dmlc::Parameter<NumpyBinaryScalarParam> { double scalar; bool is_int; DMLC_DECLARE_PARAMETER(NumpyBinaryScalarParam) { DMLC_DECLARE_FIELD(scalar).set_default(1).describe("Scalar input value"); DMLC_DECLARE_FIELD(is_int).set_default(true).describe( "Indicate whether scalar input is int type"); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream scalar_s, is_int_s; scalar_s << std::setprecision(std::numeric_limits<double>::max_digits10) << scalar; is_int_s << is_int; (*dict)["scalar"] = scalar_s.str(); (*dict)["is_int"] = is_int_s.str(); } }; inline bool NumpyBinaryScalarType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed); bool scalar_is_int = param.is_int; if (common::is_int(in_attrs->at(0)) && !scalar_is_int) { TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat64); } else if (in_attrs->at(0) == mshadow::kBool) { TYPE_ASSIGN_CHECK(*out_attrs, 0, scalar_is_int ? mshadow::kInt64 : mshadow::kFloat64); } else { TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0)); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0)); } return out_attrs->at(0) != -1; } class BinaryScalarOp : public UnaryOp { /*! 
\brief Tensor operation against a scalar with a dense result */ template <typename OP, typename DType, typename IType> static void ComputeExDenseResultRsp(mshadow::Stream<cpu>* stream, const nnvm::NodeAttrs& attrs, const OpContext& ctx, const NDArray& input, const OpReqType req, const NDArray& output) { const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed); const double alpha = param.scalar; CHECK_EQ(output.shape(), input.shape()); const int64_t row_count = output.shape()[0]; const int64_t items_per_row = output.shape().Size() / row_count; const DType result_for_zero = OP::Map(DType(0), DType(alpha)); mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream); mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream); const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size(); if (sparse_row_count != row_count) { mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data(rowsparse::kIdx).FlatTo1D<cpu, IType>(stream); int64_t input_iter = 0; int64_t output_row = 0; IType next_input_row = 0; while (output_row < row_count) { next_input_row = input_iter < sparse_row_count ? int64_t(row_indexes[input_iter]) : row_count; // Split up into blocks of contiguous data and do those together // Do contiguous dense blocks const int64_t dense_block_count = next_input_row - output_row; if (dense_block_count > 0) { MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch( stream, items_per_row * dense_block_count, output_data.dptr_ + items_per_row * output_row, result_for_zero); }); output_row += dense_block_count; continue; } // Do contiguous sparse blocks int64_t next_non_contiguous_sparse = input_iter; while (next_non_contiguous_sparse < sparse_row_count - 1) { if (row_indexes[next_non_contiguous_sparse + 1] != row_indexes[next_non_contiguous_sparse] + 1) { break; } ++next_non_contiguous_sparse; } const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1; if (sparse_block_count > 0) { MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch( stream, items_per_row * sparse_block_count, &output_data.dptr_[items_per_row * output_row], &input_data.dptr_[items_per_row * input_iter], DType(alpha)); }); output_row += sparse_block_count; input_iter += sparse_block_count; continue; } } } else { // All rows exist (eventually we don't have to do complex // things to call GPU kernels because we don't need to access row indices) MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch( stream, items_per_row * row_count, output_data.dptr_, input_data.dptr_, DType(alpha)); }); } } /*! \brief Tensor operation against a scalar with a dense result */ template <typename OP, typename DType, typename IType> static void ComputeExDenseResultRsp(mshadow::Stream<gpu>* stream, const nnvm::NodeAttrs& attrs, const OpContext& ctx, const NDArray& input, const OpReqType req, const NDArray& output) { LOG(FATAL) << "NOT IMPLEMENTED"; } /*! 
\brief Tensor operation against a scalar with a dense result */ template <typename OP, typename DType, typename IType, typename CType> static void ComputeExDenseResultCsr(mshadow::Stream<cpu>* stream, const nnvm::NodeAttrs& attrs, const OpContext& ctx, const NDArray& input, const OpReqType req, const NDArray& output) { CHECK_EQ(output.shape(), input.shape()); const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed); const double alpha = param.scalar; const DType dense_fill_val = OP::Map(DType(0), DType(alpha)); const TBlob column_indexes = input.aux_data(csr::kIdx); const size_t item_count = column_indexes.Size(); // Pre-fill dense with 0-input/output value FillDense<DType>( stream, output.shape().Size(), dense_fill_val, req, output.data().dptr<DType>()); mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data()); if (item_count) { const DType* in = input.data().dptr<DType>(); const IType* column_indexes_ptr = column_indexes.dptr<IType>(); const auto row_count = static_cast<size_t>(input.shape()[0]); const TBlob row_starts = input.aux_data(csr::kIndPtr); const CType* row_starts_ptr = row_starts.dptr<CType>(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(row_count); ++i) { const bool last_row = i == static_cast<int>(row_count) - 1; // Split up into blocks of contiguous data and do those together const size_t row_item_start_iter = row_starts_ptr[i]; const size_t input_items_this_row = !last_row ? static_cast<size_t>(row_starts_ptr[i + 1]) - row_item_start_iter : item_count - row_item_start_iter; if (input_items_this_row) { const IType* this_row_column_indexes = column_indexes_ptr + row_item_start_iter; const DType* row_data_start = in + row_item_start_iter; DType* output_this_row = out[i].dptr_; // More overhead to use OMP for small loops, so don't if (input_items_this_row > 1000) { #pragma omp parallel for for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) { const IType col = this_row_column_indexes[j]; const DType val = row_data_start[j]; output_this_row[col] = OP::Map(val, DType(alpha)); } } else { for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) { const IType col = this_row_column_indexes[j]; const DType val = row_data_start[j]; output_this_row[col] = OP::Map(val, DType(alpha)); } } } } } } /*! 
\brief Tensor operation against a scalar with a dense result */ template <typename OP, typename DType, typename IType, typename CType> static void ComputeExDenseResultCsr(mshadow::Stream<gpu>* stream, const nnvm::NodeAttrs& attrs, const OpContext& ctx, const NDArray& input, const OpReqType req, const NDArray& output) { LOG(FATAL) << "NOT IMPLEMENTED"; } template <typename xpu, typename OP, typename DType, typename IType> static void ComputeExDenseResult(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const NDArray& input, const OpReqType req, const NDArray output) { mshadow::Stream<xpu>* stream = ctx.get_stream<xpu>(); CHECK_EQ(output.storage_type(), kDefaultStorage); switch (input.storage_type()) { case kRowSparseStorage: { ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output); break; } case kCSRStorage: { MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, { ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output); }); break; } default: CHECK(false) << "Unsupported sparse storage type"; break; } } public: template <typename OP> static void Compute_(const nnvm::NodeAttrs& attrs, const OpContext& ctx, mshadow::Stream<cpu>* s, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { DCHECK_EQ(inputs.size(), 1); DCHECK_EQ(outputs.size(), 1); using namespace mshadow; using namespace mshadow::expr; TBlob temp_tblob; const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed); bool scalar_is_int = param.is_int; const double alpha = param.scalar; MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { if ((common::is_int(inputs[0].type_flag_) && !scalar_is_int) || (inputs[0].type_flag_ == kBool)) { Tensor<cpu, 1, DType> temp_tensor = ctx.requested[0].get_space_typed<cpu, 1, DType>(Shape1(inputs[0].Size()), s); temp_tblob = TBlob(temp_tensor); CastCompute<cpu>(attrs, ctx, {inputs[0]}, {kWriteTo}, {temp_tblob}); } else { temp_tblob = inputs[0]; } MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch( s, inputs[0].Size(), outputs[0].dptr<DType>(), temp_tblob.dptr<DType>(), DType(alpha)); }); }); } template <typename xpu, typename OP> static void Compute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { mshadow::Stream<xpu>* s = ctx.get_stream<xpu>(); Compute_<OP>(attrs, ctx, s, inputs, req, outputs); } template <typename xpu, typename OP> static void ComputeInt(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { DCHECK_EQ(inputs.size(), 1); DCHECK_EQ(outputs.size(), 1); using namespace mshadow; using namespace mshadow::expr; Stream<xpu>* s = ctx.get_stream<xpu>(); const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed); const double alpha = param.scalar; MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch( s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha)); }); }); } template <typename xpu, typename OP> static void ComputeLogic(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { DCHECK_EQ(inputs.size(), 1); 
DCHECK_EQ(outputs.size(), 1); using namespace mshadow; using namespace mshadow::expr; Stream<xpu>* s = ctx.get_stream<xpu>(); const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed); bool scalar_is_int = param.is_int; const double alpha = param.scalar; TBlob temp_tblob; if (common::is_int(inputs[0].type_flag_) && !scalar_is_int) { Tensor<xpu, 1, double> temp_tensor = ctx.requested[0].get_space_typed<xpu, 1, double>(Shape1(inputs[0].Size()), s); temp_tblob = TBlob(temp_tensor); CastCompute<xpu>(attrs, ctx, {inputs[0]}, {kWriteTo}, {temp_tblob}); } else { temp_tblob = inputs[0]; } MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(temp_tblob.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch( s, inputs[0].Size(), outputs[0].dptr<bool>(), temp_tblob.dptr<DType>(), DType(alpha)); }); }); } template <typename xpu, typename OP> static void ComputeEx(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { DCHECK_EQ(inputs.size(), 1); DCHECK_EQ(outputs.size(), 1); const auto in_stype = inputs[0].storage_type(); const auto out_stype = outputs[0].storage_type(); if (req[0] == kNullOp) { return; } if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) || (in_stype == kCSRStorage && out_stype == kCSRStorage)) { // csr -> csr, or rsp -> rsp UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>); } else if (out_stype == kDefaultStorage && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]); }); }); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } template <typename xpu, typename OP> static void LogicComputeEx(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { DCHECK_EQ(inputs.size(), 1); DCHECK_EQ(outputs.size(), 1); const auto in_stype = inputs[0].storage_type(); const auto out_stype = outputs[0].storage_type(); if (req[0] == kNullOp) { return; } if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) || (in_stype == kCSRStorage && out_stype == kCSRStorage)) { // csr -> csr, or rsp -> rsp UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } template <typename OP> static void Backward_(const nnvm::NodeAttrs& attrs, mshadow::Stream<cpu>* s, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed); const double alpha = param.scalar; MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { mxnet::op::mxnet_op::Kernel< mxnet::op::mxnet_op::op_with_req<mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>, cpu>::Launch(s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>(), DType(alpha)); }); }); } template <typename xpu, typename OP> static void Backward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& 
req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; Stream<xpu>* s = ctx.get_stream<xpu>(); Backward_<OP>(attrs, s, inputs, req, outputs); } }; #define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(1) \ .set_num_outputs(1) \ .set_attr_parser(ParamParser<NumpyBinaryScalarParam>) \ .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \ .set_attr<nnvm::FInferType>("FInferType", NumpyBinaryScalarType) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs) { \ return std::vector<std::pair<int, int> >{{0, 0}}; \ }) \ .set_attr<FResourceRequest>( \ "FResourceRequest", \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; \ }) \ .add_argument("data", "NDArray-or-Symbol", "source input") \ .add_arguments(NumpyBinaryScalarParam::__FIELDS__()) #if MXNET_USE_CUDA struct BinaryScalarRTCCompute { std::string OP; void operator()(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs); void operator()(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs); }; struct BinaryScalarRTCBackward { std::string OP; void operator()(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs); }; #endif } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
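The cast-before-compute above exists because the kernel receives the scalar as DType(alpha): for an integral DType that truncates a fractional scalar, so the input tensor is promoted to double first. A minimal self-contained sketch of the difference, with illustrative names rather than MXNet API:

#include <cstdio>

// Without promotion the scalar is cast into the tensor's DType, so an
// integral DType truncates 2.5 -> 2 and (x == 2.5) wrongly holds for x == 2.
template <typename DType>
bool equal_scalar_no_promote(DType x, double alpha) {
  return x == static_cast<DType>(alpha);
}

// With promotion (mirroring the CastCompute step above) the input is widened
// to double and the comparison is exact.
template <typename DType>
bool equal_scalar_promoted(DType x, double alpha) {
  return static_cast<double>(x) == alpha;
}

int main() {
  std::printf("no promote: %d\n", (int)equal_scalar_no_promote(2, 2.5)); // 1 (wrong)
  std::printf("promoted:   %d\n", (int)equal_scalar_promoted(2, 2.5));   // 0 (right)
}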
GB_reduce_to_scalar_template.c
//------------------------------------------------------------------------------ // GB_reduce_to_scalar_template: s=reduce(A), reduce a matrix to a scalar //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Reduce a matrix to a scalar, with typecasting and generic operators. // No panel is used. { //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- const int8_t *GB_RESTRICT Ab = A->b ; const int64_t *GB_RESTRICT Ai = A->i ; const GB_ATYPE *GB_RESTRICT Ax = (GB_ATYPE *) A->x ; int64_t anz = GB_NNZ_HELD (A) ; ASSERT (anz > 0) ; const bool A_has_zombies = (A->nzombies > 0) ; //-------------------------------------------------------------------------- // reduce A to a scalar //-------------------------------------------------------------------------- if (nthreads == 1) { //---------------------------------------------------------------------- // single thread //---------------------------------------------------------------------- for (int64_t p = 0 ; p < anz ; p++) { // skip if the entry is a zombie or if not in the bitmap if (A_has_zombies && GB_IS_ZOMBIE (Ai [p])) continue ; if (!GBB (Ab, p)) continue ; // s = op (s, (ztype) Ax [p]) GB_ADD_CAST_ARRAY_TO_SCALAR (s, Ax, p) ; // check for early exit #if GB_HAS_TERMINAL if (GB_IS_TERMINAL (s)) break ; #endif } } else { //---------------------------------------------------------------------- // each thread reduces its own slice in parallel //---------------------------------------------------------------------- bool early_exit = false ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t pstart, pend ; GB_PARTITION (pstart, pend, anz, tid, ntasks) ; // ztype t = identity GB_SCALAR_IDENTITY (t) ; bool my_exit, found = false ; GB_ATOMIC_READ my_exit = early_exit ; if (!my_exit) { for (int64_t p = pstart ; p < pend ; p++) { // skip if the entry is a zombie or if not in the bitmap if (A_has_zombies && GB_IS_ZOMBIE (Ai [p])) continue ; if (!GBB (Ab, p)) continue ; found = true ; // t = op (t, (ztype) Ax [p]), with typecast GB_ADD_CAST_ARRAY_TO_SCALAR (t, Ax, p) ; // check for early exit #if GB_HAS_TERMINAL if (GB_IS_TERMINAL (t)) { // tell the other tasks to exit early GB_ATOMIC_WRITE early_exit = true ; break ; } #endif } } F [tid] = found ; // W [tid] = t, no typecast GB_COPY_SCALAR_TO_ARRAY (W, tid, t) ; } //---------------------------------------------------------------------- // sum up the results of each slice using a single thread //---------------------------------------------------------------------- for (int tid = 0 ; tid < ntasks ; tid++) { if (F [tid]) { // s = op (s, W [tid]), no typecast GB_ADD_ARRAY_TO_SCALAR (s, W, tid) ; } } } }
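A self-contained sketch of the early-exit scheme above, with a plain int MIN monoid standing in for the GB_* macros: each task reduces its slice into W[tid], a shared early_exit flag short-circuits the remaining tasks once a terminal value is seen, and one thread combines the partials. The identity value is safe to leave in W[tid] for tasks that exited early.

#include <climits>
#include <cstdio>

// Reduce x[0..n-1] with the MIN monoid; INT_MAX is the identity and 0 is the
// terminal value (nothing can be smaller than 0 here, by assumption).
int reduce_min(const int *x, long n, int ntasks) {
  int W[64];                         // per-task partials; assumes ntasks <= 64
  bool early_exit = false;
  int tid;
  #pragma omp parallel for num_threads(ntasks) schedule(dynamic,1)
  for (tid = 0; tid < ntasks; tid++) {
    long pstart = n * tid / ntasks;
    long pend   = n * (tid + 1) / ntasks;
    int t = INT_MAX;                 // ztype t = identity
    bool my_exit;
    #pragma omp atomic read
    my_exit = early_exit;
    if (!my_exit) {
      for (long p = pstart; p < pend; p++) {
        t = (x[p] < t) ? x[p] : t;   // t = op (t, x [p])
        if (t == 0) {                // terminal: tell the other tasks to exit
          #pragma omp atomic write
          early_exit = true;
          break;
        }
      }
    }
    W[tid] = t;
  }
  int s = INT_MAX;                   // combine the partials serially
  for (tid = 0; tid < ntasks; tid++) s = (W[tid] < s) ? W[tid] : s;
  return s;
}

int main() {
  int x[8] = {5, 3, 9, 7, 4, 8, 6, 2};
  std::printf("min = %d\n", reduce_min(x, 8, 4)); // min = 2
}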
matrixfreeoperator.h
/* * Copyright 2009-2020 The VOTCA Development Team (http://www.votca.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #pragma once #ifndef VOTCA_XTP_MATRIXFREEOPERATOR_H #define VOTCA_XTP_MATRIXFREEOPERATOR_H // Local VOTCA includes #include "eigen.h" namespace votca { namespace xtp { class MatrixFreeOperator; } } // namespace votca namespace Eigen { namespace internal { // MatrixReplacement looks-like a Matrix, so let's inherits its traits: template <> struct traits<votca::xtp::MatrixFreeOperator> : public Eigen::internal::traits<Eigen::MatrixXd> {}; } // namespace internal } // namespace Eigen namespace votca { namespace xtp { class MatrixFreeOperator : public Eigen::EigenBase<MatrixFreeOperator> { public: enum { ColsAtCompileTime = Eigen::Dynamic, MaxColsAtCompileTime = Eigen::Dynamic, IsRowMajor = false }; Index rows() const { return this->_size; } Index cols() const { return this->_size; } template <typename Vtype> Eigen::Product<MatrixFreeOperator, Vtype, Eigen::AliasFreeProduct> operator*( const Eigen::MatrixBase<Vtype>& x) const { return Eigen::Product<MatrixFreeOperator, Vtype, Eigen::AliasFreeProduct>( *this, x.derived()); } // convenience function Eigen::MatrixXd get_full_matrix() const; Eigen::VectorXd diagonal() const; Index size() const; void set_size(Index size); virtual bool useRow() const { return true; } virtual bool useBlock() const { return false; } virtual Index getBlocksize() const { return 0; } // extract row/col of the operator virtual Eigen::RowVectorXd OperatorRow(Index index) const; virtual Eigen::MatrixXd OperatorBlock(Index row, Index col) const; private: Index _size; }; } // namespace xtp } // namespace votca namespace Eigen { namespace internal { // replacement of the mat*vect operation template <typename Vtype> struct generic_product_impl<votca::xtp::MatrixFreeOperator, Vtype, DenseShape, DenseShape, GemvProduct> : generic_product_impl_base< votca::xtp::MatrixFreeOperator, Vtype, generic_product_impl<votca::xtp::MatrixFreeOperator, Vtype>> { typedef typename Product<votca::xtp::MatrixFreeOperator, Vtype>::Scalar Scalar; template <typename Dest> static void scaleAndAddTo(Dest& dst, const votca::xtp::MatrixFreeOperator& op, const Vtype& v, const Scalar& alpha) { // returns dst = alpha * op * v // alpha must be 1 here assert(alpha == Scalar(1) && "scaling is not implemented"); EIGEN_ONLY_USED_FOR_DEBUG(alpha); if (op.useRow()) { // make the mat vect product #pragma omp parallel for schedule(guided) for (Index i = 0; i < op.rows(); i++) { dst(i) = op.OperatorRow(i) * v; } } if (op.useBlock()) { Index blocksize = op.getBlocksize(); if (op.size() % blocksize != 0) { throw std::runtime_error("blocksize is not a multiple of matrix size"); } Index blocks = op.size() / blocksize; // this is inefficient if blocks<num_ofthreads #pragma omp parallel for schedule(guided) for (Index i_row = 0; i_row < blocks; i_row++) { for (Index i_col = 0; i_col < blocks; i_col++) { dst.segment(i_row * blocksize, blocksize) += op.OperatorBlock(i_row, i_col) * v.segment(i_col * blocksize, 
blocksize); } } } } }; // replacement of the mat*mat operation template <typename Mtype> struct generic_product_impl<votca::xtp::MatrixFreeOperator, Mtype, DenseShape, DenseShape, GemmProduct> : generic_product_impl_base< votca::xtp::MatrixFreeOperator, Mtype, generic_product_impl<votca::xtp::MatrixFreeOperator, Mtype>> { typedef typename Product<votca::xtp::MatrixFreeOperator, Mtype>::Scalar Scalar; template <typename Dest> static void scaleAndAddTo(Dest& dst, const votca::xtp::MatrixFreeOperator& op, const Mtype& m, const Scalar& alpha) { // returns dst = alpha * op * v // alpha must be 1 here assert(alpha == Scalar(1) && "scaling is not implemented"); EIGEN_ONLY_USED_FOR_DEBUG(alpha); // make the mat mat product if (op.useRow()) { #pragma omp parallel for for (Index i = 0; i < op.rows(); i++) { const Eigen::RowVectorXd row = op.OperatorRow(i) * m; dst.row(i) = row; } } if (op.useBlock()) { Index blocksize = op.getBlocksize(); if (op.size() % blocksize != 0) { throw std::runtime_error("blocksize is not a multiple of matrix size"); } Index blocks = op.size() / blocksize; // this uses the fact that all our matrices are symmetric, i.e we can // reuse half the blocks Eigen::MatrixXd result = Eigen::MatrixXd::Zero(dst.rows(), dst.cols()); #pragma omp declare reduction (+: Eigen::MatrixXd: omp_out=omp_out+omp_in)\ initializer(omp_priv=Eigen::MatrixXd::Zero(omp_orig.rows(),omp_orig.cols())) #pragma omp parallel for schedule(guided) reduction(+ : result) for (Index i_row = 0; i_row < blocks; i_row++) { for (Index i_col = i_row; i_col < blocks; i_col++) { const Eigen::MatrixXd blockmat = op.OperatorBlock(i_row, i_col); result.block(i_row * blocksize, 0, blocksize, dst.cols()) += blockmat * m.block(i_col * blocksize, 0, blocksize, m.cols()); if (i_row != i_col) { result.block(i_col * blocksize, 0, blocksize, dst.cols()) += blockmat.transpose() * m.block(i_row * blocksize, 0, blocksize, m.cols()); } } } dst += result; } } }; } // namespace internal } // namespace Eigen #endif // VOTCA_XTP_MATRIXFREEOPERATOR_H
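The `#pragma omp declare reduction` above is the load-bearing trick in the block GEMM path: each thread gets a private Eigen::MatrixXd zero-initialized to the shape of the original (omp_orig), and the partials are summed on join. A stripped-down sketch of the same pattern, assuming only Eigen and OpenMP are available:

#include <Eigen/Dense>
#include <iostream>

int main() {
  const Eigen::Index n = 4;
  Eigen::MatrixXd result = Eigen::MatrixXd::Zero(n, n);
  // Same reduction as in the GemmProduct specialization above: omp_priv is
  // zero-initialized with the dimensions of the original matrix (omp_orig).
  #pragma omp declare reduction (+: Eigen::MatrixXd: omp_out=omp_out+omp_in)\
      initializer(omp_priv=Eigen::MatrixXd::Zero(omp_orig.rows(),omp_orig.cols()))
  #pragma omp parallel for reduction(+ : result)
  for (int i = 0; i < 100; i++) {
    result += Eigen::MatrixXd::Constant(n, n, 1.0); // each iteration adds a block
  }
  std::cout << result(0, 0) << "\n"; // 100
}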
io_esdm.c
#include <mpi.h> #ifdef USE_ESDM #include <esdm.h> #include <esdm-mpi.h> #include <netcdf.h> #include "grid.h" #include "io.h" #include <assert.h> static esdm_container_t *container = NULL; int ncid_r; // NOTE: do we need extra dimensions for edge variables? static int Dims_2D_Cell[2]; static int Dims_1D_Cell[1]; static int Dims_2D_Edge[2]; static int Dims_1D_Edge[1]; write_queue WriteQueue; // NOTE: this is for buffering the IO int IOBuffersInitialized = 0; float **CellBuffer2D; float **EdgeBuffer2D; float *CellBuffer1D; float *EdgeBuffer1D; void InitIOBuffers(GRID * g) { if (!IOBuffersInitialized) { CellBuffer2D = malloc((sizeof(float *) * g->height)); for (int i = 0; i < g->height; i++) CellBuffer2D[i] = malloc((sizeof(float) * g->cellCount)); EdgeBuffer2D = malloc((sizeof(float *) * g->height)); for (int i = 0; i < g->height; i++) EdgeBuffer2D[i] = malloc((sizeof(float) * g->edgeCount)); CellBuffer1D = malloc((sizeof(float) * g->cellCount)); EdgeBuffer1D = malloc((sizeof(float) * g->edgeCount)); IOBuffersInitialized = 1; } } void FreeIOBuffers(GRID * g) { if (IOBuffersInitialized) { for (int i = 0; i < g->height; i++) free(CellBuffer2D[i]); for (int i = 0; i < g->height; i++) free(EdgeBuffer2D[i]); free(CellBuffer1D); free(EdgeBuffer1D); free(CellBuffer2D); free(EdgeBuffer2D); IOBuffersInitialized = 0; } } /////////////////////////////////////////// // THIS IS BROKEN FOR MULTIPLE PROCESSES // /////////////////////////////////////////// /* we either need to implement pnetcdf or some kind of mpi sync */ // NOTE: just for testing #include <sys/time.h> double stime() { struct timeval tv; gettimeofday(&tv, NULL); double r = 1000000.0 * tv.tv_sec + tv.tv_usec; return r / 1000000.0; } #define PrintErrorAndExit(error) if(Error){printf("NetCDF ERROR: Received errorcode %d in function %s (Line: %d).\n - Error message: %s\n", Error, __func__ ,__LINE__, nc_strerror(Error));exit(1);} void io_write_init(GRID * g, const char *filename) { char* configFile = "esdm.conf"; esdm_mpi_init(); esdm_mpi_distribute_config_file(configFile); esdm_status ret = esdm_init(); assert(ret == ESDM_SUCCESS); int Error; ret = esdm_mpi_container_create(MPI_COMM_WORLD, "output", true, & container); assert(ret == ESDM_SUCCESS); int DimHeight, DimCell, DimEdge; // Create 2D dimensions Dims_2D_Cell[0] = DimHeight; Dims_2D_Cell[1] = DimCell; Dims_2D_Edge[0] = DimHeight; Dims_2D_Edge[1] = DimEdge; // Create 1D dimensions Dims_1D_Cell[0] = DimCell; Dims_1D_Edge[0] = DimEdge; WriteQueue.Head = WriteQueue.Tail = WriteQueue.isLooped = 0; InitIOBuffers(g); } void io_read_init(GRID * g, const char *filename) { int Error = nc_open(filename, NC_CLOBBER, &ncid_r); InitIOBuffers(g); } // TODO: time this function void io_read_register(GRID * g, const char *variable_name, GVAL * out_buff, datatype_t file_type, datatype_t memory_type, GRID_VAR_POSITION var_pos, GRID_VAR_DIMENSION var_dim) { int ret; int varid; // Get the variable ID based on its name int Error = nc_inq_varid(ncid_r, variable_name, &varid); // Check the position and dimesion int Pos, Dim; Error = nc_get_att_int(ncid_r, varid, "position", &Pos); Error = nc_get_att_int(ncid_r, varid, "dimension", &Dim); assert(Pos == var_pos); assert(Dim == var_dim); if (var_dim == GRID_DIM_3D) { if (var_pos == GRID_POS_CELL) { // NOTE: read data... 
still need to find out why get_var does not work for (int h = 0; h < g->height; h++) { size_t Start[] = { h, 0 }; size_t Count[] = { 1, g->cellCount }; Error = nc_get_vara_float(ncid_r, varid, Start, Count, &CellBuffer2D[h][0]); } struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *var = (struct { char *name; int loc; int dim; union { GVAL * restrict * restrict p2; GVAL * restrict * restrict * restrict p3;} data_pointer;} * *restrict * restrict * restrict) out_buff; /* TODO: find out why memcpy does not work FOREACH cell IN grid | cell{0..0} { int cb,ce; get_indices_c(g,cell.block,&cb,&ce); memcpy(&var[cell],&CellBuffer2D[cell.height][cell.block*g->blkSize], ce); } */ { size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t height_index = (0); height_index < (g->height); height_index++) { for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) { var->data_pointer.p3[(block_index)][(height_index)][(cell_index)] = CellBuffer2D[height_index][(block_index * g->blkSize) + cell_index]; } } } } } else { for (int h = 0; h < g->height; h++) { size_t Start[] = { h, 0 }; size_t Count[] = { 1, g->edgeCount }; Error = nc_get_vara_float(ncid_r, varid, Start, Count, &EdgeBuffer2D[h][0]); } struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *var = (struct { char *name; int loc; int dim; union { GVAL * restrict * restrict p2; GVAL * restrict * restrict * restrict p3;} data_pointer;} * *restrict * restrict * restrict) out_buff; //Error = nc_get_var_float(ncid_r, varid, &EdgeBuffer2D[0][0]); /* FOREACH edge IN grid | edge{0..0} { int eb,ee; get_indices_e(g,edge.block,&eb,&ee); memcpy(&var[edge],&EdgeBuffer2D[edge.height][edge.block*g->blkSize], ee); } */ { size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t height_index = (0); height_index < (g->height); height_index++) { for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) { var->data_pointer.p3[(block_index)][(height_index)][(edge_index)] = EdgeBuffer2D[height_index][(block_index * g->blkSize) + edge_index]; } } } } } } else { if (var_pos == GRID_POS_CELL) { Error = nc_get_var_float(ncid_r, varid, &CellBuffer1D[0]); struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *var = (struct { char *name; int loc; int dim; union { GVAL * restrict * restrict p2; GVAL * restrict * restrict * restrict p3;} data_pointer;} * *restrict * restrict) out_buff; /* FOREACH cell IN gridCELL2D | cell{0..0} { int cb,ce; get_indices_c(g,cell.block,&cb,&ce); memcpy(&var[cell],&CellBuffer1D[cell.block*g->blkSize], ce); } */ { size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) { var->data_pointer.p2[(block_index)][(cell_index)] = CellBuffer1D[(block_index * g->blkSize) + cell_index]; } } } } else { Error = nc_get_var_float(ncid_r, varid, &EdgeBuffer1D[0]); struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *var = (struct { char *name; int loc; int dim; union { GVAL * restrict * restrict p2; GVAL * restrict * restrict * restrict p3;} data_pointer;} * *restrict * restrict) out_buff; /* FOREACH edge IN gridEDGE2D | edge{0..0} { int eb,ee; get_indices_e(g,edge.block,&eb,&ee); memcpy(&var[edge],&EdgeBuffer1D[edge.block*g->blkSize], ee); } */ { size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) { var->data_pointer.p2[(block_index)][(edge_index)] = EdgeBuffer1D[(block_index * g->blkSize) + edge_index]; } } } } } } void io_read_start() { } // does nothing in this implementation void io_write_define(GRID * g, const char *variable_name, GVAL * out_buff, datatype_t file_type, GRID_VAR_POSITION var_pos, GRID_VAR_DIMENSION var_dim, io_var_t * out_varid) { int ret, Error; int *Dims_2D = Dims_2D_Cell; int *Dims_1D = Dims_1D_Cell; if (var_pos == GRID_POS_EDGE) { Dims_2D = Dims_2D_Edge; Dims_1D = Dims_1D_Edge; } //TODO: implement other datatypes! assert(file_type == FLOAT32); Error = esdm_mpi_dataset_open(MPI_COMM_WORLD, container, variable_name, & out_varid->dset); out_varid->memory = out_buff; out_varid->Dim = var_dim; out_varid->Pos = var_pos; out_varid->Name = variable_name; // Write out attributes // Error = nc_put_att_int(ncid_w, out_varid->id, "position", NC_INT, 1, (int *) &(out_varid->Pos)); // Error = nc_put_att_int(ncid_w, out_varid->id, "dimension", NC_INT, 1, (int *) &(out_varid->Dim)); } void io_write_registration_complete(GRID * g) { } void io_write_announce(GRID * g, io_var_t * var) { // NOTE: check if variables is already in the queue int Cur = WriteQueue.Tail; int isLooped = WriteQueue.isLooped; int isEnqueued = 0; while (!(isLooped == 0 && Cur == WriteQueue.Head)) // NOTE: while !isEmpty { esdm_dataset_t * Id = WriteQueue.Elements[Cur].dset; if (Id == var->dset) isEnqueued = 1; if (Cur == ArrayCount(WriteQueue.Elements) - 1) isLooped = 0; Cur = ((Cur + 1) % (ArrayCount(WriteQueue.Elements))); } // NOTE: if not, enqueue it if (!isEnqueued) { WriteQueue.Elements[WriteQueue.Head] = *var; WriteQueue.Head = (WriteQueue.Head + 1); if (WriteQueue.Head == ArrayCount(WriteQueue.Elements)) { WriteQueue.Head = 0; WriteQueue.isLooped = 1; } assert(!(WriteQueue.isLooped && (WriteQueue.Head == WriteQueue.Tail))); } } // NOTE: if this fires, // the queue is to small. // either increase the size // or move to better data structure void io_write_start(GRID * g) { while (!(WriteQueue.isLooped == 0 && WriteQueue.Head == WriteQueue.Tail)) // NOTE: while !isEmpty { double StartTime = stime(); io_var_t IoVar = WriteQueue.Elements[WriteQueue.Tail]; // is not used anywhere. remove? int ret, Error; int BlkCnt = g->cBlkCnt; if (IoVar.Pos == GRID_POS_EDGE) { BlkCnt = g->eBlkCnt; } // Write out data if (IoVar.Dim == GRID_DIM_3D) { size_t Start[2]; size_t Count[2]; if (IoVar.Pos == GRID_POS_CELL) { struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *p = IoVar.memory; /* TODO: why can't I use memcpy FOREACH cell IN grid | cell{0..0} { int cb,ce; get_indices_c(g,cell.block,&cb,&ce); memcpy(&Buffer[cell.height][cell.block*g->blkSize], &p[cell], ce); } */ // Buffer = CellBuffer2D; { size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t height_index = (0); height_index < (g->height); height_index++) { for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) { CellBuffer2D[height_index][(block_index * g->blkSize) + cell_index] = p->data_pointer.p3[(block_index)][(height_index)][(cell_index)]; } } } } for (int h = 0; h < g->height; h++) { size_t Start[] = { h, 0 }; size_t Count[] = { 1, g->cellCount }; Error = nc_put_vara_float(ncid_w, IoVar.id, Start, Count, &CellBuffer2D[h][0]); } } else { struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *p = IoVar.memory; /* FOREACH edge IN grid | edge{0..0} { int eb,ee; get_indices_e(g,edge.block,&eb,&ee); memcpy(&EdgeBuffer2D[edge.height][edge.block*g->blkSize], &p[edge], ee); } */ { size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t height_index = (0); height_index < (g->height); height_index++) { for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) { EdgeBuffer2D[height_index][(block_index * g->blkSize) + edge_index] = p->data_pointer.p3[(block_index)][(height_index)][(edge_index)]; } } } } for (int h = 0; h < g->height; h++) { size_t Start[] = { h, 0 }; size_t Count[] = { 1, g->edgeCount }; Error = nc_put_vara_float(ncid_w, IoVar.id, Start, Count, &EdgeBuffer2D[h][0]); } } } else //Error = nc_put_var_float(ncid_w, IoVar.id, &Buffer[0][0]); { if (IoVar.Pos == GRID_POS_CELL) { struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *p = IoVar.memory; /* FOREACH cell IN gridCELL2D | cell{0..0} { int cb,ce; get_indices_c(g,cell.block,&cb,&ce); memcpy(&Buffer[cell.block*g->blkSize], &p[cell], ce); } */ { size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) { CellBuffer1D[(block_index * g->blkSize) + cell_index] = p->data_pointer.p2[(block_index)][(cell_index)]; } } } Error = nc_put_var_float(ncid_w, IoVar.id, &CellBuffer1D[0]); } else { struct { char *name; int loc; int dim; union { GVAL *restrict * restrict p2; GVAL *restrict * restrict * restrict p3; } data_pointer; } *p = IoVar.memory; /* FOREACH edge IN gridEDGE2D | edge{0..0} { int eb,ee; get_indices_e(g,edge.block,&eb,&ee); memcpy(&Buffer[edge.block*g->blkSize], &p[edge], ee); } */ { size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) { EdgeBuffer1D[(block_index * g->blkSize) + edge_index] = p->data_pointer.p2[(block_index)][(edge_index)]; } } } Error = nc_put_var_float(ncid_w, IoVar.id, &EdgeBuffer1D[0]); } } if (WriteQueue.Tail == ArrayCount(WriteQueue.Elements) - 1) WriteQueue.isLooped = 0; WriteQueue.Tail = ((WriteQueue.Tail + 1) % (ArrayCount(WriteQueue.Elements))); double ElapsedTime = stime() - StartTime; printf("ElapsedTime: %f\n", ElapsedTime); } } void io_cleanup(io_var_t * var){ if(var->dset){ esdm_status ret = esdm_dataset_close(var->dset); eassert(ret == ESDM_SUCCESS); } var->dset = NULL; } void io_write_finalize(GRID * g) { FreeIOBuffers(g); esdm_status ret; ret = esdm_mpi_container_commit(MPI_COMM_WORLD, container); assert(ret == ESDM_SUCCESS); ret = esdm_container_close(container); assert(ret == ESDM_SUCCESS); esdm_mpi_finalize(); } #endif
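io_write_announce and io_write_start above implement a fixed-capacity ring buffer that distinguishes full from empty with an isLooped flag rather than a counter, and deduplicates on insert. A self-contained sketch of that queue discipline, with an int payload standing in for io_var_t:

#include <cassert>
#include <cstdio>

#define ArrayCount(a) ((int)(sizeof(a) / sizeof((a)[0])))

struct write_queue {
  int Elements[8];           // stand-in payload; io_esdm.c stores io_var_t
  int Head, Tail, isLooped;  // Head == Tail and !isLooped  <=>  empty
};

// Enqueue v unless it is already queued, mirroring io_write_announce().
static int enqueue_unique(write_queue *q, int v) {
  int cur = q->Tail, looped = q->isLooped;
  while (!(looped == 0 && cur == q->Head)) {   // while !isEmpty
    if (q->Elements[cur] == v) return 0;       // already enqueued
    if (cur == ArrayCount(q->Elements) - 1) looped = 0;
    cur = (cur + 1) % ArrayCount(q->Elements);
  }
  q->Elements[q->Head] = v;
  q->Head = q->Head + 1;
  if (q->Head == ArrayCount(q->Elements)) { q->Head = 0; q->isLooped = 1; }
  assert(!(q->isLooped && q->Head == q->Tail)); // fires when the queue is too small
  return 1;
}

int main() {
  write_queue q = {};
  enqueue_unique(&q, 7);
  enqueue_unique(&q, 7);                             // deduplicated
  std::printf("head=%d tail=%d\n", q.Head, q.Tail);  // head=1 tail=0
}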
pointer2Array2.c
// array types from a parameter list have to be converted to corresponding pointer types
// to avoid segmentation fault.
// Kernel is extracted from cg of npb2.3 omp c benchmarks.
static int colidx[100][100];

static void makea (int colidx[100][100])
{
  int i,j;
  // zero all 100x100 entries (valid indices are 0..99)
#pragma omp parallel for private(i,j)
  for (i = 0; i < 100; i++)
    for (j = 0; j < 100; j++)
      colidx[i][j] = 0;
}

int main()
{
  makea(colidx);
  return 0;
}
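The point of the test above is C's parameter adjustment rule: an array declarator in a parameter list is rewritten to a pointer, so `int colidx[100][100]` in a signature is really `int (*colidx)[100]`. A short sketch making the adjusted type explicit:

#include <cstdio>

// These two declarations name the same parameter type: the outermost array
// dimension of a parameter decays to a pointer.
void makea_array(int colidx[100][100]);
void makea_ptr(int (*colidx)[100]);

void makea_ptr(int (*colidx)[100]) {
  colidx[0][0] = 42;
}

int main() {
  static int colidx[100][100];
  makea_ptr(colidx);                   // the argument decays to int (*)[100]
  std::printf("%d\n", colidx[0][0]);   // 42
}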
omp_foreign_thread_team_reuse.c
// RUN: %libomp-compile-and-run
// REQUIRES: !abt
#include <stdio.h>
#include <pthread.h>
#include "omp_testsuite.h"

#define NUM_THREADS 10

/* After hot teams were enabled by default, the library started using levels
   kept in the team structure. The levels are broken in case a foreign thread
   exits and puts its team into the pool which is then re-used by another
   foreign thread. The broken behavior observed is that when printing the
   levels for each new team, one gets 1, 2, 1, 2, 1, 2, etc. This makes the
   library believe that every other team is nested, which is incorrect. What
   is wanted is for the levels to be 1, 1, 1, etc. */
int a = 0;
int level;

typedef struct thread_arg_t {
  int iterations;
} thread_arg_t;

void* thread_function(void* arg)
{
  int i;
  thread_arg_t* targ = (thread_arg_t*)arg;
  int iterations = targ->iterations;
  #pragma omp parallel private(i)
  {
    // level should always be 1
    #pragma omp single
    level = omp_get_level();
    #pragma omp for
    for(i = 0; i < iterations; i++) {
      #pragma omp atomic
      a++;
    }
  }
  return NULL;
}

int test_omp_team_reuse()
{
  int i;
  int success = 1;
  pthread_t thread[NUM_THREADS];
  thread_arg_t thread_arg[NUM_THREADS];
  // launch NUM_THREADS threads, one at a time, to perform thread_function()
  for(i = 0; i < NUM_THREADS; i++) {
    thread_arg[i].iterations = i + 1;
    pthread_create(thread+i, NULL, thread_function, thread_arg+i);
    pthread_join(*(thread+i), NULL);
    // level read in thread_function()'s parallel region should be 1
    if(level != 1) {
      fprintf(stderr, "error: for pthread %d level should be 1 but "
                      "instead equals %d\n", i, level);
      success = 0;
    }
  }
  // make sure the for loop works
  int known_sum = (NUM_THREADS * (NUM_THREADS+1)) / 2;
  if(a != known_sum) {
    fprintf(stderr, "a should be %d but instead equals %d\n", known_sum, a);
    success = 0;
  }
  return success;
}

int main()
{
  int i;
  int num_failed=0;
  for(i = 0; i < REPETITIONS; i++) {
    a = 0;
    if(!test_omp_team_reuse()) {
      num_failed++;
    }
  }
  return num_failed;
}
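For context, omp_get_level() counts the parallel regions (active or inactive) enclosing the calling task, so a single non-nested region reports 1 no matter which native thread opened it; that is the invariant the test checks. A minimal sketch of the levels themselves:

#include <cstdio>
#include <omp.h>

int main() {
  std::printf("outside: level = %d\n", omp_get_level());       // 0
  #pragma omp parallel num_threads(2)
  #pragma omp single
  {
    std::printf("inside:  level = %d\n", omp_get_level());     // 1
    #pragma omp parallel num_threads(2)
    #pragma omp single
    std::printf("nested:  level = %d\n", omp_get_level());     // 2, even if the
  }                                                            // region is inactive
}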
GB_unop__identity_fc64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fc64_fp64) // op(A') function: GB (_unop_tran__identity_fc64_fp64) // C type: GxB_FC64_t // A type: double // cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fc64_fp64) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fc64_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
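The generated kernel above streams through Ax, casts each double to a complex number with zero imaginary part, and skips bitmap entries that are not present. The same loop written directly against std::complex, as a sketch rather than the generated GraphBLAS code:

#include <complex>
#include <cstdint>
#include <cstdio>

// Cx = (complex) Ax, skipping entries whose bitmap bit is clear.
void unop_identity_fc64_fp64(std::complex<double> *Cx, const double *Ax,
                             const int8_t *Ab, int64_t anz) {
  #pragma omp parallel for schedule(static)
  for (int64_t p = 0; p < anz; p++) {
    if (Ab != nullptr && !Ab[p]) continue;     // bitmap case: entry not present
    Cx[p] = std::complex<double>(Ax[p], 0.0);  // GxB_CMPLX ((double) aij, 0)
  }
}

int main() {
  double Ax[3] = {1.0, 2.0, 3.0};
  int8_t Ab[3] = {1, 0, 1};
  std::complex<double> Cx[3] = {};
  unop_identity_fc64_fp64(Cx, Ax, Ab, 3);
  std::printf("%g %g\n", Cx[0].real(), Cx[2].real()); // 1 3
}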
par_vector.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Member functions for hypre_Vector class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #ifdef HYPRE_NO_GLOBAL_PARTITION HYPRE_Int hypre_FillResponseParToVectorAll(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**, HYPRE_Int*); #endif /*-------------------------------------------------------------------------- * hypre_ParVectorCreate *--------------------------------------------------------------------------*/ /* If create is called for HYPRE_NO_GLOBAL_PARTITION and partitioning is NOT null, then it is assumed that it is array of length 2 containing the start row of the calling processor followed by the start row of the next processor - AHB 6/05 */ hypre_ParVector * hypre_ParVectorCreate( MPI_Comm comm, HYPRE_BigInt global_size, HYPRE_BigInt *partitioning ) { hypre_ParVector *vector; HYPRE_Int num_procs, my_id; if (global_size < 0) { hypre_error_in_arg(2); return NULL; } vector = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); hypre_MPI_Comm_rank(comm,&my_id); if (!partitioning) { hypre_MPI_Comm_size(comm,&num_procs); #ifdef HYPRE_NO_GLOBAL_PARTITION hypre_GenerateLocalPartitioning(global_size, num_procs, my_id, &partitioning); #else hypre_GeneratePartitioning(global_size, num_procs, &partitioning); #endif } hypre_ParVectorAssumedPartition(vector) = NULL; hypre_ParVectorComm(vector) = comm; hypre_ParVectorGlobalSize(vector) = global_size; #ifdef HYPRE_NO_GLOBAL_PARTITION hypre_ParVectorFirstIndex(vector) = partitioning[0]; hypre_ParVectorLastIndex(vector) = partitioning[1]-1; hypre_ParVectorPartitioning(vector) = partitioning; hypre_ParVectorLocalVector(vector) = hypre_SeqVectorCreate(partitioning[1]-partitioning[0]); #else hypre_ParVectorFirstIndex(vector) = partitioning[my_id]; hypre_ParVectorLastIndex(vector) = partitioning[my_id+1] -1; hypre_ParVectorPartitioning(vector) = partitioning; hypre_ParVectorLocalVector(vector) = hypre_SeqVectorCreate(partitioning[my_id+1]-partitioning[my_id]); #endif /* set defaults */ hypre_ParVectorOwnsData(vector) = 1; hypre_ParVectorOwnsPartitioning(vector) = 1; hypre_ParVectorActualLocalSize(vector) = 0; return vector; } /*-------------------------------------------------------------------------- * hypre_ParMultiVectorCreate *--------------------------------------------------------------------------*/ hypre_ParVector * hypre_ParMultiVectorCreate( MPI_Comm comm, HYPRE_BigInt global_size, HYPRE_BigInt *partitioning, HYPRE_Int num_vectors ) { /* note that global_size is the global length of a single vector */ hypre_ParVector * vector = hypre_ParVectorCreate( comm, global_size, partitioning ); hypre_ParVectorNumVectors(vector) = num_vectors; return vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorDestroy( hypre_ParVector *vector ) { if (vector) { if ( hypre_ParVectorOwnsData(vector) ) { hypre_SeqVectorDestroy(hypre_ParVectorLocalVector(vector)); } if ( 
hypre_ParVectorOwnsPartitioning(vector) ) { hypre_TFree(hypre_ParVectorPartitioning(vector), HYPRE_MEMORY_HOST); } if (hypre_ParVectorAssumedPartition(vector)) { hypre_AssumedPartitionDestroy(hypre_ParVectorAssumedPartition(vector)); } hypre_TFree(vector, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorInitialize *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorInitialize( hypre_ParVector *vector ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_SeqVectorInitialize(hypre_ParVectorLocalVector(vector)); hypre_ParVectorActualLocalSize(vector) = hypre_VectorSize(hypre_ParVectorLocalVector(vector)); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetDataOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetDataOwner( hypre_ParVector *vector, HYPRE_Int owns_data ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParVectorOwnsData(vector) = owns_data; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetPartitioningOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetPartitioningOwner( hypre_ParVector *vector, HYPRE_Int owns_partitioning ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParVectorOwnsPartitioning(vector) = owns_partitioning; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetNumVectors * call before calling hypre_ParVectorInitialize * probably this will do more harm than good, use hypre_ParMultiVectorCreate *--------------------------------------------------------------------------*/ #if 0 HYPRE_Int hypre_ParVectorSetNumVectors( hypre_ParVector *vector, HYPRE_Int num_vectors ) { HYPRE_Int ierr=0; hypre_Vector *local_vector = hypre_ParVectorLocalVector(v); hypre_SeqVectorSetNumVectors( local_vector, num_vectors ); return ierr; } #endif /*-------------------------------------------------------------------------- * hypre_ParVectorRead *--------------------------------------------------------------------------*/ hypre_ParVector *hypre_ParVectorRead( MPI_Comm comm, const char *file_name ) { char new_file_name[80]; hypre_ParVector *par_vector; HYPRE_Int my_id, num_procs; HYPRE_BigInt *partitioning; HYPRE_BigInt global_size; HYPRE_Int i; FILE *fp; hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); partitioning = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); hypre_sprintf(new_file_name,"%s.INFO.%d",file_name,my_id); fp = fopen(new_file_name, "r"); hypre_fscanf(fp, "%b\n", &global_size); #ifdef HYPRE_NO_GLOBAL_PARTITION for (i=0; i < 2; i++) hypre_fscanf(fp, "%b\n", &partitioning[i]); fclose (fp); #else for (i=0; i < num_procs; i++) hypre_fscanf(fp, "%b\n", &partitioning[i]); fclose (fp); partitioning[num_procs] = global_size; #endif par_vector = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); hypre_ParVectorComm(par_vector) = comm; hypre_ParVectorGlobalSize(par_vector) = global_size; #ifdef HYPRE_NO_GLOBAL_PARTITION hypre_ParVectorFirstIndex(par_vector) = partitioning[0]; hypre_ParVectorLastIndex(par_vector) = partitioning[1]-1; #else hypre_ParVectorFirstIndex(par_vector) = partitioning[my_id]; 
hypre_ParVectorLastIndex(par_vector) = partitioning[my_id+1]-1; #endif hypre_ParVectorPartitioning(par_vector) = partitioning; hypre_ParVectorOwnsData(par_vector) = 1; hypre_ParVectorOwnsPartitioning(par_vector) = 1; hypre_sprintf(new_file_name,"%s.%d",file_name,my_id); hypre_ParVectorLocalVector(par_vector) = hypre_SeqVectorRead(new_file_name); /* multivector code not written yet */ hypre_assert( hypre_ParVectorNumVectors(par_vector) == 1 ); return par_vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorPrint *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorPrint( hypre_ParVector *vector, const char *file_name ) { char new_file_name[80]; hypre_Vector *local_vector; MPI_Comm comm; HYPRE_Int my_id, num_procs, i; HYPRE_BigInt *partitioning; HYPRE_BigInt global_size; FILE *fp; if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } local_vector = hypre_ParVectorLocalVector(vector); comm = hypre_ParVectorComm(vector); partitioning = hypre_ParVectorPartitioning(vector); global_size = hypre_ParVectorGlobalSize(vector); hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); hypre_sprintf(new_file_name,"%s.%d",file_name,my_id); hypre_SeqVectorPrint(local_vector,new_file_name); hypre_sprintf(new_file_name,"%s.INFO.%d",file_name,my_id); fp = fopen(new_file_name, "w"); hypre_fprintf(fp, "%b\n", global_size); #ifdef HYPRE_NO_GLOBAL_PARTITION for (i=0; i < 2; i++) hypre_fprintf(fp, "%b\n", partitioning[i]); #else for (i=0; i < num_procs; i++) hypre_fprintf(fp, "%b\n", partitioning[i]); #endif fclose (fp); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetConstantValues *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetConstantValues( hypre_ParVector *v, HYPRE_Complex value ) { hypre_Vector *v_local = hypre_ParVectorLocalVector(v); return hypre_SeqVectorSetConstantValues(v_local,value); } /*-------------------------------------------------------------------------- * hypre_ParVectorSetRandomValues *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetRandomValues( hypre_ParVector *v, HYPRE_Int seed ) { HYPRE_Int my_id; hypre_Vector *v_local = hypre_ParVectorLocalVector(v); MPI_Comm comm = hypre_ParVectorComm(v); hypre_MPI_Comm_rank(comm,&my_id); seed *= (my_id+1); return hypre_SeqVectorSetRandomValues(v_local,seed); } /*-------------------------------------------------------------------------- * hypre_ParVectorCopy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorCopy( hypre_ParVector *x, hypre_ParVector *y ) { hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); return hypre_SeqVectorCopy(x_local, y_local); } /*-------------------------------------------------------------------------- * hypre_ParVectorCloneShallow * returns a complete copy of a hypre_ParVector x - a shallow copy, re-using * the partitioning and data arrays of x *--------------------------------------------------------------------------*/ hypre_ParVector * hypre_ParVectorCloneShallow( hypre_ParVector *x ) { hypre_ParVector * y = hypre_ParVectorCreate(hypre_ParVectorComm(x), hypre_ParVectorGlobalSize(x), hypre_ParVectorPartitioning(x)); hypre_ParVectorOwnsData(y) = 1; /* ...This vector owns its local vector, although the 
local vector doesn't * own _its_ data */ hypre_ParVectorOwnsPartitioning(y) = 0; hypre_SeqVectorDestroy( hypre_ParVectorLocalVector(y) ); hypre_ParVectorLocalVector(y) = hypre_SeqVectorCloneShallow( hypre_ParVectorLocalVector(x) ); hypre_ParVectorFirstIndex(y) = hypre_ParVectorFirstIndex(x); return y; } /*-------------------------------------------------------------------------- * hypre_ParVectorScale *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorScale( HYPRE_Complex alpha, hypre_ParVector *y ) { hypre_Vector *y_local = hypre_ParVectorLocalVector(y); return hypre_SeqVectorScale( alpha, y_local); } /*-------------------------------------------------------------------------- * hypre_ParVectorAxpy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorAxpy( HYPRE_Complex alpha, hypre_ParVector *x, hypre_ParVector *y ) { hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); return hypre_SeqVectorAxpy( alpha, x_local, y_local); } /*-------------------------------------------------------------------------- * hypre_ParVectorMassAxpy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorMassAxpy( HYPRE_Complex *alpha, hypre_ParVector **x, hypre_ParVector *y, HYPRE_Int k, HYPRE_Int unroll ) { HYPRE_Int i; hypre_Vector **x_local; hypre_Vector *y_local = hypre_ParVectorLocalVector(y); x_local = hypre_TAlloc(hypre_Vector *, k, HYPRE_MEMORY_SHARED); for (i=0; i < k; i++) x_local[i] = hypre_ParVectorLocalVector(x[i]); hypre_SeqVectorMassAxpy( alpha, x_local, y_local, k, unroll); hypre_TFree(x_local, HYPRE_MEMORY_SHARED); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorInnerProd *--------------------------------------------------------------------------*/ HYPRE_Real hypre_ParVectorInnerProd( hypre_ParVector *x, hypre_ParVector *y ) { MPI_Comm comm = hypre_ParVectorComm(x); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_Real result = 0.0; HYPRE_Real local_result = hypre_SeqVectorInnerProd(x_local, y_local); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] -= hypre_MPI_Wtime(); #endif hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] += hypre_MPI_Wtime(); #endif return result; } /*-------------------------------------------------------------------------- * hypre_ParVectorMassInnerProd *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorMassInnerProd( hypre_ParVector *x, hypre_ParVector **y, HYPRE_Int k, HYPRE_Int unroll, HYPRE_Real *result ) { MPI_Comm comm = hypre_ParVectorComm(x); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); HYPRE_Real *local_result; HYPRE_Int i; hypre_Vector **y_local; y_local = hypre_TAlloc(hypre_Vector *, k, HYPRE_MEMORY_SHARED); for (i=0; i < k; i++) y_local[i] = (hypre_Vector *) hypre_ParVectorLocalVector(y[i]); local_result = hypre_CTAlloc(HYPRE_Real, k, HYPRE_MEMORY_SHARED); hypre_SeqVectorMassInnerProd(x_local, y_local, k, unroll, local_result); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] -= hypre_MPI_Wtime(); #endif hypre_MPI_Allreduce(local_result, result, k, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); #ifdef HYPRE_PROFILE 
hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] += hypre_MPI_Wtime(); #endif hypre_TFree(y_local, HYPRE_MEMORY_SHARED); hypre_TFree(local_result, HYPRE_MEMORY_SHARED); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorMassDotpTwo *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorMassDotpTwo ( hypre_ParVector *x, hypre_ParVector *y, hypre_ParVector **z, HYPRE_Int k, HYPRE_Int unroll, HYPRE_Real *result_x, HYPRE_Real *result_y ) { MPI_Comm comm = hypre_ParVectorComm(x); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_Real *local_result, *result; HYPRE_Int i; hypre_Vector **z_local; z_local = hypre_TAlloc(hypre_Vector *, k, HYPRE_MEMORY_SHARED); for (i=0; i < k; i++) z_local[i] = (hypre_Vector *) hypre_ParVectorLocalVector(z[i]); local_result = hypre_CTAlloc(HYPRE_Real, 2*k, HYPRE_MEMORY_SHARED); result = hypre_CTAlloc(HYPRE_Real, 2*k, HYPRE_MEMORY_SHARED); hypre_SeqVectorMassDotpTwo(x_local, y_local, z_local, k, unroll, &local_result[0], &local_result[k]); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] -= hypre_MPI_Wtime(); #endif hypre_MPI_Allreduce(local_result, result, 2*k, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] += hypre_MPI_Wtime(); #endif for (i=0; i < k; i++) { result_x[i] = result[i]; result_y[i] = result[k+i]; } hypre_TFree(z_local, HYPRE_MEMORY_SHARED); hypre_TFree(local_result, HYPRE_MEMORY_SHARED); hypre_TFree(result, HYPRE_MEMORY_SHARED); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_VectorToParVector: * generates a ParVector from a Vector on proc 0 and distributes the pieces * to the other procs in comm * * this is not being optimized to use HYPRE_NO_GLOBAL_PARTITION *--------------------------------------------------------------------------*/ hypre_ParVector * hypre_VectorToParVector ( MPI_Comm comm, hypre_Vector *v, HYPRE_BigInt *vec_starts ) { HYPRE_BigInt global_size; HYPRE_Int local_size; HYPRE_Int num_vectors; HYPRE_Int num_procs, my_id; HYPRE_Int global_vecstride, vecstride, idxstride; hypre_ParVector *par_vector; hypre_Vector *local_vector; HYPRE_Complex *v_data; HYPRE_Complex *local_data; hypre_MPI_Request *requests; hypre_MPI_Status *status, status0; HYPRE_Int i, j, k, p; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); if (my_id == 0) { global_size = (HYPRE_BigInt)hypre_VectorSize(v); v_data = hypre_VectorData(v); num_vectors = hypre_VectorNumVectors(v); /* for multivectors */ global_vecstride = hypre_VectorVectorStride(v); } hypre_MPI_Bcast(&global_size,1,HYPRE_MPI_INT,0,comm); hypre_MPI_Bcast(&num_vectors,1,HYPRE_MPI_INT,0,comm); hypre_MPI_Bcast(&global_vecstride,1,HYPRE_MPI_INT,0,comm); if ( num_vectors==1 ) par_vector = hypre_ParVectorCreate(comm, global_size, vec_starts); else par_vector = hypre_ParMultiVectorCreate(comm, global_size, vec_starts, num_vectors); vec_starts = hypre_ParVectorPartitioning(par_vector); local_size = (HYPRE_Int)(vec_starts[my_id+1] - vec_starts[my_id]); hypre_ParVectorInitialize(par_vector); local_vector = hypre_ParVectorLocalVector(par_vector); local_data = hypre_VectorData(local_vector); vecstride = hypre_VectorVectorStride(local_vector); idxstride = hypre_VectorIndexStride(local_vector); /* so far the only implemented multivector StorageMethod is 0 */ hypre_assert( 
idxstride==1 ); if (my_id == 0) { requests = hypre_CTAlloc(hypre_MPI_Request, num_vectors*(num_procs-1), HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_vectors*(num_procs-1), HYPRE_MEMORY_HOST); k = 0; for ( p=1; p<num_procs; p++) for ( j=0; j<num_vectors; ++j ) { hypre_MPI_Isend( &v_data[(HYPRE_Int)vec_starts[p]]+j*global_vecstride, (HYPRE_Int)(vec_starts[p+1]-vec_starts[p]), HYPRE_MPI_COMPLEX, p, 0, comm, &requests[k++] ); } if ( num_vectors==1 ) { for (i=0; i < local_size; i++) local_data[i] = v_data[i]; } else for ( j=0; j<num_vectors; ++j ) { for (i=0; i < local_size; i++) local_data[i+j*vecstride] = v_data[i+j*global_vecstride]; } hypre_MPI_Waitall(num_procs-1,requests, status); hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); } else { for ( j=0; j<num_vectors; ++j ) hypre_MPI_Recv( local_data+j*vecstride, local_size, HYPRE_MPI_COMPLEX, 0, 0, comm,&status0 ); } return par_vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorToVectorAll: * generates a Vector on every proc which has a piece of the data * from a ParVector on several procs in comm, * vec_starts needs to contain the partitioning across all procs in comm *--------------------------------------------------------------------------*/ hypre_Vector * hypre_ParVectorToVectorAll( hypre_ParVector *par_v ) { MPI_Comm comm = hypre_ParVectorComm(par_v); HYPRE_BigInt global_size = hypre_ParVectorGlobalSize(par_v); #ifndef HYPRE_NO_GLOBAL_PARTITION HYPRE_BigInt *vec_starts = hypre_ParVectorPartitioning(par_v); #endif hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_v); HYPRE_Int num_procs, my_id; HYPRE_Int num_vectors = hypre_ParVectorNumVectors(par_v); hypre_Vector *vector; HYPRE_Complex *vector_data; HYPRE_Complex *local_data; HYPRE_Int local_size; hypre_MPI_Request *requests; hypre_MPI_Status *status; HYPRE_Int i, j; HYPRE_Int *used_procs; HYPRE_Int num_types, num_requests; HYPRE_Int vec_len, proc_id; #ifdef HYPRE_NO_GLOBAL_PARTITION HYPRE_Int *new_vec_starts; HYPRE_Int num_contacts; HYPRE_Int contact_proc_list[1]; HYPRE_Int contact_send_buf[1]; HYPRE_Int contact_send_buf_starts[2]; HYPRE_Int max_response_size; HYPRE_Int *response_recv_buf=NULL; HYPRE_Int *response_recv_buf_starts = NULL; hypre_DataExchangeResponse response_obj; hypre_ProcListElements send_proc_obj; HYPRE_Int *send_info = NULL; hypre_MPI_Status status1; HYPRE_Int count, tag1 = 112, tag2 = 223; HYPRE_Int start; #endif hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION local_size = (HYPRE_Int)(hypre_ParVectorLastIndex(par_v) - hypre_ParVectorFirstIndex(par_v) + 1); /* determine procs which hold data of par_v and store ids in used_procs */ /* we need to do an exchange data for this. 
If I own row then I will contact processor 0 with the endpoint of my local range */ if (local_size > 0) { num_contacts = 1; contact_proc_list[0] = 0; contact_send_buf[0] = hypre_ParVectorLastIndex(par_v); contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 1; } else { num_contacts = 0; contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 0; } /*build the response object*/ /*send_proc_obj will be for saving info from contacts */ send_proc_obj.length = 0; send_proc_obj.storage_length = 10; send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts[0] = 0; send_proc_obj.element_storage_length = 10; send_proc_obj.elements = hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST); max_response_size = 0; /* each response is null */ response_obj.fill_response = hypre_FillResponseParToVectorAll; response_obj.data1 = NULL; response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/ hypre_DataExchangeList(num_contacts, contact_proc_list, contact_send_buf, contact_send_buf_starts, sizeof(HYPRE_Int), //0, &response_obj, sizeof(HYPRE_Int), &response_obj, max_response_size, 1, comm, (void**) &response_recv_buf, &response_recv_buf_starts); /* now processor 0 should have a list of ranges for processors that have rows - these are in send_proc_obj - it needs to create the new list of processors and also an array of vec starts - and send to those who own row*/ if (my_id) { if (local_size) { /* look for a message from processor 0 */ hypre_MPI_Probe(0, tag1, comm, &status1); hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count); send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1); /* now unpack */ num_types = send_info[0]; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST); for (i=1; i<= num_types; i++) { used_procs[i-1] = (HYPRE_Int)send_info[i]; } for (i=num_types+1; i< count; i++) { new_vec_starts[i-num_types-1] = send_info[i] ; } } else /* clean up and exit */ { hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); return NULL; } } else /* my_id ==0 */ { num_types = send_proc_obj.length; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST); new_vec_starts[0] = 0; for (i=0; i< num_types; i++) { used_procs[i] = send_proc_obj.id[i]; new_vec_starts[i+1] = send_proc_obj.elements[i]+1; } hypre_qsort0(used_procs, 0, num_types-1); hypre_qsort0(new_vec_starts, 0, num_types); /*now we need to put into an array to send */ count = 2*num_types+2; send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); send_info[0] = num_types; for (i=1; i<= num_types; i++) { send_info[i] = (HYPRE_Int)used_procs[i-1]; } for (i=num_types+1; i< count; i++) { send_info[i] = new_vec_starts[i-num_types-1]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST); /* don't send to myself - 
these are sorted so my id would be first*/ start = 0; if (used_procs[0] == 0) { start = 1; } for (i=start; i < num_types; i++) { hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm, &requests[i-start]); } hypre_MPI_Waitall(num_types-start, requests, status); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } /* clean up */ hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); hypre_TFree(send_info, HYPRE_MEMORY_HOST); if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); /* now proc 0 can exit if it has no rows */ if (!local_size) { hypre_TFree(used_procs, HYPRE_MEMORY_HOST); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); return NULL; } /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */ /* this vector should be rather small */ local_data = hypre_VectorData(local_vector); vector = hypre_SeqVectorCreate((HYPRE_Int)global_size); hypre_VectorNumVectors(vector) = num_vectors; hypre_SeqVectorInitialize(vector); vector_data = hypre_VectorData(vector); num_requests = 2*num_types; requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST); /* initialize data exchange among used_procs and generate vector - here we send to ourself also*/ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; vec_len = (HYPRE_Int)(new_vec_starts[i+1] - new_vec_starts[i]); hypre_MPI_Irecv(&vector_data[(HYPRE_Int)new_vec_starts[i]], num_vectors*vec_len, HYPRE_MPI_COMPLEX, proc_id, tag2, comm, &requests[j++]); } for (i = 0; i < num_types; i++) { hypre_MPI_Isend(local_data, num_vectors*local_size, HYPRE_MPI_COMPLEX, used_procs[i], tag2, comm, &requests[j++]); } hypre_MPI_Waitall(num_requests, requests, status); if (num_requests) { hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); } hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); #else local_size = (HYPRE_Int)(vec_starts[my_id+1] - vec_starts[my_id]); /* if my_id contains no data, return NULL */ if (!local_size) return NULL; local_data = hypre_VectorData(local_vector); vector = hypre_SeqVectorCreate(global_size); hypre_VectorNumVectors(vector) = num_vectors; hypre_SeqVectorInitialize(vector); vector_data = hypre_VectorData(vector); /* determine procs which hold data of par_v and store ids in used_procs */ num_types = -1; for (i=0; i < num_procs; i++) if (vec_starts[i+1]-vec_starts[i]) num_types++; num_requests = 2*num_types; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); j = 0; for (i=0; i < num_procs; i++) if (vec_starts[i+1]-vec_starts[i] && i-my_id) used_procs[j++] = i; requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST); /* initialize data exchange among used_procs and generate vector */ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; vec_len = (HYPRE_Int)(vec_starts[proc_id+1] - vec_starts[proc_id]); hypre_MPI_Irecv(&vector_data[vec_starts[proc_id]], num_vectors*vec_len, HYPRE_MPI_COMPLEX, proc_id, 0, comm, &requests[j++]); } for (i = 0; i < num_types; i++) { hypre_MPI_Isend(local_data, num_vectors*local_size, HYPRE_MPI_COMPLEX, used_procs[i], 0, comm, 
&requests[j++]); } for (i=0; i < num_vectors*local_size; i++) vector_data[vec_starts[my_id]+i] = local_data[i]; hypre_MPI_Waitall(num_requests, requests, status); if (num_requests) { hypre_TFree(used_procs, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); } #endif return vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorPrintIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorPrintIJ( hypre_ParVector *vector, HYPRE_Int base_j, const char *filename ) { MPI_Comm comm; HYPRE_BigInt global_size, j, part0; HYPRE_BigInt *partitioning; HYPRE_Complex *local_data; HYPRE_Int myid, num_procs, i; char new_filename[255]; FILE *file; if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParVectorComm(vector); global_size = hypre_ParVectorGlobalSize(vector); partitioning = hypre_ParVectorPartitioning(vector); /* multivector code not written yet */ hypre_assert( hypre_ParVectorNumVectors(vector) == 1 ); if ( hypre_ParVectorNumVectors(vector) != 1 ) hypre_error_in_arg(1); hypre_MPI_Comm_rank(comm, &myid); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "w")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file\n"); return hypre_error_flag; } local_data = hypre_VectorData(hypre_ParVectorLocalVector(vector)); hypre_fprintf(file, "%b \n", global_size); #ifdef HYPRE_NO_GLOBAL_PARTITION for (i=0; i < 2; i++) { hypre_fprintf(file, "%b ", partitioning[i] + base_j); } #else for (i=0; i <= num_procs; i++) { hypre_fprintf(file, "%b ", partitioning[i] + base_j); } #endif hypre_fprintf(file, "\n"); #ifdef HYPRE_NO_GLOBAL_PARTITION part0 = partitioning[0]; for (j = part0; j < partitioning[1]; j++) { hypre_fprintf(file, "%b %.14e\n", j + base_j, local_data[(HYPRE_Int)(j-part0)]); } #else part0 = partitioning[myid]; for (j = part0; j < partitioning[myid+1]; j++) { hypre_fprintf(file, "%b %.14e\n", j + base_j, local_data[(HYPRE_Int)(j-part0)]); } #endif fclose(file); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorReadIJ * Warning: wrong base for assumed partition if base > 0 *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorReadIJ( MPI_Comm comm, const char *filename, HYPRE_Int *base_j_ptr, hypre_ParVector **vector_ptr ) { HYPRE_BigInt global_size, J; hypre_ParVector *vector; hypre_Vector *local_vector; HYPRE_Complex *local_data; HYPRE_BigInt *partitioning; HYPRE_Int base_j; HYPRE_Int myid, num_procs, i, j; char new_filename[255]; FILE *file; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open input file\n"); return hypre_error_flag; } hypre_fscanf(file, "%b", &global_size); #ifdef HYPRE_NO_GLOBAL_PARTITION /* this may need to be changed so that the base is available in the file! */ partitioning = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); /* read back exactly the two partition entries that PrintIJ wrote */ for (i = 0; i < 2; i++) { hypre_fscanf(file, "%b", partitioning+i); } /* This is not yet implemented correctly!
*/ base_j = 0; #else partitioning = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); hypre_fscanf(file, "%b", partitioning); for (i = 1; i <= num_procs; i++) { hypre_fscanf(file, "%b", partitioning+i); partitioning[i] -= partitioning[0]; } base_j = (HYPRE_Int)partitioning[0]; partitioning[0] = 0; #endif vector = hypre_ParVectorCreate(comm, global_size, partitioning); hypre_ParVectorInitialize(vector); local_vector = hypre_ParVectorLocalVector(vector); local_data = hypre_VectorData(local_vector); #ifdef HYPRE_NO_GLOBAL_PARTITION for (j = 0; j < (HYPRE_Int)(partitioning[1] - partitioning[0]); j++) { hypre_fscanf(file, "%b %le", &J, local_data + j); } #else for (j = 0; j < (HYPRE_Int)(partitioning[myid+1] - partitioning[myid]); j++) { hypre_fscanf(file, "%b %le", &J, local_data + j); } #endif fclose(file); *base_j_ptr = base_j; *vector_ptr = vector; /* multivector code not written yet */ hypre_assert( hypre_ParVectorNumVectors(vector) == 1 ); if ( hypre_ParVectorNumVectors(vector) != 1 ) hypre_error(HYPRE_ERROR_GENERIC); return hypre_error_flag; } /*-------------------------------------------------------------------- * hypre_FillResponseParToVectorAll * Fill response function for determining the send processors * data exchange *--------------------------------------------------------------------*/ HYPRE_Int hypre_FillResponseParToVectorAll( void *p_recv_contact_buf, HYPRE_Int contact_size, HYPRE_Int contact_proc, void *ro, MPI_Comm comm, void **p_send_response_buf, HYPRE_Int *response_message_size ) { HYPRE_Int myid; HYPRE_Int i, index, count, elength; HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf; hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*)ro; hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*)response_obj->data2; hypre_MPI_Comm_rank(comm, &myid ); /*check to see if we need to allocate more space in send_proc_obj for ids*/ if (send_proc_obj->length == send_proc_obj->storage_length) { send_proc_obj->storage_length +=10; /*add space for 10 more processors*/ send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int, send_proc_obj->storage_length, HYPRE_MEMORY_HOST); send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int, send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST); } /*initialize*/ count = send_proc_obj->length; index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/ /*send proc*/ send_proc_obj->id[count] = contact_proc; /*do we need more storage for the elements?*/ if (send_proc_obj->element_storage_length < index + contact_size) { elength = hypre_max(contact_size, 10); elength += index; send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements, HYPRE_BigInt, elength, HYPRE_MEMORY_HOST); send_proc_obj->element_storage_length = elength; } /*populate send_proc_obj*/ for (i=0; i< contact_size; i++) { send_proc_obj->elements[index++] = recv_contact_buf[i]; } send_proc_obj->vec_starts[count+1] = index; send_proc_obj->length++; /*output - no message to return (confirmation) */ *response_message_size = 0; return hypre_error_flag; } /* ----------------------------------------------------------------------------- * return the sum of all local elements of the vector * ----------------------------------------------------------------------------- */ HYPRE_Complex hypre_ParVectorLocalSumElts( hypre_ParVector * vector ) { return hypre_SeqVectorSumElts( hypre_ParVectorLocalVector(vector) ); } /* #ifdef HYPRE_USING_UNIFIED_MEMORY hypre_int 
hypre_ParVectorIsManaged(hypre_ParVector *vector){ if (vector==NULL) return 1; return hypre_SeqVectorIsManaged(hypre_ParVectorLocalVector(vector)); } #endif */ HYPRE_Int hypre_ParVectorGetValues(hypre_ParVector *vector, HYPRE_Int num_values, HYPRE_BigInt *indices, HYPRE_Complex *values) { HYPRE_Int i, j; HYPRE_BigInt first_index, last_index, index; hypre_Vector *local_vector; HYPRE_Complex *data; first_index = hypre_ParVectorFirstIndex(vector); last_index = hypre_ParVectorLastIndex(vector); local_vector = hypre_ParVectorLocalVector(vector); data = hypre_VectorData(local_vector); if (hypre_VectorOwnsData(local_vector) == 0) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Vector does not own data! -- hypre_ParVectorGetValues."); return hypre_error_flag; } if (indices) { for (i=0; i < num_values; i++) { index = indices[i]; if (index < first_index || index > last_index) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Index out of range! -- hypre_ParVectorGetValues."); return hypre_error_flag; } } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_values; j++) { i = (HYPRE_Int)(indices[j] - first_index); values[j] = data[i]; } } else { if (num_values > hypre_VectorSize(local_vector)) { hypre_error_in_arg(2); return hypre_error_flag; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_values; j++) values[j] = data[j]; } return hypre_error_flag; }
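/*--------------------------------------------------------------------
 * Illustrative usage sketch (not part of the original file): shows how
 * hypre_ParVectorGetValues above is typically driven.  `par_v` is a
 * hypothetical, already-initialized ParVector that owns at least NVALS
 * rows on this rank; GetValues rejects indices outside the caller's
 * [first_index, last_index] range, so only locally owned rows are
 * requested here.
 *--------------------------------------------------------------------*/
#define NVALS 3
static void example_get_owned_values( hypre_ParVector *par_v )
{
   HYPRE_BigInt  first = hypre_ParVectorFirstIndex(par_v);
   HYPRE_BigInt  indices[NVALS];
   HYPRE_Complex values[NVALS];
   HYPRE_Int     i;

   /* ask for the first NVALS rows of this rank's local range */
   for (i = 0; i < NVALS; i++)
   {
      indices[i] = first + (HYPRE_BigInt) i;
   }
   hypre_ParVectorGetValues(par_v, NVALS, indices, values);
   /* values[] now holds the first NVALS locally owned entries */
}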
ratecontrol.c
/***************************************************-*- coding: iso-8859-1 -*- * ratecontrol.c: h264 encoder library (Rate Control) ***************************************************************************** * Copyright (C) 2005-2008 x264 project * * Authors: Loren Merritt <lorenm@u.washington.edu> * Michael Niedermayer <michaelni@gmx.at> * Gabriel Bouvigne <gabriel.bouvigne@joost.com> * Jason Garrett-Glaser <darkshikari@gmail.com> * Måns Rullgård <mru@mru.ath.cx> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA. *****************************************************************************/ #define _ISOC99_SOURCE #undef NDEBUG // always check asserts, the speed effect is far too small to disable them #include <math.h> #include <omp.h> #include <limits.h> #include <assert.h> #include "common/common.h" #include "common/cpu.h" #include "ratecontrol.h" typedef struct { int pict_type; int kept_as_ref; float qscale; int mv_bits; int tex_bits; int misc_bits; uint64_t expected_bits; /*total expected bits up to the current frame (current one excluded)*/ double expected_vbv; float new_qscale; int new_qp; int i_count; int p_count; int s_count; float blurred_complexity; char direct_mode; } ratecontrol_entry_t; typedef struct { double coeff; double count; double decay; } predictor_t; struct x264_ratecontrol_t { /* constants */ int b_abr; int b_2pass; int b_vbv; int b_vbv_min_rate; double fps; double bitrate; double rate_tolerance; int nmb; /* number of macroblocks in a frame */ int qp_constant[5]; /* current frame */ ratecontrol_entry_t *rce; int qp; /* qp for current frame */ int qpm; /* qp for current macroblock */ float f_qpm; /* qp for current macroblock: precise float for AQ */ float qpa_rc; /* average of macroblocks' qp before aq */ float qpa_aq; /* average of macroblocks' qp after aq */ int qp_force; /* VBV stuff */ double buffer_size; double buffer_fill_final; /* real buffer as of the last finished frame */ double buffer_fill; /* planned buffer, if all in-progress frames hit their bit budget */ double buffer_rate; /* # of bits added to buffer_fill after each frame */ predictor_t *pred; /* predict frame size from satd */ /* ABR stuff */ int last_satd; double last_rceq; double cplxr_sum; /* sum of bits*qscale/rceq */ double expected_bits_sum; /* sum of qscale2bits after rceq, ratefactor, and overflow, only includes finished frames */ double wanted_bits_window; /* target bitrate * window */ double cbr_decay; double short_term_cplxsum; double short_term_cplxcount; double rate_factor_constant; double ip_offset; double pb_offset; /* 2pass stuff */ FILE *p_stat_file_out; char *psz_stat_file_tmpname; int num_entries; /* number of ratecontrol_entry_ts */ ratecontrol_entry_t *entry; /* FIXME: copy needed data and free this once init is done */ double last_qscale; double last_qscale_for[5]; /* last qscale for a specific pict type, used for max_diff & ipb factor
stuff */ int last_non_b_pict_type; double accum_p_qp; /* for determining I-frame quant */ double accum_p_norm; double last_accum_p_norm; double lmin[5]; /* min qscale by frame type */ double lmax[5]; double lstep; /* max change (multiply) in qscale per frame */ /* MBRC stuff */ double frame_size_estimated; double frame_size_planned; predictor_t *row_pred; predictor_t row_preds[5]; predictor_t *pred_b_from_p; /* predict B-frame size from P-frame satd */ int bframes; /* # consecutive B-frames before this P-frame */ int bframe_bits; /* total cost of those frames */ int i_zones; x264_zone_t *zones; x264_zone_t *prev_zone; }; static int parse_zones( x264_t *h ); static int init_pass2(x264_t *); static float rate_estimate_qscale( x264_t *h ); static void update_vbv( x264_t *h, int bits ); static void update_vbv_plan( x264_t *h ); static double predict_size( predictor_t *p, double q, double var ); static void update_predictor( predictor_t *p, double q, double var, double bits ); /* Terminology: * qp = h.264's quantizer * qscale = linearized quantizer = Lagrange multiplier * (e.g. qp 26 corresponds to qscale = 0.85 * 2^((26-12)/6) ~= 4.28) */ static inline double qp2qscale(double qp) { return 0.85 * pow(2.0, ( qp - 12.0 ) / 6.0); } static inline double qscale2qp(double qscale) { return 12.0 + 6.0 * log(qscale/0.85) / log(2.0); } /* Texture bitrate is not quite inversely proportional to qscale, * probably due to the changing number of SKIP blocks. * MV bits level off at about qp<=12, because the lambda used * for motion estimation is constant there. */ static inline double qscale2bits(ratecontrol_entry_t *rce, double qscale) { if(qscale<0.1) qscale = 0.1; return (rce->tex_bits + .1) * pow( rce->qscale / qscale, 1.1 ) + rce->mv_bits * pow( X264_MAX(rce->qscale, 1) / X264_MAX(qscale, 1), 0.5 ) + rce->misc_bits; } // Find the total AC energy of the macroblock in all planes. static NOINLINE int ac_energy_mb( x264_t *h, int mb_x, int mb_y, x264_frame_t *frame ) { /* This function contains annoying hacks because GCC has a habit of reordering emms * and putting it after floating point ops. As a result, we put the emms at the end of the * function and make sure that it's always called before the float math. Noinline makes * sure no reordering goes on. */ unsigned int var=0, sad, i; for( i=0; i<3; i++ ) { int w = i ? 8 : 16; int stride = frame->i_stride[i]; int offset = h->mb.b_interlaced ? w * (mb_x + (mb_y&~1) * stride) + (mb_y&1) * stride : w * (mb_x + mb_y * stride); int pix = i ? PIXEL_8x8 : PIXEL_16x16; stride <<= h->mb.b_interlaced; var += h->pixf.var[pix]( frame->plane[i]+offset, stride, &sad ); } var = X264_MAX(var,1); x264_emms(); return var; } void x264_adaptive_quant_frame( x264_t *h, x264_frame_t *frame ) { int mb_x, mb_y; for( mb_y=0; mb_y<h->sps->i_mb_height; mb_y++ ) for( mb_x=0; mb_x<h->sps->i_mb_width; mb_x++ ) { int energy = ac_energy_mb( h, mb_x, mb_y, frame ); /* the constant 10 was chosen to give approximately the same overall bitrate as without AQ */ float qp_adj = h->param.rc.f_aq_strength * 1.5 * (logf(energy) - 10.0); frame->f_qp_offset[mb_x + mb_y*h->mb.i_mb_stride] = qp_adj; if( h->frames.b_have_lowres ) frame->i_inv_qscale_factor[mb_x+mb_y*h->mb.i_mb_stride] = FIX8(pow(2.0,-qp_adj/6.0)); } } /***************************************************************************** * x264_adaptive_quant: * adjust macroblock QP based on variance (AC energy) of the MB. * high variance = higher QP * low variance = lower QP * This generally increases SSIM and lowers PSNR.
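 * For example, with the formula in x264_adaptive_quant_frame above and
 * f_aq_strength = 1.0: an MB whose AC energy is e^12 (~1.6e5) gets
 * qp_adj = 1.5 * (12 - 10) = +3 QP, while one at e^8 gets -3 QP, so
 * flat areas end up quantized more finely.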
*****************************************************************************/ void x264_adaptive_quant( x264_t *h ) { float qp, qp_adj; x264_emms(); qp = h->rc->f_qpm; qp_adj = h->fenc->f_qp_offset[h->mb.i_mb_x + h->mb.i_mb_y*h->mb.i_mb_stride]; h->mb.i_qp = x264_clip3( qp + qp_adj + .5, h->param.rc.i_qp_min, h->param.rc.i_qp_max ); /* If the QP of this MB is within 1 of the previous MB, code the same QP as the previous MB, * to lower the bit cost of the qp_delta. */ if( abs(h->mb.i_qp - h->mb.i_last_qp) == 1 ) h->mb.i_qp = h->mb.i_last_qp; h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp]; } int x264_ratecontrol_new( x264_t *h ) { x264_ratecontrol_t *rc; int i; x264_emms(); rc = h->rc = x264_malloc( h->param.i_threads * sizeof(x264_ratecontrol_t) ); memset( rc, 0, h->param.i_threads * sizeof(x264_ratecontrol_t) ); rc->b_abr = h->param.rc.i_rc_method != X264_RC_CQP && !h->param.rc.b_stat_read; rc->b_2pass = h->param.rc.i_rc_method == X264_RC_ABR && h->param.rc.b_stat_read; /* FIXME: use integers */ if(h->param.i_fps_num > 0 && h->param.i_fps_den > 0) rc->fps = (float) h->param.i_fps_num / h->param.i_fps_den; else rc->fps = 25.0; rc->bitrate = h->param.rc.i_bitrate * 1000.; rc->rate_tolerance = h->param.rc.f_rate_tolerance; rc->nmb = h->mb.i_mb_count; rc->last_non_b_pict_type = -1; rc->cbr_decay = 1.0; if( h->param.rc.i_rc_method == X264_RC_CRF && h->param.rc.b_stat_read ) { x264_log(h, X264_LOG_ERROR, "constant rate-factor is incompatible with 2pass.\n"); return -1; } if( h->param.rc.i_vbv_buffer_size ) { if( h->param.rc.i_rc_method == X264_RC_CQP ) { x264_log(h, X264_LOG_WARNING, "VBV is incompatible with constant QP, ignored.\n"); h->param.rc.i_vbv_max_bitrate = 0; h->param.rc.i_vbv_buffer_size = 0; } else if( h->param.rc.i_vbv_max_bitrate == 0 ) { x264_log( h, X264_LOG_DEBUG, "VBV maxrate unspecified, assuming CBR\n" ); h->param.rc.i_vbv_max_bitrate = h->param.rc.i_bitrate; } } if( h->param.rc.i_vbv_max_bitrate < h->param.rc.i_bitrate && h->param.rc.i_vbv_max_bitrate > 0) x264_log(h, X264_LOG_WARNING, "max bitrate less than average bitrate, ignored.\n"); else if( h->param.rc.i_vbv_max_bitrate > 0 && h->param.rc.i_vbv_buffer_size > 0 ) { if( h->param.rc.i_vbv_buffer_size < 3 * h->param.rc.i_vbv_max_bitrate / rc->fps ) { h->param.rc.i_vbv_buffer_size = 3 * h->param.rc.i_vbv_max_bitrate / rc->fps; x264_log( h, X264_LOG_WARNING, "VBV buffer size too small, using %d kbit\n", h->param.rc.i_vbv_buffer_size ); } if( h->param.rc.f_vbv_buffer_init > 1. ) h->param.rc.f_vbv_buffer_init = x264_clip3f( h->param.rc.f_vbv_buffer_init / h->param.rc.i_vbv_buffer_size, 0, 1 ); rc->buffer_rate = h->param.rc.i_vbv_max_bitrate * 1000. 
/ rc->fps; rc->buffer_size = h->param.rc.i_vbv_buffer_size * 1000.; rc->buffer_fill_final = rc->buffer_size * h->param.rc.f_vbv_buffer_init; rc->cbr_decay = 1.0 - rc->buffer_rate / rc->buffer_size * 0.5 * X264_MAX(0, 1.5 - rc->buffer_rate * rc->fps / rc->bitrate); rc->b_vbv = 1; rc->b_vbv_min_rate = !rc->b_2pass && h->param.rc.i_rc_method == X264_RC_ABR && h->param.rc.i_vbv_max_bitrate <= h->param.rc.i_bitrate; } else if( h->param.rc.i_vbv_max_bitrate ) { x264_log(h, X264_LOG_WARNING, "VBV maxrate specified, but no bufsize.\n"); h->param.rc.i_vbv_max_bitrate = 0; } if(rc->rate_tolerance < 0.01) { x264_log(h, X264_LOG_WARNING, "bitrate tolerance too small, using .01\n"); rc->rate_tolerance = 0.01; } h->mb.b_variable_qp = rc->b_vbv || h->param.rc.i_aq_mode; if( rc->b_abr ) { /* FIXME ABR_INIT_QP is actually used only in CRF */ #define ABR_INIT_QP ( h->param.rc.i_rc_method == X264_RC_CRF ? h->param.rc.f_rf_constant : 24 ) rc->accum_p_norm = .01; rc->accum_p_qp = ABR_INIT_QP * rc->accum_p_norm; /* estimated ratio that produces a reasonable QP for the first I-frame */ rc->cplxr_sum = .01 * pow( 7.0e5, h->param.rc.f_qcompress ) * pow( h->mb.i_mb_count, 0.5 ); rc->wanted_bits_window = 1.0 * rc->bitrate / rc->fps; rc->last_non_b_pict_type = SLICE_TYPE_I; } if( h->param.rc.i_rc_method == X264_RC_CRF ) { /* arbitrary rescaling to make CRF somewhat similar to QP */ double base_cplx = h->mb.i_mb_count * (h->param.i_bframe ? 120 : 80); rc->rate_factor_constant = pow( base_cplx, 1 - h->param.rc.f_qcompress ) / qp2qscale( h->param.rc.f_rf_constant ); } rc->ip_offset = 6.0 * log(h->param.rc.f_ip_factor) / log(2.0); rc->pb_offset = 6.0 * log(h->param.rc.f_pb_factor) / log(2.0); rc->qp_constant[SLICE_TYPE_P] = h->param.rc.i_qp_constant; rc->qp_constant[SLICE_TYPE_I] = x264_clip3( h->param.rc.i_qp_constant - rc->ip_offset + 0.5, 0, 51 ); rc->qp_constant[SLICE_TYPE_B] = x264_clip3( h->param.rc.i_qp_constant + rc->pb_offset + 0.5, 0, 51 ); rc->lstep = pow( 2, h->param.rc.i_qp_step / 6.0 ); rc->last_qscale = qp2qscale(26); rc->pred = x264_malloc( 5*sizeof(predictor_t) ); rc->pred_b_from_p = x264_malloc( sizeof(predictor_t) ); for( i = 0; i < 5; i++ ) { rc->last_qscale_for[i] = qp2qscale( ABR_INIT_QP ); rc->lmin[i] = qp2qscale( h->param.rc.i_qp_min ); rc->lmax[i] = qp2qscale( h->param.rc.i_qp_max ); rc->pred[i].coeff= 2.0; rc->pred[i].count= 1.0; rc->pred[i].decay= 0.5; rc->row_preds[i].coeff= .25; rc->row_preds[i].count= 1.0; rc->row_preds[i].decay= 0.5; } *rc->pred_b_from_p = rc->pred[0]; if( parse_zones( h ) < 0 ) { x264_log( h, X264_LOG_ERROR, "failed to parse zones\n" ); return -1; } /* Load stat file and init 2pass algo */ if( h->param.rc.b_stat_read ) { char *p, *stats_in, *stats_buf; /* read 1st pass stats */ assert( h->param.rc.psz_stat_in ); stats_buf = stats_in = x264_slurp_file( h->param.rc.psz_stat_in ); if( !stats_buf ) { x264_log(h, X264_LOG_ERROR, "ratecontrol_init: can't open stats file\n"); return -1; } /* check whether 1st pass options were compatible with current options */ if( !strncmp( stats_buf, "#options:", 9 ) ) { int i; char *opts = stats_buf; stats_in = strchr( stats_buf, '\n' ); if( !stats_in ) return -1; *stats_in = '\0'; stats_in++; if( ( p = strstr( opts, "bframes=" ) ) && sscanf( p, "bframes=%d", &i ) && h->param.i_bframe != i ) { x264_log( h, X264_LOG_ERROR, "different number of B-frames than 1st pass (%d vs %d)\n", h->param.i_bframe, i ); return -1; } /* since B-adapt doesn't (yet) take into account B-pyramid, * the converse is not a problem */ if( strstr( opts, "b_pyramid=1" ) 
&& !h->param.b_bframe_pyramid ) x264_log( h, X264_LOG_WARNING, "1st pass used B-pyramid, 2nd doesn't\n" ); if( ( p = strstr( opts, "keyint=" ) ) && sscanf( p, "keyint=%d", &i ) && h->param.i_keyint_max != i ) x264_log( h, X264_LOG_WARNING, "different keyint than 1st pass (%d vs %d)\n", h->param.i_keyint_max, i ); if( strstr( opts, "qp=0" ) && h->param.rc.i_rc_method == X264_RC_ABR ) x264_log( h, X264_LOG_WARNING, "1st pass was lossless, bitrate prediction will be inaccurate\n" ); if( ( p = strstr( opts, "b_adapt=" ) ) && sscanf( p, "b_adapt=%d", &i ) && i >= X264_B_ADAPT_NONE && i <= X264_B_ADAPT_TRELLIS ) h->param.i_bframe_adaptive = i; else if( h->param.i_bframe ) { x264_log( h, X264_LOG_ERROR, "b_adapt method specified in stats file not valid\n" ); return -1; } if( ( p = strstr( opts, "scenecut=" ) ) && sscanf( p, "scenecut=%d", &i ) && i >= -1 && i <= 100 ) { h->param.i_scenecut_threshold = i; h->param.b_pre_scenecut = !!strstr( p, "(pre)" ); } else { x264_log( h, X264_LOG_ERROR, "scenecut method specified in stats file not valid\n" ); return -1; } } /* find number of pics */ p = stats_in; for(i=-1; p; i++) p = strchr(p+1, ';'); if(i==0) { x264_log(h, X264_LOG_ERROR, "empty stats file\n"); return -1; } rc->num_entries = i; if( h->param.i_frame_total < rc->num_entries && h->param.i_frame_total > 0 ) { x264_log( h, X264_LOG_WARNING, "2nd pass has fewer frames than 1st pass (%d vs %d)\n", h->param.i_frame_total, rc->num_entries ); } if( h->param.i_frame_total > rc->num_entries ) { x264_log( h, X264_LOG_ERROR, "2nd pass has more frames than 1st pass (%d vs %d)\n", h->param.i_frame_total, rc->num_entries ); return -1; } rc->entry = (ratecontrol_entry_t*) x264_malloc(rc->num_entries * sizeof(ratecontrol_entry_t)); memset(rc->entry, 0, rc->num_entries * sizeof(ratecontrol_entry_t)); /* init all to skipped p frames */ for(i=0; i<rc->num_entries; i++) { ratecontrol_entry_t *rce = &rc->entry[i]; rce->pict_type = SLICE_TYPE_P; rce->qscale = rce->new_qscale = qp2qscale(20); rce->misc_bits = rc->nmb + 10; rce->new_qp = 0; } /* read stats */ p = stats_in; for(i=0; i < rc->num_entries; i++) { ratecontrol_entry_t *rce; int frame_number; char pict_type; int e; char *next; float qp; next= strchr(p, ';'); if(next) { (*next)=0; //sscanf is unbelievably slow on long strings next++; } e = sscanf(p, " in:%d ", &frame_number); if(frame_number < 0 || frame_number >= rc->num_entries) { x264_log(h, X264_LOG_ERROR, "bad frame number (%d) at stats line %d\n", frame_number, i); return -1; } rce = &rc->entry[frame_number]; rce->direct_mode = 0; e += sscanf(p, " in:%*d out:%*d type:%c q:%f tex:%d mv:%d misc:%d imb:%d pmb:%d smb:%d d:%c", &pict_type, &qp, &rce->tex_bits, &rce->mv_bits, &rce->misc_bits, &rce->i_count, &rce->p_count, &rce->s_count, &rce->direct_mode); switch(pict_type) { case 'I': rce->kept_as_ref = 1; case 'i': rce->pict_type = SLICE_TYPE_I; break; case 'P': rce->pict_type = SLICE_TYPE_P; break; case 'B': rce->kept_as_ref = 1; case 'b': rce->pict_type = SLICE_TYPE_B; break; default: e = -1; break; } if(e < 10) { x264_log(h, X264_LOG_ERROR, "statistics are damaged at line %d, parser out=%d\n", i, e); return -1; } rce->qscale = qp2qscale(qp); p = next; } x264_free(stats_buf); if(h->param.rc.i_rc_method == X264_RC_ABR) { if(init_pass2(h) < 0) return -1; } /* else we're using constant quant, so no need to run the bitrate allocation */ } /* Open output file */ /* If input and output files are the same, output to a temp file * and move it to the real name only when it's complete */ if( 
h->param.rc.b_stat_write ) { char *p; rc->psz_stat_file_tmpname = x264_malloc( strlen(h->param.rc.psz_stat_out) + 6 ); strcpy( rc->psz_stat_file_tmpname, h->param.rc.psz_stat_out ); strcat( rc->psz_stat_file_tmpname, ".temp" ); rc->p_stat_file_out = fopen( rc->psz_stat_file_tmpname, "wb" ); if( rc->p_stat_file_out == NULL ) { x264_log(h, X264_LOG_ERROR, "ratecontrol_init: can't open stats file\n"); return -1; } p = x264_param2string( &h->param, 1 ); fprintf( rc->p_stat_file_out, "#options: %s\n", p ); x264_free( p ); } for( i=0; i<h->param.i_threads; i++ ) { h->thread[i]->rc = rc+i; if( i ) { rc[i] = rc[0]; memcpy( &h->thread[i]->param, &h->param, sizeof( x264_param_t ) ); h->thread[i]->mb.b_variable_qp = h->mb.b_variable_qp; } } return 0; } static int parse_zone( x264_t *h, x264_zone_t *z, char *p ) { int len = 0; char *tok, *saveptr; z->param = NULL; z->f_bitrate_factor = 1; if( 3 <= sscanf(p, "%u,%u,q=%u%n", &z->i_start, &z->i_end, &z->i_qp, &len) ) z->b_force_qp = 1; else if( 3 <= sscanf(p, "%u,%u,b=%f%n", &z->i_start, &z->i_end, &z->f_bitrate_factor, &len) ) z->b_force_qp = 0; else if( 2 <= sscanf(p, "%u,%u%n", &z->i_start, &z->i_end, &len) ) z->b_force_qp = 0; else { x264_log( h, X264_LOG_ERROR, "invalid zone: \"%s\"\n", p ); return -1; } p += len; if( !*p ) return 0; z->param = malloc( sizeof(x264_param_t) ); memcpy( z->param, &h->param, sizeof(x264_param_t) ); while( (tok = strtok_r( p, ",", &saveptr )) ) { char *val = strchr( tok, '=' ); if( val ) { *val = '\0'; val++; } if( x264_param_parse( z->param, tok, val ) ) { x264_log( h, X264_LOG_ERROR, "invalid zone param: %s = %s\n", tok, val ); return -1; } p = NULL; } return 0; } static int parse_zones( x264_t *h ) { x264_ratecontrol_t *rc = h->rc; int i; if( h->param.rc.psz_zones && !h->param.rc.i_zones ) { char *p, *tok, *saveptr; char *psz_zones = x264_malloc( strlen(h->param.rc.psz_zones)+1 ); strcpy( psz_zones, h->param.rc.psz_zones ); h->param.rc.i_zones = 1; for( p = psz_zones; *p; p++ ) h->param.rc.i_zones += (*p == '/'); h->param.rc.zones = x264_malloc( h->param.rc.i_zones * sizeof(x264_zone_t) ); p = psz_zones; for( i = 0; i < h->param.rc.i_zones; i++ ) { tok = strtok_r( p, "/", &saveptr ); if( !tok || parse_zone( h, &h->param.rc.zones[i], tok ) ) return -1; p = NULL; } x264_free( psz_zones ); } if( h->param.rc.i_zones > 0 ) { for( i = 0; i < h->param.rc.i_zones; i++ ) { x264_zone_t z = h->param.rc.zones[i]; if( z.i_start < 0 || z.i_start > z.i_end ) { x264_log( h, X264_LOG_ERROR, "invalid zone: start=%d end=%d\n", z.i_start, z.i_end ); return -1; } else if( !z.b_force_qp && z.f_bitrate_factor <= 0 ) { x264_log( h, X264_LOG_ERROR, "invalid zone: bitrate_factor=%f\n", z.f_bitrate_factor ); return -1; } } rc->i_zones = h->param.rc.i_zones + 1; rc->zones = x264_malloc( rc->i_zones * sizeof(x264_zone_t) ); memcpy( rc->zones+1, h->param.rc.zones, (rc->i_zones-1) * sizeof(x264_zone_t) ); // default zone to fall back to if none of the others match rc->zones[0].i_start = 0; rc->zones[0].i_end = INT_MAX; rc->zones[0].b_force_qp = 0; rc->zones[0].f_bitrate_factor = 1; rc->zones[0].param = x264_malloc( sizeof(x264_param_t) ); memcpy( rc->zones[0].param, &h->param, sizeof(x264_param_t) ); for( i = 1; i < rc->i_zones; i++ ) { if( !rc->zones[i].param ) rc->zones[i].param = rc->zones[0].param; } } return 0; } static x264_zone_t *get_zone( x264_t *h, int frame_num ) { int i; for( i = h->rc->i_zones-1; i >= 0; i-- ) { x264_zone_t *z = &h->rc->zones[i]; if( frame_num >= z->i_start && frame_num <= z->i_end ) return z; } return NULL; } void 
x264_ratecontrol_summary( x264_t *h ) { x264_ratecontrol_t *rc = h->rc; if( rc->b_abr && h->param.rc.i_rc_method == X264_RC_ABR && rc->cbr_decay > .9999 ) { double base_cplx = h->mb.i_mb_count * (h->param.i_bframe ? 120 : 80); x264_log( h, X264_LOG_INFO, "final ratefactor: %.2f\n", qscale2qp( pow( base_cplx, 1 - h->param.rc.f_qcompress ) * rc->cplxr_sum / rc->wanted_bits_window ) ); } } void x264_ratecontrol_delete( x264_t *h ) { x264_ratecontrol_t *rc = h->rc; int i; if( rc->p_stat_file_out ) { fclose( rc->p_stat_file_out ); if( h->i_frame >= rc->num_entries ) if( rename( rc->psz_stat_file_tmpname, h->param.rc.psz_stat_out ) != 0 ) { x264_log( h, X264_LOG_ERROR, "failed to rename \"%s\" to \"%s\"\n", rc->psz_stat_file_tmpname, h->param.rc.psz_stat_out ); } x264_free( rc->psz_stat_file_tmpname ); } x264_free( rc->pred ); x264_free( rc->pred_b_from_p ); x264_free( rc->entry ); if( rc->zones ) { x264_free( rc->zones[0].param ); if( h->param.rc.psz_zones ) for( i=1; i<rc->i_zones; i++ ) if( rc->zones[i].param != rc->zones[0].param ) x264_free( rc->zones[i].param ); x264_free( rc->zones ); } x264_free( rc ); } void x264_ratecontrol_set_estimated_size( x264_t *h, int bits ) { #pragma omp critical h->rc->frame_size_estimated = bits; } int x264_ratecontrol_get_estimated_size( x264_t const *h) { int size; #pragma omp critical size = h->rc->frame_size_estimated; return size; } static void accum_p_qp_update( x264_t *h, float qp ) { x264_ratecontrol_t *rc = h->rc; rc->accum_p_qp *= .95; rc->accum_p_norm *= .95; rc->accum_p_norm += 1; if( h->sh.i_type == SLICE_TYPE_I ) rc->accum_p_qp += qp + rc->ip_offset; else rc->accum_p_qp += qp; } /* Before encoding a frame, choose a QP for it */ void x264_ratecontrol_start( x264_t *h, int i_force_qp ) { x264_ratecontrol_t *rc = h->rc; ratecontrol_entry_t *rce = NULL; x264_zone_t *zone = get_zone( h, h->fenc->i_frame ); float q; x264_emms(); if( zone && (!rc->prev_zone || zone->param != rc->prev_zone->param) ) x264_encoder_reconfig( h, zone->param ); rc->prev_zone = zone; rc->qp_force = i_force_qp; if( h->param.rc.b_stat_read ) { int frame = h->fenc->i_frame; assert( frame >= 0 && frame < rc->num_entries ); rce = h->rc->rce = &h->rc->entry[frame]; if( h->sh.i_type == SLICE_TYPE_B && h->param.analyse.i_direct_mv_pred == X264_DIRECT_PRED_AUTO ) { h->sh.b_direct_spatial_mv_pred = ( rce->direct_mode == 's' ); h->mb.b_direct_auto_read = ( rce->direct_mode == 's' || rce->direct_mode == 't' ); } } if( rc->b_vbv ) { memset( h->fdec->i_row_bits, 0, h->sps->i_mb_height * sizeof(int) ); rc->row_pred = &rc->row_preds[h->sh.i_type]; update_vbv_plan( h ); } if( h->sh.i_type != SLICE_TYPE_B ) { rc->bframes = 0; while( h->frames.current[rc->bframes] && IS_X264_TYPE_B(h->frames.current[rc->bframes]->i_type) ) rc->bframes++; } if( i_force_qp ) { q = i_force_qp - 1; } else if( rc->b_abr ) { q = qscale2qp( rate_estimate_qscale( h ) ); } else if( rc->b_2pass ) { rce->new_qscale = rate_estimate_qscale( h ); q = qscale2qp( rce->new_qscale ); } else /* CQP */ { if( h->sh.i_type == SLICE_TYPE_B && h->fdec->b_kept_as_ref ) q = ( rc->qp_constant[ SLICE_TYPE_B ] + rc->qp_constant[ SLICE_TYPE_P ] ) / 2; else q = rc->qp_constant[ h->sh.i_type ]; if( zone ) { if( zone->b_force_qp ) q += zone->i_qp - rc->qp_constant[SLICE_TYPE_P]; else q -= 6*log(zone->f_bitrate_factor)/log(2); } } rc->qpa_rc = rc->qpa_aq = 0; h->fdec->f_qp_avg_rc = h->fdec->f_qp_avg_aq = rc->qpm = rc->qp = x264_clip3( (int)(q + 0.5), 0, 51 ); rc->f_qpm = q; if( rce ) rce->new_qp = rc->qp; /* accum_p_qp needs to be here so that 
future frames can benefit from the * data before this frame is done. but this only works because threading * guarantees to not re-encode any frames. so the non-threaded case does * accum_p_qp later. */ if( h->param.i_threads > 1 ) accum_p_qp_update( h, rc->qp ); if( h->sh.i_type != SLICE_TYPE_B ) rc->last_non_b_pict_type = h->sh.i_type; } static double predict_row_size( x264_t *h, int y, int qp ) { /* average between two predictors: * absolute SATD, and scaled bit cost of the colocated row in the previous frame */ x264_ratecontrol_t *rc = h->rc; double pred_s = predict_size( rc->row_pred, qp2qscale(qp), h->fdec->i_row_satd[y] ); double pred_t = 0; if( h->sh.i_type != SLICE_TYPE_I && h->fref0[0]->i_type == h->fdec->i_type && h->fref0[0]->i_row_satd[y] > 0 && (abs(h->fref0[0]->i_row_satd[y] - h->fdec->i_row_satd[y]) < h->fdec->i_row_satd[y]/2)) { pred_t = h->fref0[0]->i_row_bits[y] * h->fdec->i_row_satd[y] / h->fref0[0]->i_row_satd[y] * qp2qscale(h->fref0[0]->i_row_qp[y]) / qp2qscale(qp); } if( pred_t == 0 ) pred_t = pred_s; return (pred_s + pred_t) / 2; } static double row_bits_so_far( x264_t *h, int y ) { int i; double bits = 0; for( i = 0; i <= y; i++ ) bits += h->fdec->i_row_bits[i]; return bits; } static double predict_row_size_sum( x264_t *h, int y, int qp ) { int i; double bits = row_bits_so_far(h, y); for( i = y+1; i < h->sps->i_mb_height; i++ ) bits += predict_row_size( h, i, qp ); return bits; } void x264_ratecontrol_mb( x264_t *h, int bits ) { x264_ratecontrol_t *rc = h->rc; const int y = h->mb.i_mb_y; x264_emms(); h->fdec->i_row_bits[y] += bits; rc->qpa_rc += rc->f_qpm; rc->qpa_aq += h->mb.i_qp; if( h->mb.i_mb_x != h->sps->i_mb_width - 1 || !rc->b_vbv) return; h->fdec->i_row_qp[y] = rc->qpm; if( h->sh.i_type == SLICE_TYPE_B ) { /* B-frames shouldn't use lower QP than their reference frames. * This code is a bit overzealous in limiting B-frame quantizers, but it helps avoid * underflows due to the fact that B-frames are not explicitly covered by VBV. */ if( y < h->sps->i_mb_height-1 ) { int i_estimated; int avg_qp = X264_MAX(h->fref0[0]->i_row_qp[y+1], h->fref1[0]->i_row_qp[y+1]) + rc->pb_offset * ((h->fenc->i_type == X264_TYPE_BREF) ? 0.5 : 1); rc->qpm = X264_MIN(X264_MAX( rc->qp, avg_qp), 51); //avg_qp could go higher than 51 due to pb_offset i_estimated = row_bits_so_far(h, y); //FIXME: compute full estimated size if (i_estimated > h->rc->frame_size_planned) x264_ratecontrol_set_estimated_size(h, i_estimated); } } else { update_predictor( rc->row_pred, qp2qscale(rc->qpm), h->fdec->i_row_satd[y], h->fdec->i_row_bits[y] ); /* tweak quality based on difference from predicted size */ if( y < h->sps->i_mb_height-1 && h->stat.i_slice_count[h->sh.i_type] > 0 ) { int prev_row_qp = h->fdec->i_row_qp[y]; int b0 = predict_row_size_sum( h, y, rc->qpm ); int b1 = b0; int i_qp_max = X264_MIN( prev_row_qp + h->param.rc.i_qp_step, h->param.rc.i_qp_max ); int i_qp_min = X264_MAX( prev_row_qp - h->param.rc.i_qp_step, h->param.rc.i_qp_min ); float buffer_left_planned = rc->buffer_fill - rc->frame_size_planned; float rc_tol = 1; float headroom = 0; /* Don't modify the row QPs until a sufficent amount of the bits of the frame have been processed, in case a flat */ /* area at the top of the frame was measured inaccurately. 
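(For example, with frame_size_planned = 100 kbit, the adjustment loops below only engage once row_bits_so_far exceeds roughly 5 kbit.)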
*/ if(row_bits_so_far(h,y) < 0.05 * rc->frame_size_planned) return; headroom = buffer_left_planned/rc->buffer_size; if(h->sh.i_type != SLICE_TYPE_I) headroom /= 2; rc_tol += headroom; if( !rc->b_vbv_min_rate ) i_qp_min = X264_MAX( i_qp_min, h->sh.i_qp ); while( rc->qpm < i_qp_max && (b1 > rc->frame_size_planned * rc_tol || (rc->buffer_fill - b1 < buffer_left_planned * 0.5))) { rc->qpm ++; b1 = predict_row_size_sum( h, y, rc->qpm ); } /* avoid VBV underflow */ while( (rc->qpm < h->param.rc.i_qp_max) && (rc->buffer_fill - b1 < rc->buffer_size * 0.005)) { rc->qpm ++; b1 = predict_row_size_sum( h, y, rc->qpm ); } while( rc->qpm > i_qp_min && rc->qpm > h->fdec->i_row_qp[0] && ((b1 < rc->frame_size_planned * 0.8 && rc->qpm <= prev_row_qp) || b1 < (rc->buffer_fill - rc->buffer_size + rc->buffer_rate) * 1.1) ) { rc->qpm --; b1 = predict_row_size_sum( h, y, rc->qpm ); } x264_ratecontrol_set_estimated_size(h, b1); } } /* loses the fractional part of the frame-wise qp */ rc->f_qpm = rc->qpm; } int x264_ratecontrol_qp( x264_t *h ) { return h->rc->qpm; } /* In 2pass, force the same frame types as in the 1st pass */ int x264_ratecontrol_slice_type( x264_t *h, int frame_num ) { x264_ratecontrol_t *rc = h->rc; if( h->param.rc.b_stat_read ) { if( frame_num >= rc->num_entries ) { /* We could try to initialize everything required for ABR and * adaptive B-frames, but that would be complicated. * So just calculate the average QP used so far. */ int i; h->param.rc.i_qp_constant = (h->stat.i_slice_count[SLICE_TYPE_P] == 0) ? 24 : 1 + h->stat.f_slice_qp[SLICE_TYPE_P] / h->stat.i_slice_count[SLICE_TYPE_P]; rc->qp_constant[SLICE_TYPE_P] = x264_clip3( h->param.rc.i_qp_constant, 0, 51 ); rc->qp_constant[SLICE_TYPE_I] = x264_clip3( (int)( qscale2qp( qp2qscale( h->param.rc.i_qp_constant ) / fabs( h->param.rc.f_ip_factor )) + 0.5 ), 0, 51 ); rc->qp_constant[SLICE_TYPE_B] = x264_clip3( (int)( qscale2qp( qp2qscale( h->param.rc.i_qp_constant ) * fabs( h->param.rc.f_pb_factor )) + 0.5 ), 0, 51 ); x264_log(h, X264_LOG_ERROR, "2nd pass has more frames than 1st pass (%d)\n", rc->num_entries); x264_log(h, X264_LOG_ERROR, "continuing anyway, at constant QP=%d\n", h->param.rc.i_qp_constant); if( h->param.i_bframe_adaptive ) x264_log(h, X264_LOG_ERROR, "disabling adaptive B-frames\n"); for( i = 0; i < h->param.i_threads; i++ ) { h->thread[i]->rc->b_abr = 0; h->thread[i]->rc->b_2pass = 0; h->thread[i]->param.rc.i_rc_method = X264_RC_CQP; h->thread[i]->param.rc.b_stat_read = 0; h->thread[i]->param.i_bframe_adaptive = 0; h->thread[i]->param.b_pre_scenecut = 0; h->thread[i]->param.i_scenecut_threshold = -1; if( h->thread[i]->param.i_bframe > 1 ) h->thread[i]->param.i_bframe = 1; } return X264_TYPE_AUTO; } switch( rc->entry[frame_num].pict_type ) { case SLICE_TYPE_I: return rc->entry[frame_num].kept_as_ref ? X264_TYPE_IDR : X264_TYPE_I; case SLICE_TYPE_B: return rc->entry[frame_num].kept_as_ref ? 
X264_TYPE_BREF : X264_TYPE_B; case SLICE_TYPE_P: default: return X264_TYPE_P; } } else { return X264_TYPE_AUTO; } } /* After encoding one frame, save stats and update ratecontrol state */ void x264_ratecontrol_end( x264_t *h, int bits ) { x264_ratecontrol_t *rc = h->rc; const int *mbs = h->stat.frame.i_mb_count; int i; x264_emms(); h->stat.frame.i_mb_count_skip = mbs[P_SKIP] + mbs[B_SKIP]; h->stat.frame.i_mb_count_i = mbs[I_16x16] + mbs[I_8x8] + mbs[I_4x4]; h->stat.frame.i_mb_count_p = mbs[P_L0] + mbs[P_8x8]; for( i = B_DIRECT; i < B_8x8; i++ ) h->stat.frame.i_mb_count_p += mbs[i]; h->fdec->f_qp_avg_rc = rc->qpa_rc /= h->mb.i_mb_count; h->fdec->f_qp_avg_aq = rc->qpa_aq /= h->mb.i_mb_count; if( h->param.rc.b_stat_write ) { char c_type = h->sh.i_type==SLICE_TYPE_I ? (h->fenc->i_poc==0 ? 'I' : 'i') : h->sh.i_type==SLICE_TYPE_P ? 'P' : h->fenc->b_kept_as_ref ? 'B' : 'b'; int dir_frame = h->stat.frame.i_direct_score[1] - h->stat.frame.i_direct_score[0]; int dir_avg = h->stat.i_direct_score[1] - h->stat.i_direct_score[0]; char c_direct = h->mb.b_direct_auto_write ? ( dir_frame>0 ? 's' : dir_frame<0 ? 't' : dir_avg>0 ? 's' : dir_avg<0 ? 't' : '-' ) : '-'; fprintf( rc->p_stat_file_out, "in:%d out:%d type:%c q:%.2f tex:%d mv:%d misc:%d imb:%d pmb:%d smb:%d d:%c;\n", h->fenc->i_frame, h->i_frame, c_type, rc->qpa_rc, h->stat.frame.i_tex_bits, h->stat.frame.i_mv_bits, h->stat.frame.i_misc_bits, h->stat.frame.i_mb_count_i, h->stat.frame.i_mb_count_p, h->stat.frame.i_mb_count_skip, c_direct); } if( rc->b_abr ) { if( h->sh.i_type != SLICE_TYPE_B ) rc->cplxr_sum += bits * qp2qscale(rc->qpa_rc) / rc->last_rceq; else { /* Depends on the fact that B-frame's QP is an offset from the following P-frame's. * Not perfectly accurate with B-refs, but good enough. */ rc->cplxr_sum += bits * qp2qscale(rc->qpa_rc) / (rc->last_rceq * fabs(h->param.rc.f_pb_factor)); } rc->cplxr_sum *= rc->cbr_decay; rc->wanted_bits_window += rc->bitrate / rc->fps; rc->wanted_bits_window *= rc->cbr_decay; if( h->param.i_threads == 1 ) accum_p_qp_update( h, rc->qpa_rc ); } if( rc->b_2pass ) { rc->expected_bits_sum += qscale2bits( rc->rce, qp2qscale(rc->rce->new_qp) ); } if( h->mb.b_variable_qp ) { if( h->sh.i_type == SLICE_TYPE_B ) { rc->bframe_bits += bits; if( !h->frames.current[0] || !IS_X264_TYPE_B(h->frames.current[0]->i_type) ) { update_predictor( rc->pred_b_from_p, qp2qscale(rc->qpa_rc), h->fref1[h->i_ref1-1]->i_satd, rc->bframe_bits / rc->bframes ); rc->bframe_bits = 0; } } } update_vbv( h, bits ); } /**************************************************************************** * 2 pass functions ***************************************************************************/ /** * modify the bitrate curve from pass1 for one frame */ static double get_qscale(x264_t *h, ratecontrol_entry_t *rce, double rate_factor, int frame_num) { x264_ratecontrol_t *rcc= h->rc; double q; x264_zone_t *zone = get_zone( h, frame_num ); q = pow( rce->blurred_complexity, 1 - h->param.rc.f_qcompress ); // avoid NaN's in the rc_eq if(!isfinite(q) || rce->tex_bits + rce->mv_bits == 0) q = rcc->last_qscale; else { rcc->last_rceq = q; q /= rate_factor; rcc->last_qscale = q; } if( zone ) { if( zone->b_force_qp ) q = qp2qscale(zone->i_qp); else q /= zone->f_bitrate_factor; } return q; } static double get_diff_limited_q(x264_t *h, ratecontrol_entry_t *rce, double q) { x264_ratecontrol_t *rcc = h->rc; const int pict_type = rce->pict_type; // force I/B quants as a function of P quants const double last_p_q = rcc->last_qscale_for[SLICE_TYPE_P]; const double 
last_non_b_q= rcc->last_qscale_for[rcc->last_non_b_pict_type]; if( pict_type == SLICE_TYPE_I ) { double iq = q; double pq = qp2qscale( rcc->accum_p_qp / rcc->accum_p_norm ); double ip_factor = fabs( h->param.rc.f_ip_factor ); /* don't apply ip_factor if the following frame is also I */ if( rcc->accum_p_norm <= 0 ) q = iq; else if( h->param.rc.f_ip_factor < 0 ) q = iq / ip_factor; else if( rcc->accum_p_norm >= 1 ) q = pq / ip_factor; else q = rcc->accum_p_norm * pq / ip_factor + (1 - rcc->accum_p_norm) * iq; } else if( pict_type == SLICE_TYPE_B ) { if( h->param.rc.f_pb_factor > 0 ) q = last_non_b_q; if( !rce->kept_as_ref ) q *= fabs( h->param.rc.f_pb_factor ); } else if( pict_type == SLICE_TYPE_P && rcc->last_non_b_pict_type == SLICE_TYPE_P && rce->tex_bits == 0 ) { q = last_p_q; } /* last qscale / qdiff stuff */ if(rcc->last_non_b_pict_type==pict_type && (pict_type!=SLICE_TYPE_I || rcc->last_accum_p_norm < 1)) { double last_q = rcc->last_qscale_for[pict_type]; double max_qscale = last_q * rcc->lstep; double min_qscale = last_q / rcc->lstep; if (q > max_qscale) q = max_qscale; else if(q < min_qscale) q = min_qscale; } rcc->last_qscale_for[pict_type] = q; if(pict_type!=SLICE_TYPE_B) rcc->last_non_b_pict_type = pict_type; if(pict_type==SLICE_TYPE_I) { rcc->last_accum_p_norm = rcc->accum_p_norm; rcc->accum_p_norm = 0; rcc->accum_p_qp = 0; } if(pict_type==SLICE_TYPE_P) { float mask = 1 - pow( (float)rce->i_count / rcc->nmb, 2 ); rcc->accum_p_qp = mask * (qscale2qp(q) + rcc->accum_p_qp); rcc->accum_p_norm = mask * (1 + rcc->accum_p_norm); } return q; } static double predict_size( predictor_t *p, double q, double var ) { return p->coeff*var / (q*p->count); } static void update_predictor( predictor_t *p, double q, double var, double bits ) { if( var < 10 ) return; p->count *= p->decay; p->coeff *= p->decay; p->count ++; p->coeff += bits*q / var; } // update VBV after encoding a frame static void update_vbv( x264_t *h, int bits ) { x264_ratecontrol_t *rcc = h->rc; x264_ratecontrol_t *rct = h->thread[0]->rc; if( rcc->last_satd >= h->mb.i_mb_count ) update_predictor( &rct->pred[h->sh.i_type], qp2qscale(rcc->qpa_rc), rcc->last_satd, bits ); if( !rcc->b_vbv ) return; rct->buffer_fill_final += rct->buffer_rate - bits; if( rct->buffer_fill_final < 0 ) x264_log( h, X264_LOG_WARNING, "VBV underflow (%.0f bits)\n", rct->buffer_fill_final ); rct->buffer_fill_final = x264_clip3f( rct->buffer_fill_final, 0, rct->buffer_size ); } // provisionally update VBV according to the planned size of all frames currently in progress static void update_vbv_plan( x264_t *h ) { x264_ratecontrol_t *rcc = h->rc; rcc->buffer_fill = h->thread[0]->rc->buffer_fill_final; if( h->param.i_threads > 1 ) { int j = h->rc - h->thread[0]->rc; int i; for( i=1; i<h->param.i_threads; i++ ) { x264_t *t = h->thread[ (j+i)%h->param.i_threads ]; double bits = t->rc->frame_size_planned; if( !t->b_thread_active ) continue; bits = X264_MAX(bits, x264_ratecontrol_get_estimated_size(t)); rcc->buffer_fill += rcc->buffer_rate - bits; rcc->buffer_fill = x264_clip3( rcc->buffer_fill, 0, rcc->buffer_size ); } } } // apply VBV constraints and clip qscale to between lmin and lmax static double clip_qscale( x264_t *h, int pict_type, double q ) { x264_ratecontrol_t *rcc = h->rc; double lmin = rcc->lmin[pict_type]; double lmax = rcc->lmax[pict_type]; double q0 = q; /* B-frames are not directly subject to VBV, * since they are controlled by the P-frames' QPs. 
* FIXME: in 2pass we could modify previous frames' QP too, * instead of waiting for the buffer to fill */ if( rcc->b_vbv && ( pict_type == SLICE_TYPE_P || ( pict_type == SLICE_TYPE_I && rcc->last_non_b_pict_type == SLICE_TYPE_I ) ) ) { if( rcc->buffer_fill/rcc->buffer_size < 0.5 ) q /= x264_clip3f( 2.0*rcc->buffer_fill/rcc->buffer_size, 0.5, 1.0 ); } if( rcc->b_vbv && rcc->last_satd > 0 ) { /* Now a hard threshold to make sure the frame fits in VBV. * This one is mostly for I-frames. */ double bits = predict_size( &rcc->pred[h->sh.i_type], q, rcc->last_satd ); double qf = 1.0; if( bits > rcc->buffer_fill/2 ) qf = x264_clip3f( rcc->buffer_fill/(2*bits), 0.2, 1.0 ); q /= qf; bits *= qf; if( bits < rcc->buffer_rate/2 ) q *= bits*2/rcc->buffer_rate; q = X264_MAX( q0, q ); /* Check B-frame complexity, and use up any bits that would * overflow before the next P-frame. */ if( h->sh.i_type == SLICE_TYPE_P ) { int nb = rcc->bframes; double pbbits = bits; double bbits = predict_size( rcc->pred_b_from_p, q * h->param.rc.f_pb_factor, rcc->last_satd ); double space; if( bbits > rcc->buffer_rate ) nb = 0; pbbits += nb * bbits; space = rcc->buffer_fill + (1+nb)*rcc->buffer_rate - rcc->buffer_size; if( pbbits < space ) { q *= X264_MAX( pbbits / space, bits / (0.5 * rcc->buffer_size) ); } q = X264_MAX( q0-5, q ); } if( !rcc->b_vbv_min_rate ) q = X264_MAX( q0, q ); } if(lmin==lmax) return lmin; else if(rcc->b_2pass) { double min2 = log(lmin); double max2 = log(lmax); q = (log(q) - min2)/(max2-min2) - 0.5; q = 1.0/(1.0 + exp(-4*q)); q = q*(max2-min2) + min2; return exp(q); } else return x264_clip3f(q, lmin, lmax); } // update qscale for 1 frame based on actual bits used so far static float rate_estimate_qscale( x264_t *h ) { float q; x264_ratecontrol_t *rcc = h->rc; ratecontrol_entry_t rce; int pict_type = h->sh.i_type; double lmin = rcc->lmin[pict_type]; double lmax = rcc->lmax[pict_type]; int64_t total_bits = 8*(h->stat.i_slice_size[SLICE_TYPE_I] + h->stat.i_slice_size[SLICE_TYPE_P] + h->stat.i_slice_size[SLICE_TYPE_B]); if( rcc->b_2pass ) { rce = *rcc->rce; if(pict_type != rce.pict_type) { x264_log(h, X264_LOG_ERROR, "slice=%c but 2pass stats say %c\n", slice_type_to_char[pict_type], slice_type_to_char[rce.pict_type]); } } if( pict_type == SLICE_TYPE_B ) { /* B-frames don't have independent ratecontrol, but rather get the * average QP of the two adjacent P-frames + an offset */ int i0 = IS_X264_TYPE_I(h->fref0[0]->i_type); int i1 = IS_X264_TYPE_I(h->fref1[0]->i_type); int dt0 = abs(h->fenc->i_poc - h->fref0[0]->i_poc); int dt1 = abs(h->fenc->i_poc - h->fref1[0]->i_poc); float q0 = h->fref0[0]->f_qp_avg_rc; float q1 = h->fref1[0]->f_qp_avg_rc; if( h->fref0[0]->i_type == X264_TYPE_BREF ) q0 -= rcc->pb_offset/2; if( h->fref1[0]->i_type == X264_TYPE_BREF ) q1 -= rcc->pb_offset/2; if(i0 && i1) q = (q0 + q1) / 2 + rcc->ip_offset; else if(i0) q = q1; else if(i1) q = q0; else q = (q0*dt1 + q1*dt0) / (dt0 + dt1); if(h->fenc->b_kept_as_ref) q += rcc->pb_offset/2; else q += rcc->pb_offset; rcc->frame_size_planned = predict_size( rcc->pred_b_from_p, q, h->fref1[h->i_ref1-1]->i_satd ); x264_ratecontrol_set_estimated_size(h, rcc->frame_size_planned); rcc->last_satd = 0; return qp2qscale(q); } else { double abr_buffer = 2 * rcc->rate_tolerance * rcc->bitrate; if( rcc->b_2pass ) { //FIXME adjust abr_buffer based on distance to the end of the video int64_t diff; int64_t predicted_bits = total_bits; if( rcc->b_vbv ) { if( h->param.i_threads > 1 ) { int j = h->rc - h->thread[0]->rc; int i; for( i=1; i<h->param.i_threads; 
i++ ) { x264_t *t = h->thread[ (j+i)%h->param.i_threads ]; double bits = t->rc->frame_size_planned; if( !t->b_thread_active ) continue; bits = X264_MAX(bits, x264_ratecontrol_get_estimated_size(t)); predicted_bits += (int64_t)bits; } } } else { if( h->fenc->i_frame < h->param.i_threads ) predicted_bits += (int64_t)h->fenc->i_frame * rcc->bitrate / rcc->fps; else predicted_bits += (int64_t)(h->param.i_threads - 1) * rcc->bitrate / rcc->fps; } diff = predicted_bits - (int64_t)rce.expected_bits; q = rce.new_qscale; q /= x264_clip3f((double)(abr_buffer - diff) / abr_buffer, .5, 2); if( ((h->fenc->i_frame + 1 - h->param.i_threads) >= rcc->fps) && (rcc->expected_bits_sum > 0)) { /* Adjust quant based on the difference between * achieved and expected bitrate so far */ double time = (double)h->fenc->i_frame / rcc->num_entries; double w = x264_clip3f( time*100, 0.0, 1.0 ); q *= pow( (double)total_bits / rcc->expected_bits_sum, w ); } if( rcc->b_vbv ) { /* Do not overflow vbv */ double expected_size = qscale2bits(&rce, q); double expected_vbv = rcc->buffer_fill + rcc->buffer_rate - expected_size; double expected_fullness = rce.expected_vbv / rcc->buffer_size; double qmax = q*(2 - expected_fullness); double size_constraint = 1 + expected_fullness; qmax = X264_MAX(qmax, rce.new_qscale); if (expected_fullness < .05) qmax = lmax; qmax = X264_MIN(qmax, lmax); while( ((expected_vbv < rce.expected_vbv/size_constraint) && (q < qmax)) || ((expected_vbv < 0) && (q < lmax))) { q *= 1.05; expected_size = qscale2bits(&rce, q); expected_vbv = rcc->buffer_fill + rcc->buffer_rate - expected_size; } rcc->last_satd = x264_rc_analyse_slice( h ); } q = x264_clip3f( q, lmin, lmax ); } else /* 1pass ABR */ { /* Calculate the quantizer which would have produced the desired * average bitrate if it had been applied to all frames so far. * Then modulate that quant based on the current frame's complexity * relative to the average complexity so far (using the 2pass RCEQ). * Then bias the quant up or down if total size so far was far from * the target. * Result: Depending on the value of rate_tolerance, there is a * tradeoff between quality and bitrate precision. But at large * tolerances, the bit distribution approaches that of 2pass. */ double wanted_bits, overflow=1, lmin, lmax; rcc->last_satd = x264_rc_analyse_slice( h ); rcc->short_term_cplxsum *= 0.5; rcc->short_term_cplxcount *= 0.5; rcc->short_term_cplxsum += rcc->last_satd; rcc->short_term_cplxcount ++; rce.tex_bits = rcc->last_satd; rce.blurred_complexity = rcc->short_term_cplxsum / rcc->short_term_cplxcount; rce.mv_bits = 0; rce.p_count = rcc->nmb; rce.i_count = 0; rce.s_count = 0; rce.qscale = 1; rce.pict_type = pict_type; if( h->param.rc.i_rc_method == X264_RC_CRF ) { q = get_qscale( h, &rce, rcc->rate_factor_constant, h->fenc->i_frame ); } else { int i_frame_done = h->fenc->i_frame + 1 - h->param.i_threads; q = get_qscale( h, &rce, rcc->wanted_bits_window / rcc->cplxr_sum, h->fenc->i_frame ); // FIXME is it simpler to keep track of wanted_bits in ratecontrol_end? 
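/* Worked example of the overflow feedback computed below: at 1000 kbps
 * with rate_tolerance = 1.0, abr_buffer = 2 * 1.0 * 1e6 = 2e6 bits
 * (before the sqrt scaling applied on longer encodes).  If the stream
 * is 500 kbit over target so far, overflow = clip3f(1 + 5e5/2e6, .5, 2)
 * = 1.25, i.e. qscale is raised by 25% to pull back toward the target. */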
wanted_bits = i_frame_done * rcc->bitrate / rcc->fps; if( wanted_bits > 0 ) { abr_buffer *= X264_MAX( 1, sqrt(i_frame_done/25) ); overflow = x264_clip3f( 1.0 + (total_bits - wanted_bits) / abr_buffer, .5, 2 ); q *= overflow; } } if( pict_type == SLICE_TYPE_I && h->param.i_keyint_max > 1 /* should test _next_ pict type, but that isn't decided yet */ && rcc->last_non_b_pict_type != SLICE_TYPE_I ) { q = qp2qscale( rcc->accum_p_qp / rcc->accum_p_norm ); q /= fabs( h->param.rc.f_ip_factor ); } else if( h->i_frame > 0 ) { /* Asymmetric clipping, because symmetric would prevent * overflow control in areas of rapidly oscillating complexity */ lmin = rcc->last_qscale_for[pict_type] / rcc->lstep; lmax = rcc->last_qscale_for[pict_type] * rcc->lstep; if( overflow > 1.1 && h->i_frame > 3 ) lmax *= rcc->lstep; else if( overflow < 0.9 ) lmin /= rcc->lstep; q = x264_clip3f(q, lmin, lmax); } else if( h->param.rc.i_rc_method == X264_RC_CRF ) { q = qp2qscale( ABR_INIT_QP ) / fabs( h->param.rc.f_ip_factor ); } //FIXME use get_diff_limited_q() ? q = clip_qscale( h, pict_type, q ); } rcc->last_qscale_for[pict_type] = rcc->last_qscale = q; if( !(rcc->b_2pass && !rcc->b_vbv) && h->fenc->i_frame == 0 ) rcc->last_qscale_for[SLICE_TYPE_P] = q; if( rcc->b_2pass && rcc->b_vbv) rcc->frame_size_planned = qscale2bits(&rce, q); else rcc->frame_size_planned = predict_size( &rcc->pred[h->sh.i_type], q, rcc->last_satd ); x264_ratecontrol_set_estimated_size(h, rcc->frame_size_planned); return q; } } void x264_thread_sync_ratecontrol( x264_t *cur, x264_t *prev, x264_t *next ) { if( cur != prev ) { #define COPY(var) memcpy(&cur->rc->var, &prev->rc->var, sizeof(cur->rc->var)) /* these vars are updated in x264_ratecontrol_start() * so copy them from the context that most recently started (prev) * to the context that's about to start (cur). */ COPY(accum_p_qp); COPY(accum_p_norm); COPY(last_satd); COPY(last_rceq); COPY(last_qscale_for); COPY(last_non_b_pict_type); COPY(short_term_cplxsum); COPY(short_term_cplxcount); COPY(bframes); COPY(prev_zone); #undef COPY } if( cur != next ) { #define COPY(var) next->rc->var = cur->rc->var /* these vars are updated in x264_ratecontrol_end() * so copy them from the context that most recently ended (cur) * to the context that's about to end (next) */ COPY(cplxr_sum); COPY(expected_bits_sum); COPY(wanted_bits_window); COPY(bframe_bits); #undef COPY } //FIXME row_preds[] (not strictly necessary, but would improve prediction) /* the rest of the variables are either constant or thread-local */ } static int find_underflow( x264_t *h, double *fills, int *t0, int *t1, int over ) { /* find an interval ending on an overflow or underflow (depending on whether * we're adding or removing bits), and starting on the earliest frame that * can influence the buffer fill of that end frame. */ x264_ratecontrol_t *rcc = h->rc; const double buffer_min = (over ? .1 : .1) * rcc->buffer_size; const double buffer_max = .9 * rcc->buffer_size; double fill = fills[*t0-1]; double parity = over ? 1. 
: -1.; int i, start=-1, end=-1; for(i = *t0; i < rcc->num_entries; i++) { fill += (rcc->buffer_rate - qscale2bits(&rcc->entry[i], rcc->entry[i].new_qscale)) * parity; fill = x264_clip3f(fill, 0, rcc->buffer_size); fills[i] = fill; if(fill <= buffer_min || i == 0) { if(end >= 0) break; start = i; } else if(fill >= buffer_max && start >= 0) end = i; } *t0 = start; *t1 = end; return start>=0 && end>=0; } static int fix_underflow( x264_t *h, int t0, int t1, double adjustment, double qscale_min, double qscale_max) { x264_ratecontrol_t *rcc = h->rc; double qscale_orig, qscale_new; int i; int adjusted = 0; if(t0 > 0) t0++; for(i = t0; i <= t1; i++) { qscale_orig = rcc->entry[i].new_qscale; qscale_orig = x264_clip3f(qscale_orig, qscale_min, qscale_max); qscale_new = qscale_orig * adjustment; qscale_new = x264_clip3f(qscale_new, qscale_min, qscale_max); rcc->entry[i].new_qscale = qscale_new; adjusted = adjusted || (qscale_new != qscale_orig); } return adjusted; } static double count_expected_bits( x264_t *h ) { x264_ratecontrol_t *rcc = h->rc; double expected_bits = 0; int i; for(i = 0; i < rcc->num_entries; i++) { ratecontrol_entry_t *rce = &rcc->entry[i]; rce->expected_bits = expected_bits; expected_bits += qscale2bits(rce, rce->new_qscale); } return expected_bits; } static void vbv_pass2( x264_t *h ) { /* for each interval of buffer_full .. underflow, uniformly increase the qp of all * frames in the interval until either buffer is full at some intermediate frame or the * last frame in the interval no longer underflows. Recompute intervals and repeat. * Then do the converse to put bits back into overflow areas until target size is met */ x264_ratecontrol_t *rcc = h->rc; double *fills = x264_malloc((rcc->num_entries+1)*sizeof(double)); double all_available_bits = h->param.rc.i_bitrate * 1000. * rcc->num_entries / rcc->fps; double expected_bits = 0; double adjustment; double prev_bits = 0; int i, t0, t1; double qscale_min = qp2qscale(h->param.rc.i_qp_min); double qscale_max = qp2qscale(h->param.rc.i_qp_max); int iterations = 0; int adj_min, adj_max; fills++; /* adjust overall stream size */ do { iterations++; prev_bits = expected_bits; if(expected_bits != 0) { /* not first iteration */ adjustment = X264_MAX(X264_MIN(expected_bits / all_available_bits, 0.999), 0.9); fills[-1] = rcc->buffer_size * h->param.rc.f_vbv_buffer_init; t0 = 0; /* fix overflows */ adj_min = 1; while(adj_min && find_underflow(h, fills, &t0, &t1, 1)) { adj_min = fix_underflow(h, t0, t1, adjustment, qscale_min, qscale_max); t0 = t1; } } fills[-1] = rcc->buffer_size * (1. - h->param.rc.f_vbv_buffer_init); t0 = 0; /* fix underflows -- should be done after overflow, as we'd better undersize target than underflowing VBV */ adj_max = 1; while(adj_max && find_underflow(h, fills, &t0, &t1, 0)) adj_max = fix_underflow(h, t0, t1, 1.001, qscale_min, qscale_max); expected_bits = count_expected_bits(h); } while((expected_bits < .995*all_available_bits) && ((int)(expected_bits+.5) > (int)(prev_bits+.5)) ); if (!adj_max) x264_log( h, X264_LOG_WARNING, "vbv-maxrate issue, qpmax or vbv-maxrate too low\n"); /* store expected vbv filling values for tracking when encoding */ for(i = 0; i < rcc->num_entries; i++) rcc->entry[i].expected_vbv = rcc->buffer_size - fills[i]; x264_free(fills-1); } static int init_pass2( x264_t *h ) { x264_ratecontrol_t *rcc = h->rc; uint64_t all_const_bits = 0; uint64_t all_available_bits = (uint64_t)(h->param.rc.i_bitrate * 1000. 
* rcc->num_entries / rcc->fps); double rate_factor, step, step_mult; double qblur = h->param.rc.f_qblur; double cplxblur = h->param.rc.f_complexity_blur; const int filter_size = (int)(qblur*4) | 1; double expected_bits; double *qscale, *blurred_qscale; int i; /* find total/average complexity & const_bits */ for(i=0; i<rcc->num_entries; i++) { ratecontrol_entry_t *rce = &rcc->entry[i]; all_const_bits += rce->misc_bits; } if( all_available_bits < all_const_bits) { x264_log(h, X264_LOG_ERROR, "requested bitrate is too low. estimated minimum is %d kbps\n", (int)(all_const_bits * rcc->fps / (rcc->num_entries * 1000.))); return -1; } /* Blur complexities, to reduce local fluctuation of QP. * We don't blur the QPs directly, because then one very simple frame * could drag down the QP of a nearby complex frame and give it more * bits than intended. */ for(i=0; i<rcc->num_entries; i++) { ratecontrol_entry_t *rce = &rcc->entry[i]; double weight_sum = 0; double cplx_sum = 0; double weight = 1.0; double gaussian_weight; int j; /* weighted average of cplx of future frames */ for(j=1; j<cplxblur*2 && j<rcc->num_entries-i; j++) { ratecontrol_entry_t *rcj = &rcc->entry[i+j]; weight *= 1 - pow( (float)rcj->i_count / rcc->nmb, 2 ); if(weight < .0001) break; gaussian_weight = weight * exp(-j*j/200.0); weight_sum += gaussian_weight; cplx_sum += gaussian_weight * (qscale2bits(rcj, 1) - rcj->misc_bits); } /* weighted average of cplx of past frames */ weight = 1.0; for(j=0; j<=cplxblur*2 && j<=i; j++) { ratecontrol_entry_t *rcj = &rcc->entry[i-j]; gaussian_weight = weight * exp(-j*j/200.0); weight_sum += gaussian_weight; cplx_sum += gaussian_weight * (qscale2bits(rcj, 1) - rcj->misc_bits); weight *= 1 - pow( (float)rcj->i_count / rcc->nmb, 2 ); if(weight < .0001) break; } rce->blurred_complexity = cplx_sum / weight_sum; } qscale = x264_malloc(sizeof(double)*rcc->num_entries); if(filter_size > 1) blurred_qscale = x264_malloc(sizeof(double)*rcc->num_entries); else blurred_qscale = qscale; /* Search for a factor which, when multiplied by the RCEQ values from * each frame, adds up to the desired total size. * There is no exact closed-form solution because of VBV constraints and * because qscale2bits is not invertible, but we can start with the simple * approximation of scaling the 1st pass by the ratio of bitrates. * The search range is probably overkill, but speed doesn't matter here. */ expected_bits = 1; for(i=0; i<rcc->num_entries; i++) expected_bits += qscale2bits(&rcc->entry[i], get_qscale(h, &rcc->entry[i], 1.0, i)); step_mult = all_available_bits / expected_bits; rate_factor = 0; for(step = 1E4 * step_mult; step > 1E-7 * step_mult; step *= 0.5) { expected_bits = 0; rate_factor += step; rcc->last_non_b_pict_type = -1; rcc->last_accum_p_norm = 1; rcc->accum_p_norm = 0; /* find qscale */ for(i=0; i<rcc->num_entries; i++) { qscale[i] = get_qscale(h, &rcc->entry[i], rate_factor, i); } /* fixed I/B qscale relative to P */ for(i=rcc->num_entries-1; i>=0; i--) { qscale[i] = get_diff_limited_q(h, &rcc->entry[i], qscale[i]); assert(qscale[i] >= 0); } /* smooth curve */ if(filter_size > 1) { assert(filter_size%2==1); for(i=0; i<rcc->num_entries; i++) { ratecontrol_entry_t *rce = &rcc->entry[i]; int j; double q=0.0, sum=0.0; for(j=0; j<filter_size; j++) { int index = i+j-filter_size/2; double d = index-i; double coeff = qblur==0 ? 
1.0 : exp(-d*d/(qblur*qblur)); if(index < 0 || index >= rcc->num_entries) continue; if(rce->pict_type != rcc->entry[index].pict_type) continue; q += qscale[index] * coeff; sum += coeff; } blurred_qscale[i] = q/sum; } } /* find expected bits */ for(i=0; i<rcc->num_entries; i++) { ratecontrol_entry_t *rce = &rcc->entry[i]; rce->new_qscale = clip_qscale(h, rce->pict_type, blurred_qscale[i]); assert(rce->new_qscale >= 0); expected_bits += qscale2bits(rce, rce->new_qscale); } if(expected_bits > all_available_bits) rate_factor -= step; } x264_free(qscale); if(filter_size > 1) x264_free(blurred_qscale); if(rcc->b_vbv) vbv_pass2(h); expected_bits = count_expected_bits(h); if(fabs(expected_bits/all_available_bits - 1.0) > 0.01) { double avgq = 0; for(i=0; i<rcc->num_entries; i++) avgq += rcc->entry[i].new_qscale; avgq = qscale2qp(avgq / rcc->num_entries); if ((expected_bits > all_available_bits) || (!rcc->b_vbv)) x264_log(h, X264_LOG_WARNING, "Error: 2pass curve failed to converge\n"); x264_log(h, X264_LOG_WARNING, "target: %.2f kbit/s, expected: %.2f kbit/s, avg QP: %.4f\n", (float)h->param.rc.i_bitrate, expected_bits * rcc->fps / (rcc->num_entries * 1000.), avgq); if(expected_bits < all_available_bits && avgq < h->param.rc.i_qp_min + 2) { if(h->param.rc.i_qp_min > 0) x264_log(h, X264_LOG_WARNING, "try reducing target bitrate or reducing qp_min (currently %d)\n", h->param.rc.i_qp_min); else x264_log(h, X264_LOG_WARNING, "try reducing target bitrate\n"); } else if(expected_bits > all_available_bits && avgq > h->param.rc.i_qp_max - 2) { if(h->param.rc.i_qp_max < 51) x264_log(h, X264_LOG_WARNING, "try increasing target bitrate or increasing qp_max (currently %d)\n", h->param.rc.i_qp_max); else x264_log(h, X264_LOG_WARNING, "try increasing target bitrate\n"); } else if(!(rcc->b_2pass && rcc->b_vbv)) x264_log(h, X264_LOG_WARNING, "internal error\n"); } return 0; }
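The coarse-to-fine scan over rate_factor in init_pass2() above is the core of the 2pass fit and is easier to see in isolation. The following sketch is not x264 code: toy_bits() is a hypothetical stand-in for the blurred qscale2bits()/get_qscale() sum over all entries, and the only property the search relies on is that the predicted stream size grows monotonically with rate_factor.

#include <stdio.h>

/* Hypothetical stand-in for "predicted total size at a given rate_factor";
 * any curve that increases with rate_factor illustrates the point. */
static double toy_bits(double rate_factor)
{
    return 1000.0 * rate_factor;
}

/* Same shape as the loop in init_pass2(): advance in coarse steps, halving
 * the step each pass, and back off whenever the prediction overshoots the
 * budget, so the largest factor that still fits is kept. */
static double find_rate_factor(double all_available_bits)
{
    double rate_factor = 0;
    double step;
    for (step = 1e4; step > 1e-7; step *= 0.5)
    {
        rate_factor += step;
        if (toy_bits(rate_factor) > all_available_bits)
            rate_factor -= step;
    }
    return rate_factor;
}

int main(void)
{
    printf("rate_factor for a 500000-bit budget: %f\n",
           find_rate_factor(500000.0));
    return 0;
}

Since the step is halved on every pass, the search takes only about 37 iterations to shrink from 1e4 to 1e-7, whatever the budget's magnitude; x264 additionally rescales the starting step by step_mult so the scanned range brackets the expected solution.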
calculate_G.h
#pragma omp target teams distribute parallel for collapse(2) thread_limit(BLOCK_SIZE)
for (int col = 1; col < NUM+1; col++) {
  for (int row = 1; row < NUM+1; row++) {
    if (row == NUM) {
      // top and bottom boundaries
      G(col, 0) = v(col, 0);
      G(col, NUM) = v(col, NUM);
    } else {
      // u velocities
      Real u_ij = u(col, row);
      Real u_ijp1 = u(col, row + 1);
      Real u_im1j = u(col - 1, row);
      Real u_im1jp1 = u(col - 1, row + 1);

      // v velocities
      Real v_ij = v(col, row);
      Real v_ijp1 = v(col, row + 1);
      Real v_ip1j = v(col + 1, row);
      Real v_ijm1 = v(col, row - 1);
      Real v_im1j = v(col - 1, row);

      // finite differences
      Real dv2dy, duvdx, d2vdx2, d2vdy2;

      dv2dy = ((v_ij + v_ijp1) * (v_ij + v_ijp1) - (v_ijm1 + v_ij) * (v_ijm1 + v_ij)
              + mix_param * (fabs(v_ij + v_ijp1) * (v_ij - v_ijp1)
                           - fabs(v_ijm1 + v_ij) * (v_ijm1 - v_ij))) / (FOUR * dy);
      duvdx = ((u_ij + u_ijp1) * (v_ij + v_ip1j) - (u_im1j + u_im1jp1) * (v_im1j + v_ij)
              + mix_param * (fabs(u_ij + u_ijp1) * (v_ij - v_ip1j)
                           - fabs(u_im1j + u_im1jp1) * (v_im1j - v_ij))) / (FOUR * dx);
      d2vdx2 = (v_ip1j - (TWO * v_ij) + v_im1j) / (dx * dx);
      d2vdy2 = (v_ijp1 - (TWO * v_ij) + v_ijm1) / (dy * dy);

      G(col, row) = v_ij + dt * (((d2vdx2 + d2vdy2) / Re_num) - dv2dy - duvdx + gy);
    } // end if
  }
}
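calculate_G.h declares no storage of its own: it is meant to be #include'd at a point where Real, NUM, BLOCK_SIZE, the u/v/G accessor macros, the TWO/FOUR constants, and the scheme parameters are already in scope. A minimal host-side driver consistent with that contract is sketched below; the flat row-major layout with one ghost layer, the (NUM+2)*(NUM+2) extents, and the compute_G wrapper name are illustrative assumptions, not part of this project.

#include <math.h>

typedef double Real;

#define NUM        128
#define BLOCK_SIZE 128
#define TWO  ((Real)2.0)
#define FOUR ((Real)4.0)

/* assumed flat row-major layout with one ghost cell on each side */
#define u(c, r) u_arr[(c) * (NUM + 2) + (r)]
#define v(c, r) v_arr[(c) * (NUM + 2) + (r)]
#define G(c, r) G_arr[(c) * (NUM + 2) + (r)]

void compute_G(const Real *u_arr, const Real *v_arr, Real *G_arr,
               Real dt, Real dx, Real dy, Real Re_num, Real gy,
               Real mix_param)
{
    /* u_arr, v_arr and G_arr are assumed to already be resident on the
     * device, e.g. via an enclosing "#pragma omp target data" region. */
    #include "calculate_G.h"
}

With collapse(2) the two loops form a single iteration space of NUM*NUM points, so the row == NUM boundary branch costs one divergent test per column instead of a separate boundary kernel.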
if-clauseModificado.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(int argc, char **argv)
{
    int i, n = 20, tid;
    int a[n], suma = 0, sumalocal;
    int x; // used by the num_threads clause

    if (argc < 3) {
        fprintf(stderr, "[ERROR]-Missing iterations and thread count\n");
        exit(-1);
    }
    n = atoi(argv[1]);
    if (n > 20) n = 20;
    if (n <= 0) n = 1;
    x = atoi(argv[2]);
    if (x > 8) x = 8;   // cap the thread count
    if (x <= 0) x = 1;  // num_threads requires a positive value

    for (i = 0; i < n; i++) {
        a[i] = i;
    }

    #pragma omp parallel num_threads(x) if(n>4) default(none) \
                         private(sumalocal,tid) shared(a,suma,n)
    {
        sumalocal = 0;
        tid = omp_get_thread_num();
        #pragma omp for private(i) schedule(static) nowait
        for (i = 0; i < n; i++) {
            sumalocal += a[i];
            printf(" thread %d sum of a[%d]=%d sumalocal=%d \n",
                   tid, i, a[i], sumalocal);
        }
        #pragma omp atomic
        suma += sumalocal;
        #pragma omp barrier
        #pragma omp master
        printf("master thread=%d prints suma=%d \n", tid, suma);
    }
}
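The distinguishing detail of this exercise is the pair num_threads(x) if(n>4): when the if condition is false, the parallel region still executes, but with a team of exactly one thread, no matter what num_threads requests. A minimal self-contained probe of just that behavior (the values 2 and 8 below are illustrative, not from the exercise):

#include <stdio.h>
#include <omp.h>

int main(void)
{
    /* n=2 makes the if clause false -> team of 1 thread;
     * n=8 makes it true -> up to 4 threads. */
    for (int n = 2; n <= 8; n += 6)
    {
        #pragma omp parallel num_threads(4) if(n > 4)
        {
            #pragma omp single
            printf("n=%d -> team of %d thread(s)\n",
                   n, omp_get_num_threads());
        }
    }
    return 0;
}

Built with, e.g., gcc -fopenmp, the n=2 iteration should report a team of 1 and the n=8 iteration up to 4 (the runtime may grant fewer).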
CGOpenMPRuntime.h
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This provides a class for OpenMP runtime code generation. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #include "CGValue.h" #include "clang/AST/DeclOpenMP.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringSet.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" #include "llvm/IR/Function.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/AtomicOrdering.h" namespace llvm { class ArrayType; class Constant; class FunctionType; class GlobalVariable; class StructType; class Type; class Value; class OpenMPIRBuilder; } // namespace llvm namespace clang { class Expr; class OMPDependClause; class OMPExecutableDirective; class OMPLoopDirective; class VarDecl; class OMPDeclareReductionDecl; class IdentifierInfo; namespace CodeGen { class Address; class CodeGenFunction; class CodeGenModule; /// A basic class for pre|post-action for advanced codegen sequence for OpenMP /// region. class PrePostActionTy { public: explicit PrePostActionTy() {} virtual void Enter(CodeGenFunction &CGF) {} virtual void Exit(CodeGenFunction &CGF) {} virtual ~PrePostActionTy() {} }; /// Class provides a way to call simple version of codegen for OpenMP region, or /// an advanced with possible pre|post-actions in codegen. 
class RegionCodeGenTy final { intptr_t CodeGen; typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &); CodeGenTy Callback; mutable PrePostActionTy *PrePostAction; RegionCodeGenTy() = delete; RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete; template <typename Callable> static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF, PrePostActionTy &Action) { return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action); } public: template <typename Callable> RegionCodeGenTy( Callable &&CodeGen, std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>, RegionCodeGenTy>::value> * = nullptr) : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)), Callback(CallbackFn<std::remove_reference_t<Callable>>), PrePostAction(nullptr) {} void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; } void operator()(CodeGenFunction &CGF) const; }; struct OMPTaskDataTy final { SmallVector<const Expr *, 4> PrivateVars; SmallVector<const Expr *, 4> PrivateCopies; SmallVector<const Expr *, 4> FirstprivateVars; SmallVector<const Expr *, 4> FirstprivateCopies; SmallVector<const Expr *, 4> FirstprivateInits; SmallVector<const Expr *, 4> LastprivateVars; SmallVector<const Expr *, 4> LastprivateCopies; SmallVector<const Expr *, 4> ReductionVars; SmallVector<const Expr *, 4> ReductionOrigs; SmallVector<const Expr *, 4> ReductionCopies; SmallVector<const Expr *, 4> ReductionOps; SmallVector<CanonicalDeclPtr<const VarDecl>, 4> PrivateLocals; struct DependData { OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; const Expr *IteratorExpr = nullptr; SmallVector<const Expr *, 4> DepExprs; explicit DependData() = default; DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr) : DepKind(DepKind), IteratorExpr(IteratorExpr) {} }; SmallVector<DependData, 4> Dependences; llvm::PointerIntPair<llvm::Value *, 1, bool> Final; llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule; llvm::PointerIntPair<llvm::Value *, 1, bool> Priority; llvm::Value *Reductions = nullptr; unsigned NumberOfParts = 0; bool Tied = true; bool Nogroup = false; bool IsReductionWithTaskMod = false; bool IsWorksharingReduction = false; }; /// Class intended to support codegen of all kind of the reduction clauses. class ReductionCodeGen { private: /// Data required for codegen of reduction clauses. struct ReductionData { /// Reference to the item shared between tasks to reduce into. const Expr *Shared = nullptr; /// Reference to the original item. const Expr *Ref = nullptr; /// Helper expression for generation of private copy. const Expr *Private = nullptr; /// Helper expression for generation reduction operation. const Expr *ReductionOp = nullptr; ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private, const Expr *ReductionOp) : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) { } }; /// List of reduction-based clauses. SmallVector<ReductionData, 4> ClausesData; /// List of addresses of shared variables/expressions. SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses; /// List of addresses of original variables/expressions. SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses; /// Sizes of the reduction items in chars. SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes; /// Base declarations for the reduction items. SmallVector<const VarDecl *, 4> BaseDecls; /// Emits lvalue for shared expression. LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E); /// Emits upper bound for shared expression (if array section). 
LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E); /// Performs aggregate initialization. /// \param N Number of reduction item in the common list. /// \param PrivateAddr Address of the corresponding private item. /// \param SharedLVal Address of the original shared variable. /// \param DRD Declare reduction construct used for reduction item. void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, const OMPDeclareReductionDecl *DRD); public: ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> ReductionOps); /// Emits lvalue for the shared and original reduction item. /// \param N Number of the reduction item. void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. void emitAggregateType(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. /// \param Size Size of the type in chars. void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size); /// Performs initialization of the private copy for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. /// \param DefaultInit Default initialization sequence that should be /// performed if no reduction specific initialization is found. /// \param SharedLVal Address of the original shared variable. void emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, llvm::function_ref<bool(CodeGenFunction &)> DefaultInit); /// Returns true if the private copy requires cleanups. bool needCleanups(unsigned N); /// Emits cleanup code for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Adjusts \p PrivateAddr for use instead of the original variable /// address in normal operations. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Returns LValue for the reduction item. LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; } /// Returns LValue for the original reduction item. LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; } /// Returns the size of the reduction item (in chars and total number of /// elements in the item), or nullptr, if the size is a constant. std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const { return Sizes[N]; } /// Returns the base declaration of the reduction item. const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; } /// Returns the reference expression of the reduction item. const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; } /// Returns true if the initialization of the reduction item uses initializer /// from declare reduction construct. bool usesReductionInitializer(unsigned N) const; }; class CGOpenMPRuntime { public: /// Allows to disable automatic handling of functions used in target regions /// as those marked as `omp declare target`.
class DisableAutoDeclareTargetRAII { CodeGenModule &CGM; bool SavedShouldMarkAsGlobal; public: DisableAutoDeclareTargetRAII(CodeGenModule &CGM); ~DisableAutoDeclareTargetRAII(); }; /// Manages list of nontemporal decls for the specified directive. class NontemporalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S); ~NontemporalDeclsRAII(); }; /// Manages the list of untied task local vars for the specified directive. class UntiedTaskLocalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: UntiedTaskLocalDeclsRAII( CodeGenFunction &CGF, const llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>> &LocalVars); ~UntiedTaskLocalDeclsRAII(); }; /// Maps the expression for the lastprivate variable to the global copy used /// to store new value because original variables are not mapped in inner /// parallel regions. Only private copies are captured, but we also need to /// store the private copy at a shared address. /// Also, stores the expression for the private loop counter and its /// threadprivate name. struct LastprivateConditionalData { llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>> DeclToUniqueName; LValue IVLVal; llvm::Function *Fn = nullptr; bool Disabled = false; }; /// Manages list of lastprivate conditional decls for the specified directive. class LastprivateConditionalRAII { enum class ActionToDo { DoNotPush, PushAsLastprivateConditional, DisableLastprivateConditional, }; CodeGenModule &CGM; ActionToDo Action = ActionToDo::DoNotPush; /// Check and try to disable analysis of inner regions for changes in /// lastprivate conditional. void tryToDisableInnerAnalysis(const OMPExecutableDirective &S, llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled) const; LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S); public: explicit LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal); static LastprivateConditionalRAII disable(CodeGenFunction &CGF, const OMPExecutableDirective &S); ~LastprivateConditionalRAII(); }; llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; } protected: CodeGenModule &CGM; StringRef FirstSeparator, Separator; /// Constructor allowing to redefine the name separator for the variables. explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator, StringRef Separator); /// Creates offloading entry for the provided entry ID \a ID, /// address \a Addr, size \a Size, and flags \a Flags. virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Helper to emit outlined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Lambda codegen specific to an accelerator device. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false.
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen); /// Emits object of ident_t type with info for source location. /// \param Flags Flags for OpenMP location. /// llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc, unsigned Flags = 0); /// Returns pointer to ident_t type. llvm::Type *getIdentTyPointerTy(); /// Gets thread id value for the current thread. /// llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc); /// Get the function name of an outlined region. // The name can be customized depending on the target. // virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; } /// Emits \p Callee function call with arguments \p Args with location \p Loc. void emitCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee Callee, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits address of the word in memory where the current thread id is /// stored. virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc); void setLocThreadIdInsertPt(CodeGenFunction &CGF, bool AtCurrentPoint = false); void clearLocThreadIdInsertPt(CodeGenFunction &CGF); /// Check if the default location must be constant. /// Default is false to support OMPT/OMPD. virtual bool isDefaultLocationConstant() const { return false; } /// Returns additional flags that can be stored in reserved_2 field of the /// default location. virtual unsigned getDefaultLocationReserved2Flags() const { return 0; } /// Returns default flags for the barriers depending on the directive, for /// which this barrier is going to be emitted. static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind); /// Get the LLVM type for the critical name. llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;} /// Returns corresponding lock object for the specified critical region /// name. If the lock object does not exist it is created, otherwise the /// reference to the existing copy is returned. /// \param CriticalName Name of the critical region. /// llvm::Value *getCriticalRegionLock(StringRef CriticalName); private: /// An OpenMP-IR-Builder instance. llvm::OpenMPIRBuilder OMPBuilder; /// Map for SourceLocation and OpenMP runtime library debug locations. typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy; OpenMPDebugLocMapTy OpenMPDebugLocMap; /// The type for a microtask which gets passed to __kmpc_fork_call(). /// Original representation is: /// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...); llvm::FunctionType *Kmpc_MicroTy = nullptr; /// Stores debug location and ThreadID for the function. struct DebugLocThreadIdTy { llvm::Value *DebugLoc; llvm::Value *ThreadID; /// Insert point for the service instructions. llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr; }; /// Map of local debug location, ThreadId and functions. typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy> OpenMPLocThreadIDMapTy; OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap; /// Map of UDRs and corresponding combiner/initializer. typedef llvm::DenseMap<const OMPDeclareReductionDecl *, std::pair<llvm::Function *, llvm::Function *>> UDRMapTy; UDRMapTy UDRMap; /// Map of functions and locally defined UDRs.
typedef llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareReductionDecl *, 4>> FunctionUDRMapTy; FunctionUDRMapTy FunctionUDRMap; /// Map from the user-defined mapper declaration to its corresponding /// functions. llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap; /// Map of functions and their local user-defined mappers. using FunctionUDMMapTy = llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareMapperDecl *, 4>>; FunctionUDMMapTy FunctionUDMMap; /// Maps local variables marked as lastprivate conditional to their internal /// types. llvm::DenseMap<llvm::Function *, llvm::DenseMap<CanonicalDeclPtr<const Decl>, std::tuple<QualType, const FieldDecl *, const FieldDecl *, LValue>>> LastprivateConditionalToTypes; /// Maps function to the position of the untied task locals stack. llvm::DenseMap<llvm::Function *, unsigned> FunctionToUntiedTaskStackMap; /// Type kmp_critical_name, originally defined as typedef kmp_int32 /// kmp_critical_name[8]; llvm::ArrayType *KmpCriticalNameTy; /// An ordered map of auto-generated variables to their unique names. /// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator> InternalVars; /// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); llvm::Type *KmpRoutineEntryPtrTy = nullptr; QualType KmpRoutineEntryPtrQTy; /// Type typedef struct kmp_task { /// void * shareds; /**< pointer to block of pointers to /// shared vars */ /// kmp_routine_entry_t routine; /**< pointer to routine to call for /// executing task */ /// kmp_int32 part_id; /**< part id for the task */ /// kmp_routine_entry_t destructors; /* pointer to function to invoke /// destructors of firstprivate C++ objects */ /// } kmp_task_t; QualType KmpTaskTQTy; /// Saved kmp_task_t for task directive. QualType SavedKmpTaskTQTy; /// Saved kmp_task_t for taskloop-based directive. QualType SavedKmpTaskloopTQTy; /// Type typedef struct kmp_depend_info { /// kmp_intptr_t base_addr; /// size_t len; /// struct { /// bool in:1; /// bool out:1; /// } flags; /// } kmp_depend_info_t; QualType KmpDependInfoTy; /// Type typedef struct kmp_task_affinity_info { /// kmp_intptr_t base_addr; /// size_t len; /// struct { /// bool flag1 : 1; /// bool flag2 : 1; /// kmp_int32 reserved : 30; /// } flags; /// } kmp_task_affinity_info_t; QualType KmpTaskAffinityInfoTy; /// struct kmp_dim { // loop bounds info casted to kmp_int64 /// kmp_int64 lo; // lower /// kmp_int64 up; // upper /// kmp_int64 st; // stride /// }; QualType KmpDimTy; /// Type struct __tgt_offload_entry{ /// void *addr; // Pointer to the offload entry info. /// // (function or global) /// char *name; // Name of the function or global. /// size_t size; // Size of the entry info (0 if it is a function). /// int32_t flags; /// int32_t reserved; /// }; QualType TgtOffloadEntryQTy; /// Entity that registers the offloading constants that were emitted so /// far. class OffloadEntriesInfoManagerTy { CodeGenModule &CGM; /// Number of entries registered so far. unsigned OffloadingEntriesNum = 0; public: /// Base class of the entries info. class OffloadEntryInfo { public: /// Kind of a given entry. enum OffloadingEntryInfoKinds : unsigned { /// Entry is a target region. OffloadingEntryInfoTargetRegion = 0, /// Entry is a declare target variable.
OffloadingEntryInfoDeviceGlobalVar = 1, /// Invalid entry info. OffloadingEntryInfoInvalid = ~0u }; protected: OffloadEntryInfo() = delete; explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {} explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order, uint32_t Flags) : Flags(Flags), Order(Order), Kind(Kind) {} ~OffloadEntryInfo() = default; public: bool isValid() const { return Order != ~0u; } unsigned getOrder() const { return Order; } OffloadingEntryInfoKinds getKind() const { return Kind; } uint32_t getFlags() const { return Flags; } void setFlags(uint32_t NewFlags) { Flags = NewFlags; } llvm::Constant *getAddress() const { return cast_or_null<llvm::Constant>(Addr); } void setAddress(llvm::Constant *V) { assert(!Addr.pointsToAliveValue() && "Address has been set before!"); Addr = V; } static bool classof(const OffloadEntryInfo *Info) { return true; } private: /// Address of the entity that has to be mapped for offloading. llvm::WeakTrackingVH Addr; /// Flags associated with the device global. uint32_t Flags = 0u; /// Order this entry was emitted. unsigned Order = ~0u; OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid; }; /// Return true if there are no entries defined. bool empty() const; /// Return number of entries defined so far. unsigned size() const { return OffloadingEntriesNum; } OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {} // // Target region entries related. // /// Kind of the target registry entry. enum OMPTargetRegionEntryKind : uint32_t { /// Mark the entry as target region. OMPTargetRegionEntryTargetRegion = 0x0, /// Mark the entry as a global constructor. OMPTargetRegionEntryCtor = 0x02, /// Mark the entry as a global destructor. OMPTargetRegionEntryDtor = 0x04, }; /// Target region entries info. class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo { /// Address that can be used as the ID of the entry. llvm::Constant *ID = nullptr; public: OffloadEntryInfoTargetRegion() : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {} explicit OffloadEntryInfoTargetRegion(unsigned Order, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags), ID(ID) { setAddress(Addr); } llvm::Constant *getID() const { return ID; } void setID(llvm::Constant *V) { assert(!ID && "ID has been set before!"); ID = V; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoTargetRegion; } }; /// Initialize target region entry. void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, unsigned Order); /// Register target region entry. void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags); /// Return true if a target region entry with the provided information /// exists. bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum) const; /// Applies action \a Action on all registered entries. typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned, const OffloadEntryInfoTargetRegion &)> OffloadTargetRegionEntryInfoActTy; void actOnTargetRegionEntriesInfo( const OffloadTargetRegionEntryInfoActTy &Action); // // Device global variable entries related. // /// Kind of the global variable entry.
enum OMPTargetGlobalVarEntryKind : uint32_t { /// Mark the entry as a 'declare target to' entry. OMPTargetGlobalVarEntryTo = 0x0, /// Mark the entry as a 'declare target link' entry. OMPTargetGlobalVarEntryLink = 0x1, }; /// Device global variable entries info. class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo { /// Type of the global variable. CharUnits VarSize; llvm::GlobalValue::LinkageTypes Linkage; public: OffloadEntryInfoDeviceGlobalVar() : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {} explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order, OMPTargetGlobalVarEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {} explicit OffloadEntryInfoDeviceGlobalVar( unsigned Order, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags), VarSize(VarSize), Linkage(Linkage) { setAddress(Addr); } CharUnits getVarSize() const { return VarSize; } void setVarSize(CharUnits Size) { VarSize = Size; } llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; } void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar; } }; /// Initialize device global variable entry. void initializeDeviceGlobalVarEntryInfo(StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order); /// Register device global variable entry. void registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Checks if the variable with the given name has been registered already. bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const { return OffloadEntriesDeviceGlobalVar.count(VarName) > 0; } /// Applies action \a Action on all registered entries. typedef llvm::function_ref<void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)> OffloadDeviceGlobalVarEntryInfoActTy; void actOnDeviceGlobalVarEntriesInfo( const OffloadDeviceGlobalVarEntryInfoActTy &Action); private: // Storage for target region entries kind. The storage is to be indexed by // file ID, device ID, parent function name and line number. typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion> OffloadEntriesTargetRegionPerLine; typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine> OffloadEntriesTargetRegionPerParentName; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName> OffloadEntriesTargetRegionPerFile; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile> OffloadEntriesTargetRegionPerDevice; typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy; OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion; /// Storage for device global variable entries kind. The storage is to be /// indexed by mangled name. typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar> OffloadEntriesDeviceGlobalVarTy; OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar; }; OffloadEntriesInfoManagerTy OffloadEntriesInfoManager; bool ShouldMarkAsGlobal = true; /// List of the emitted declarations. llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls; /// List of the global variables with their addresses that should not be /// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables; /// List of variables that can become declare target implicitly and, thus, /// must be emitted. llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables; using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>; /// Stack for list of declarations in current context marked as nontemporal. /// The set is the union of all current stack elements. llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack; using UntiedLocalVarsAddressesMap = llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>>; llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack; /// Stack for list of addresses of declarations in current context marked as /// lastprivate conditional. The set is the union of all current stack /// elements. llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack; /// Flag for keeping track of whether a requires unified_shared_memory /// directive is present. bool HasRequiresUnifiedSharedMemory = false; /// Atomic ordering from the omp requires directive. llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic; /// Flag for keeping track of whether a target region has been emitted. bool HasEmittedTargetRegion = false; /// Flag for keeping track of whether a device routine has been emitted. /// Device routines are specific to the target device. bool HasEmittedDeclareTargetRegion = false; /// Loads all the offload entries information from the host IR /// metadata. void loadOffloadInfoMetadata(); /// Returns __tgt_offload_entry type. QualType getTgtOffloadEntryQTy(); /// Start scanning from statement \a S and emit all target regions /// found along the way. /// \param S Starting statement. /// \param ParentName Name of the function declaration that is being scanned. void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName); /// Build type kmp_routine_entry_t (if not built yet). void emitKmpRoutineEntryT(QualType KmpInt32Ty); /// Returns pointer to kmpc_micro type. llvm::Type *getKmpc_MicroPointerTy(); /// Returns __kmpc_for_static_init_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize, bool IVSigned); /// Returns __kmpc_dispatch_init_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize, bool IVSigned); /// Returns __kmpc_dispatch_next_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize, bool IVSigned); /// Returns __kmpc_dispatch_fini_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize, bool IVSigned); /// If the specified mangled name is not in the module, create and /// return threadprivate cache object. This object is a pointer's worth of /// storage that's reserved for use by the OpenMP runtime. /// \param VD Threadprivate variable. /// \return Cache variable for the specified threadprivate. llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD); /// Gets (if a variable with the given name already exists) or creates /// internal global variable with the specified Name. The created variable has /// linkage CommonLinkage by default and is initialized with a null value. /// \param Ty Type of the global variable.
If it already exists, the type /// must be the same. /// \param Name Name of the variable. llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace = 0); /// Set of threadprivate variables with the generated initializer. llvm::StringSet<> ThreadPrivateWithDefinition; /// Set of declare target variables with the generated initializer. llvm::StringSet<> DeclareTargetWithDefinition; /// Emits initialization code for the threadprivate variables. /// \param VDAddr Address of the global variable \a VD. /// \param Ctor Pointer to a global init function for \a VD. /// \param CopyCtor Pointer to a global copy function for \a VD. /// \param Dtor Pointer to a global destructor function for \a VD. /// \param Loc Location of threadprivate declaration. void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor, llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc); /// Emit the array initialization or deletion portion for user-defined mapper /// code generation. void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF, llvm::Value *Handle, llvm::Value *BasePtr, llvm::Value *Ptr, llvm::Value *Size, llvm::Value *MapType, CharUnits ElementSize, llvm::BasicBlock *ExitBB, bool IsInit); struct TaskResultTy { llvm::Value *NewTask = nullptr; llvm::Function *TaskEntry = nullptr; llvm::Value *NewTaskNewTaskTTy = nullptr; LValue TDBase; const RecordDecl *KmpTaskTQTyRD = nullptr; llvm::Value *TaskDupFn = nullptr; }; /// Emit task region for the task directive. The task region is emitted in /// several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references to the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, etc. TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const OMPTaskDataTy &Data); /// Returns default address space for the constant firstprivates, 0 by /// default. virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; } /// Emit code that pushes the trip count of loops associated with constructs /// 'target teams distribute' and 'teams distribute parallel for'. /// \param SizeEmitter Emits the int64 value for the number of iterations of /// the associated loop. void emitTargetNumIterationsCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Value *DeviceID, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit update for lastprivate conditional data.
void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal, StringRef UniqueDeclName, LValue LVal, SourceLocation Loc); /// Returns the number of the elements and the address of the depobj /// dependency array. /// \return Number of elements in depobj array and the pointer to the array of /// dependencies. std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal, SourceLocation Loc); public: explicit CGOpenMPRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM, ".", ".") {} virtual ~CGOpenMPRuntime() {} virtual void clear(); /// Emits code for OpenMP 'if' clause using specified \a CodeGen /// function. Here is the logic: /// if (Cond) { /// ThenGen(); /// } else { /// ElseGen(); /// } void emitIfClause(CodeGenFunction &CGF, const Expr *Cond, const RegionCodeGenTy &ThenGen, const RegionCodeGenTy &ElseGen); /// Checks if the \p Body is the \a CompoundStmt and returns its child /// statement iff there is only one that is not evaluatable at the compile /// time. static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body); /// Get the platform-specific name separator. std::string getName(ArrayRef<StringRef> Parts) const; /// Emit code for the specified user defined reduction construct. virtual void emitUserDefinedReduction(CodeGenFunction *CGF, const OMPDeclareReductionDecl *D); /// Get combiner/initializer for the specified user-defined reduction, if any. virtual std::pair<llvm::Function *, llvm::Function *> getUserDefinedReduction(const OMPDeclareReductionDecl *D); /// Emit the function for the user defined mapper construct. void emitUserDefinedMapper(const OMPDeclareMapperDecl *D, CodeGenFunction *CGF = nullptr); /// Get the function for the specified user-defined mapper. If it does not /// exist, create one. llvm::Function * getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D); /// Emits outlined function for the specified OpenMP parallel directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. virtual llvm::Function *emitParallelOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen); /// Emits outlined function for the specified OpenMP teams directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. virtual llvm::Function *emitTeamsOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen); /// Emits outlined function for the OpenMP task directive \a D. This /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t* /// TaskT). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. 
/// \param PartIDVar Variable for partition id in the current OpenMP untied /// task region. /// \param TaskTVar Variable for task_t argument. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param Tied true if task is generated for tied task, false otherwise. /// \param NumberOfParts Number of parts in untied task. Ignored for tied /// tasks. /// virtual llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts); /// Cleans up references to the objects in finished function. /// virtual void functionFinished(CodeGenFunction &CGF); /// Emits code for parallel or serial call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run in parallel threads. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond); /// Emits a critical region. /// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr); /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. virtual void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc); /// Emits code for a taskyield directive. virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc); /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. virtual void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc); /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. virtual void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps); /// Emit an ordered region. /// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. virtual void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads); /// Emit an implicit/explicit barrier for OpenMP threads. /// \param Kind Directive for which this implicit barrier call must be /// generated. Must be OMPD_barrier for explicit barrier generation. /// \param EmitChecks true if need to emit checks for cancellation barriers. 
/// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). /// virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false); /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of distribute directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static chunked. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is dynamic. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule Kind specified in the 'schedule' clause. /// virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const; /// struct with the values to be passed to the dispatch runtime function struct DispatchRTInput { /// Loop lower bound llvm::Value *LB = nullptr; /// Loop upper bound llvm::Value *UB = nullptr; /// Chunk size specified using 'schedule' clause (nullptr if chunk /// was not specified) llvm::Value *Chunk = nullptr; DispatchRTInput() = default; DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk) : LB(LB), UB(UB), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. 
/// virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues); /// Struct with the values to be passed to the static runtime function struct StaticRTInput { /// Size of the iteration variable in bits. unsigned IVSize = 0; /// Sign of the iteration variable. bool IVSigned = false; /// true if loop is ordered, false otherwise. bool Ordered = false; /// Address of the output variable in which the flag of the last iteration /// is returned. Address IL = Address::invalid(); /// Address of the output variable in which the lower iteration number is /// returned. Address LB = Address::invalid(); /// Address of the output variable in which the upper iteration number is /// returned. Address UB = Address::invalid(); /// Address of the output variable in which the stride value is returned /// necessary to generated the static_chunked scheduled loop. Address ST = Address::invalid(); /// Value of the chunk for the static_chunked scheduled loop. For the /// default (nullptr) value, the chunk 1 will be used. llvm::Value *Chunk = nullptr; StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL, Address LB, Address UB, Address ST, llvm::Value *Chunk = nullptr) : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB), UB(UB), ST(ST), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// /// This is used only in case of static schedule, when the user did not /// specify a ordered clause on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds LB and UB and stride ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param Values Input arguments for the construct. /// virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values); /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause. /// \param Values Input arguments for the construct. /// virtual void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values); /// Call the appropriate runtime routine to notify that we finished /// iteration of the ordered loop with the dynamic scheduling. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned); /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. 
/// virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind); /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. /// \param LB Address of the output variable in which the lower iteration /// number is returned. /// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST); /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. virtual void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc); /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. virtual void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc); /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc); /// Returns the address of the variable marked as declare target with link /// clause OR as declare target with to clause and unified memory. virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD); /// Emit a code for initialization of threadprivate variable. It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. virtual llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr); /// Emit a code for initialization of declare target variable. /// \param VD Declare target variable. /// \param Addr Address of the global variable \a VD. /// \param PerformInit true if initialization expression is not constant. virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD, llvm::GlobalVariable *Addr, bool PerformInit); /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name); /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. 
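  /// Illustrative sketch: for
  /// \code
  /// #pragma omp flush(a, b)
  /// \endcode
  /// a __kmpc_flush(&loc) call (or an atomic fence, depending on \a AO) is
  /// emitted; the variable list does not change the runtime call itself.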
  virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
                         SourceLocation Loc, llvm::AtomicOrdering AO);

  /// Emit task region for the task directive. The task region is
  /// emitted in several steps:
  /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
  /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
  /// function:
  /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
  ///   TaskFunction(gtid, tt->part_id, tt->shareds);
  ///   return 0;
  /// }
  /// 2. Copy a list of shared variables to field shareds of the resulting
  /// structure kmp_task_t returned by the previous call (if any).
  /// 3. Copy a pointer to destructions function to field destructions of the
  /// resulting structure kmp_task_t.
  /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
  /// kmp_task_t *new_task), where new_task is a resulting structure from
  /// previous items.
  /// \param D Current task directive.
  /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/,
  /// i32 /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared
  /// variables.
  /// \param Shareds Context with the list of shared variables from the \p
  /// TaskFunction.
  /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
  /// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates etc.
  virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                            const OMPExecutableDirective &D,
                            llvm::Function *TaskFunction, QualType SharedsTy,
                            Address Shareds, const Expr *IfCond,
                            const OMPTaskDataTy &Data);

  /// Emit task region for the taskloop directive. The taskloop region is
  /// emitted in several steps:
  /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
  /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
  /// function:
  /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
  ///   TaskFunction(gtid, tt->part_id, tt->shareds);
  ///   return 0;
  /// }
  /// 2. Copy a list of shared variables to field shareds of the resulting
  /// structure kmp_task_t returned by the previous call (if any).
  /// 3. Copy a pointer to destructions function to field destructions of the
  /// resulting structure kmp_task_t.
  /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid,
  /// kmp_task_t *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64
  /// st, int nogroup, int sched, kmp_uint64 grainsize, void *task_dup),
  /// where task is a resulting structure from previous items.
  /// \param D Current task directive.
  /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/,
  /// i32 /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared
  /// variables.
  /// \param Shareds Context with the list of shared variables from the \p
  /// TaskFunction.
  /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
  /// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates etc.
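  /// An illustrative source-level form of the construct handled here:
  /// \code
  /// #pragma omp taskloop grainsize(8)
  /// for (int i = 0; i < n; ++i)
  ///   body(i);
  /// \endcode
  /// lowered through the __kmpc_omp_task_alloc/__kmpc_taskloop sequence
  /// described in the steps above (a sketch; clauses add further arguments).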
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data); /// Emit code for the directive that does not require outlining. /// /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param HasCancel true if region has inner cancel directive, false /// otherwise. virtual void emitInlinedDirective(CodeGenFunction &CGF, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool HasCancel = false); /// Emits reduction function. /// \param ArgsType Array type containing pointers to reduction variables. /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. llvm::Function *emitReductionFunction(SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps); /// Emits single reduction combiner void emitSingleReductionCombiner(CodeGenFunction &CGF, const Expr *ReductionOp, const Expr *PrivateRef, const DeclRefExpr *LHS, const DeclRefExpr *RHS); struct ReductionOptionsTy { bool WithNowait; bool SimpleReduction; OpenMPDirectiveKind ReductionKind; }; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... /// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. /// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options); /// Emit a code for initialization of task reduction clause. Next code /// should be emitted for reduction: /// \code /// /// _taskred_item_t red_data[n]; /// ... 
  /// red_data[i].shar = &shareds[i];
  /// red_data[i].orig = &origs[i];
  /// red_data[i].size = sizeof(origs[i]);
  /// red_data[i].f_init = (void*)RedInit<i>;
  /// red_data[i].f_fini = (void*)RedDest<i>;
  /// red_data[i].f_comb = (void*)RedOp<i>;
  /// red_data[i].flags = <Flag_i>;
  /// ...
  /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
  /// \endcode
  /// For reduction clause with task modifier it emits the next call:
  /// \code
  ///
  /// _taskred_item_t red_data[n];
  /// ...
  /// red_data[i].shar = &shareds[i];
  /// red_data[i].orig = &origs[i];
  /// red_data[i].size = sizeof(origs[i]);
  /// red_data[i].f_init = (void*)RedInit<i>;
  /// red_data[i].f_fini = (void*)RedDest<i>;
  /// red_data[i].f_comb = (void*)RedOp<i>;
  /// red_data[i].flags = <Flag_i>;
  /// ...
  /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
  /// red_data);
  /// \endcode
  /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction
  /// operations.
  /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction
  /// operations.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates, reductions etc.
  virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
                                             SourceLocation Loc,
                                             ArrayRef<const Expr *> LHSExprs,
                                             ArrayRef<const Expr *> RHSExprs,
                                             const OMPTaskDataTy &Data);

  /// Emits the following code for reduction clause with task modifier:
  /// \code
  /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
  /// \endcode
  virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
                                     bool IsWorksharingReduction);

  /// Required to resolve existing problems in the runtime. Emits
  /// threadprivate variables to store the size of the VLAs/array sections
  /// for initializer/combiner/finalizer functions.
  /// \param RCG Allows reusing existing data for the reductions.
  /// \param N Reduction item for which fixups must be emitted.
  virtual void emitTaskReductionFixups(CodeGenFunction &CGF,
                                       SourceLocation Loc,
                                       ReductionCodeGen &RCG, unsigned N);

  /// Get the address of `void *` type of the private copy of the reduction
  /// item specified by the \p SharedLVal.
  /// \param ReductionsPtr Pointer to the reduction data returned by the
  /// emitTaskReductionInit function.
  /// \param SharedLVal Address of the original reduction item.
  virtual Address getTaskReductionItem(CodeGenFunction &CGF,
                                       SourceLocation Loc,
                                       llvm::Value *ReductionsPtr,
                                       LValue SharedLVal);

  /// Emit code for 'taskwait' directive.
  virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);

  /// Emit code for 'cancellation point' construct.
  /// \param CancelRegion Region kind for which the cancellation point must
  /// be emitted.
  ///
  virtual void emitCancellationPointCall(CodeGenFunction &CGF,
                                         SourceLocation Loc,
                                         OpenMPDirectiveKind CancelRegion);

  /// Emit code for 'cancel' construct.
  /// \param IfCond Condition in the associated 'if' clause, if it was
  /// specified, nullptr otherwise.
  /// \param CancelRegion Region kind for which the cancel must be emitted.
  ///
  virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                              const Expr *IfCond,
                              OpenMPDirectiveKind CancelRegion);

  /// Emit outlined function for 'target' directive.
  /// \param D Directive to emit.
  /// \param ParentName Name of the function that encloses the target region.
  /// \param OutlinedFn Outlined function value to be defined by this call.
  /// \param OutlinedFnID Outlined function ID value to be defined by this
  /// call.
  /// \param IsOffloadEntry True if the outlined function is an offload
  /// entry.
  /// \param CodeGen Code generation sequence for the \a D directive.
  /// An outlined function may not be an entry if, e.g. the if clause always
  /// evaluates to false.
  virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
                                          StringRef ParentName,
                                          llvm::Function *&OutlinedFn,
                                          llvm::Constant *&OutlinedFnID,
                                          bool IsOffloadEntry,
                                          const RegionCodeGenTy &CodeGen);

  /// Emit the target offloading code associated with \a D. The emitted
  /// code attempts offloading the execution to the device, and in the event
  /// of a failure it executes the host version outlined in \a OutlinedFn.
  /// \param D Directive to emit.
  /// \param OutlinedFn Host version of the code to be offloaded.
  /// \param OutlinedFnID ID of host version of the code to be offloaded.
  /// \param IfCond Expression evaluated in if clause associated with the
  /// target directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive (or null if no device clause is used), plus the device
  /// clause modifier.
  /// \param SizeEmitter Callback to emit number of iterations for loop-based
  /// directives.
  virtual void emitTargetCall(
      CodeGenFunction &CGF, const OMPExecutableDirective &D,
      llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
      const Expr *IfCond,
      llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier>
          Device,
      llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
                                       const OMPLoopDirective &D)>
          SizeEmitter);

  /// Emit the target regions enclosed in \a GD function definition or
  /// the function itself in case it is a valid device function. Returns true
  /// if \a GD was dealt with successfully.
  /// \param GD Function to scan.
  virtual bool emitTargetFunctions(GlobalDecl GD);

  /// Emit the global variable if it is a valid device global variable.
  /// Returns true if \a GD was dealt with successfully.
  /// \param GD Variable declaration to emit.
  virtual bool emitTargetGlobalVariable(GlobalDecl GD);

  /// Checks if the provided global decl \a GD is a declare target variable
  /// and registers it when emitting code for the host.
  virtual void registerTargetGlobalVariable(const VarDecl *VD,
                                            llvm::Constant *Addr);

  /// Registers provided target firstprivate variable as global on the
  /// target.
  llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
                                                 const VarDecl *VD);

  /// Emit the global \a GD if it is meaningful for the target. Returns true
  /// if it was emitted successfully.
  /// \param GD Global to scan.
  virtual bool emitTargetGlobal(GlobalDecl GD);

  /// Creates and returns a registration function for when at least one
  /// requires directive was used in the current module.
  llvm::Function *emitRequiresDirectiveRegFun();

  /// Creates all the offload entries in the current compilation unit
  /// along with the associated metadata.
  void createOffloadEntriesAndInfoMetadata();

  /// Emits code for teams call of the \a OutlinedFn with
  /// variables captured in a record which address is stored in \a
  /// CapturedStruct.
  /// \param OutlinedFn Outlined function to be run by team masters. Type of
  /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
  /// \param CapturedVars A pointer to the record with the references to
  /// variables used in \a OutlinedFn function.
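  /// Illustrative example of a construct that reaches this method:
  /// \code
  /// #pragma omp teams
  /// { ... }
  /// \endcode
  /// which runs \a OutlinedFn on the team masters via __kmpc_fork_teams(...)
  /// (a sketch; the exact arguments depend on the clauses present).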
  ///
  virtual void emitTeamsCall(CodeGenFunction &CGF,
                             const OMPExecutableDirective &D,
                             SourceLocation Loc, llvm::Function *OutlinedFn,
                             ArrayRef<llvm::Value *> CapturedVars);

  /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate
  /// code for num_teams clause.
  /// \param NumTeams An integer expression of teams.
  /// \param ThreadLimit An integer expression of threads.
  virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
                                  const Expr *ThreadLimit,
                                  SourceLocation Loc);

  /// Struct that keeps all the relevant information that should be kept
  /// throughout a 'target data' region.
  class TargetDataInfo {
    /// Set to true if device pointer information has to be obtained.
    bool RequiresDevicePointerInfo = false;
    /// Set to true if Clang emits separate runtime calls for the beginning
    /// and end of the region. These calls might have separate map type
    /// arrays.
    bool SeparateBeginEndCalls = false;

  public:
    /// The array of base pointers passed to the runtime library.
    llvm::Value *BasePointersArray = nullptr;
    /// The array of section pointers passed to the runtime library.
    llvm::Value *PointersArray = nullptr;
    /// The array of sizes passed to the runtime library.
    llvm::Value *SizesArray = nullptr;
    /// The array of map types passed to the runtime library for the
    /// beginning of the region or for the entire region if there are no
    /// separate map types for the region end.
    llvm::Value *MapTypesArray = nullptr;
    /// The array of map types passed to the runtime library for the end of
    /// the region, or nullptr if there are no separate map types for the
    /// region end.
    llvm::Value *MapTypesArrayEnd = nullptr;
    /// The array of user-defined mappers passed to the runtime library.
    llvm::Value *MappersArray = nullptr;
    /// Indicate whether any user-defined mapper exists.
    bool HasMapper = false;
    /// The total number of pointers passed to the runtime library.
    unsigned NumberOfPtrs = 0u;
    /// Map between the declaration of a capture and the corresponding base
    /// pointer address where the runtime returns the device pointers.
    llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;

    explicit TargetDataInfo() {}
    explicit TargetDataInfo(bool RequiresDevicePointerInfo,
                            bool SeparateBeginEndCalls)
        : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
          SeparateBeginEndCalls(SeparateBeginEndCalls) {}
    /// Clear information about the data arrays.
    void clearArrayInfo() {
      BasePointersArray = nullptr;
      PointersArray = nullptr;
      SizesArray = nullptr;
      MapTypesArray = nullptr;
      MapTypesArrayEnd = nullptr;
      MappersArray = nullptr;
      HasMapper = false;
      NumberOfPtrs = 0u;
    }
    /// Return true if the current target data information has valid arrays.
    bool isValid() {
      return BasePointersArray && PointersArray && SizesArray &&
             MapTypesArray && (!HasMapper || MappersArray) && NumberOfPtrs;
    }
    bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
    bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
  };

  /// Emit the target data mapping code associated with \a D.
  /// \param D Directive to emit.
  /// \param IfCond Expression evaluated in if clause associated with the
  /// target directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive, or null if no device clause is used.
  /// \param Info A record used to store information that needs to be
  /// preserved until the region is closed.
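  /// Illustrative source form handled here (a sketch only; the exact
  /// runtime entry points and arguments are derived from \a Info):
  /// \code
  /// #pragma omp target data map(tofrom: a[0:n])
  /// { ... }
  /// \endcode
  /// which is bracketed by __tgt_target_data_begin/__tgt_target_data_end
  /// style runtime calls built from the arrays stored in \a Info.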
  virtual void emitTargetDataCalls(CodeGenFunction &CGF,
                                   const OMPExecutableDirective &D,
                                   const Expr *IfCond, const Expr *Device,
                                   const RegionCodeGenTy &CodeGen,
                                   TargetDataInfo &Info);

  /// Emit the data mapping/movement code associated with the directive
  /// \a D that should be of the form 'target [{enter|exit} data | update]'.
  /// \param D Directive to emit.
  /// \param IfCond Expression evaluated in if clause associated with the
  /// target directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive, or null if no device clause is used.
  virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
                                            const OMPExecutableDirective &D,
                                            const Expr *IfCond,
                                            const Expr *Device);

  /// Marks function \a Fn with properly mangled versions of vector
  /// functions.
  /// \param FD Function marked as 'declare simd'.
  /// \param Fn LLVM function that must be marked with 'declare simd'
  /// attributes.
  virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
                                       llvm::Function *Fn);

  /// Emit initialization for doacross loop nesting support.
  /// \param D Loop-based construct used in doacross nesting construct.
  virtual void emitDoacrossInit(CodeGenFunction &CGF,
                                const OMPLoopDirective &D,
                                ArrayRef<Expr *> NumIterations);

  /// Emit code for doacross ordered directive with 'depend' clause.
  /// \param C 'depend' clause with 'sink|source' dependency kind.
  virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
                                   const OMPDependClause *C);

  /// Translates the native parameter of outlined function if this is
  /// required for target.
  /// \param FD Field decl from captured record for the parameter.
  /// \param NativeParam Parameter itself.
  virtual const VarDecl *translateParameter(const FieldDecl *FD,
                                            const VarDecl *NativeParam) const {
    return NativeParam;
  }

  /// Gets the address of the native argument based on the address of the
  /// target-specific parameter.
  /// \param NativeParam Parameter itself.
  /// \param TargetParam Corresponding target-specific parameter.
  virtual Address getParameterAddress(CodeGenFunction &CGF,
                                      const VarDecl *NativeParam,
                                      const VarDecl *TargetParam) const;

  /// Choose default schedule type and chunk value for the
  /// dist_schedule clause.
  virtual void getDefaultDistScheduleAndChunk(
      CodeGenFunction &CGF, const OMPLoopDirective &S,
      OpenMPDistScheduleClauseKind &ScheduleKind,
      llvm::Value *&Chunk) const {}

  /// Choose default schedule type and chunk value for the
  /// schedule clause.
  virtual void getDefaultScheduleAndChunk(
      CodeGenFunction &CGF, const OMPLoopDirective &S,
      OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const;

  /// Emits call of the outlined function with the provided arguments,
  /// translating these arguments to correct target-specific arguments.
  virtual void emitOutlinedFunctionCall(
      CodeGenFunction &CGF, SourceLocation Loc,
      llvm::FunctionCallee OutlinedFn,
      ArrayRef<llvm::Value *> Args = llvm::None) const;

  /// Emits OpenMP-specific function prolog.
  /// Required for device constructs.
  virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);

  /// Gets the OpenMP-specific address of the local variable.
  virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
                                            const VarDecl *VD);

  /// Marks the declaration as already emitted for the device code and
  /// returns true if it was already marked, and false otherwise.
  bool markAsGlobalTarget(GlobalDecl GD);

  /// Emit deferred declare target variables marked for deferred emission.
  void emitDeferredTargetDecls() const;

  /// Adjust some parameters for the target-based directives, like addresses
  /// of the variables captured by reference in lambdas.
  virtual void adjustTargetSpecificDataForLambdas(
      CodeGenFunction &CGF, const OMPExecutableDirective &D) const;

  /// Perform check on requires decl to ensure that target architecture
  /// supports unified addressing.
  virtual void processRequiresDirective(const OMPRequiresDecl *D);

  /// Gets default memory ordering as specified in requires directive.
  llvm::AtomicOrdering getDefaultMemoryOrdering() const;

  /// Checks if the variable has associated OMPAllocateDeclAttr attribute
  /// with the predefined allocator and translates it into the corresponding
  /// address space.
  virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD,
                                                LangAS &AS);

  /// Return whether the unified_shared_memory has been specified.
  bool hasRequiresUnifiedSharedMemory() const;

  /// Checks if the \p VD variable is marked as nontemporal declaration in
  /// current context.
  bool isNontemporalDecl(const ValueDecl *VD) const;

  /// Create specialized alloca to handle lastprivate conditionals.
  Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
                                         const VarDecl *VD);

  /// Checks if the provided \p LVal is lastprivate conditional and emits the
  /// code to update the value of the original variable.
  /// \code
  /// lastprivate(conditional: a)
  /// ...
  /// <type> a;
  /// lp_a = ...;
  /// #pragma omp critical(a)
  /// if (last_iv_a <= iv) {
  ///   last_iv_a = iv;
  ///   global_a = lp_a;
  /// }
  /// \endcode
  virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
                                                  const Expr *LHS);

  /// Checks if the lastprivate conditional was updated in inner region and
  /// writes the value.
  /// \code
  /// lastprivate(conditional: a)
  /// ...
  /// <type> a; bool Fired = false;
  /// #pragma omp ... shared(a)
  /// {
  ///   lp_a = ...;
  ///   Fired = true;
  /// }
  /// if (Fired) {
  ///   #pragma omp critical(a)
  ///   if (last_iv_a <= iv) {
  ///     last_iv_a = iv;
  ///     global_a = lp_a;
  ///   }
  ///   Fired = false;
  /// }
  /// \endcode
  virtual void checkAndEmitSharedLastprivateConditional(
      CodeGenFunction &CGF, const OMPExecutableDirective &D,
      const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);

  /// Gets the address of the global copy used for lastprivate conditional
  /// update, if any.
  /// \param PrivLVal LValue for the private copy.
  /// \param VD Original lastprivate declaration.
  virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
                                                     LValue PrivLVal,
                                                     const VarDecl *VD,
                                                     SourceLocation Loc);

  /// Emits list of dependencies based on the provided data (array of
  /// dependence/expression pairs).
  /// \returns Pointer to the first element of the array cast to VoidPtr
  /// type.
  std::pair<llvm::Value *, Address>
  emitDependClause(CodeGenFunction &CGF,
                   ArrayRef<OMPTaskDataTy::DependData> Dependencies,
                   SourceLocation Loc);

  /// Emits list of dependencies based on the provided data (array of
  /// dependence/expression pairs) for depobj construct. In this case, the
  /// variable is allocated dynamically. \returns Pointer to the first
  /// element of the array cast to VoidPtr type.
  Address emitDepobjDependClause(CodeGenFunction &CGF,
                                 const OMPTaskDataTy::DependData &Dependencies,
                                 SourceLocation Loc);

  /// Emits the code to destroy the dependency object provided in depobj
  /// directive.
  void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
                         SourceLocation Loc);

  /// Updates the dependency kind in the specified depobj object.
  /// \param DepobjLVal LValue for the main depobj object.
  /// \param NewDepKind New dependency kind.
  void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
                        OpenMPDependClauseKind NewDepKind,
                        SourceLocation Loc);

  /// Initializes user defined allocators specified in the uses_allocators
  /// clauses.
  void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
                              const Expr *AllocatorTraits);

  /// Destroys user defined allocators specified in the uses_allocators
  /// clause.
  void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);

  /// Returns true if the variable is a local variable in untied task.
  bool isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const;
};

/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
  explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
  ~CGOpenMPSIMDRuntime() override {}

  /// Emits outlined function for the specified OpenMP parallel directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives
  /// it is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  llvm::Function *
  emitParallelOutlinedFunction(const OMPExecutableDirective &D,
                               const VarDecl *ThreadIDVar,
                               OpenMPDirectiveKind InnermostKind,
                               const RegionCodeGenTy &CodeGen) override;

  /// Emits outlined function for the specified OpenMP teams directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives
  /// it is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  llvm::Function *
  emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
                            const VarDecl *ThreadIDVar,
                            OpenMPDirectiveKind InnermostKind,
                            const RegionCodeGenTy &CodeGen) override;

  /// Emits outlined function for the OpenMP task directive \a D. This
  /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
  /// TaskT).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param PartIDVar Variable for partition id in the current OpenMP untied
  /// task region.
  /// \param TaskTVar Variable for task_t argument.
  /// \param InnermostKind Kind of innermost directive (for simple directives
  /// it is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  /// \param Tied true if task is generated for tied task, false otherwise.
  /// \param NumberOfParts Number of parts in untied task. Ignored for tied
  /// tasks.
  ///
  llvm::Function *emitTaskOutlinedFunction(
      const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
      const VarDecl *PartIDVar, const VarDecl *TaskTVar,
      OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
      bool Tied, unsigned &NumberOfParts) override;

  /// Emits code for parallel or serial call of the \a OutlinedFn with
  /// variables captured in a record which address is stored in \a
  /// CapturedStruct.
  /// \param OutlinedFn Outlined function to be run in parallel threads. Type
  /// of this function is void(*)(kmp_int32 *, kmp_int32, struct
  /// context_vars*).
  /// \param CapturedVars A pointer to the record with the references to
  /// variables used in \a OutlinedFn function.
  /// \param IfCond Condition in the associated 'if' clause, if it was
  /// specified, nullptr otherwise.
  ///
  void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
                        llvm::Function *OutlinedFn,
                        ArrayRef<llvm::Value *> CapturedVars,
                        const Expr *IfCond) override;

  /// Emits a critical region.
  /// \param CriticalName Name of the critical region.
  /// \param CriticalOpGen Generator for the statement associated with the
  /// given critical region.
  /// \param Hint Value of the 'hint' clause (optional).
  void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
                          const RegionCodeGenTy &CriticalOpGen,
                          SourceLocation Loc,
                          const Expr *Hint = nullptr) override;

  /// Emits a master region.
  /// \param MasterOpGen Generator for the statement associated with the
  /// given master region.
  void emitMasterRegion(CodeGenFunction &CGF,
                        const RegionCodeGenTy &MasterOpGen,
                        SourceLocation Loc) override;

  /// Emits code for a taskyield directive.
  void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;

  /// Emit a taskgroup region.
  /// \param TaskgroupOpGen Generator for the statement associated with the
  /// given taskgroup region.
  void emitTaskgroupRegion(CodeGenFunction &CGF,
                           const RegionCodeGenTy &TaskgroupOpGen,
                           SourceLocation Loc) override;

  /// Emits a single region.
  /// \param SingleOpGen Generator for the statement associated with the
  /// given single region.
  void emitSingleRegion(CodeGenFunction &CGF,
                        const RegionCodeGenTy &SingleOpGen,
                        SourceLocation Loc,
                        ArrayRef<const Expr *> CopyprivateVars,
                        ArrayRef<const Expr *> DestExprs,
                        ArrayRef<const Expr *> SrcExprs,
                        ArrayRef<const Expr *> AssignmentOps) override;

  /// Emit an ordered region.
  /// \param OrderedOpGen Generator for the statement associated with the
  /// given ordered region.
  void emitOrderedRegion(CodeGenFunction &CGF,
                         const RegionCodeGenTy &OrderedOpGen,
                         SourceLocation Loc, bool IsThreads) override;

  /// Emit an implicit/explicit barrier for OpenMP threads.
  /// \param Kind Directive for which this implicit barrier call must be
  /// generated. Must be OMPD_barrier for explicit barrier generation.
  /// \param EmitChecks true if need to emit checks for cancellation
  /// barriers.
  /// \param ForceSimpleCall true if a simple barrier call must be emitted,
  /// false if the runtime class decides which one to emit (simple or with
  /// cancellation checks).
  ///
  void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
                       OpenMPDirectiveKind Kind, bool EmitChecks = true,
                       bool ForceSimpleCall = false) override;

  /// Call the appropriate runtime routine to initialize it before start
  /// of loop.
  /// This is used for non-static scheduled types and when the ordered
  /// clause is present on the loop construct.
  /// Depending on the loop schedule, it is necessary to call some runtime
  /// routine before start of the OpenMP loop to get the loop upper / lower
  /// bounds \a LB and \a UB and stride \a ST.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
  /// \param IVSize Size of the iteration variable in bits.
  /// \param IVSigned Sign of the iteration variable.
  /// \param Ordered true if loop is ordered, false otherwise.
  /// \param DispatchValues struct containing llvm values for lower bound,
  /// upper bound, and chunk expression.
  /// For the default (nullptr) value, the chunk 1 will be used.
  ///
  void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
                           const OpenMPScheduleTy &ScheduleKind,
                           unsigned IVSize, bool IVSigned, bool Ordered,
                           const DispatchRTInput &DispatchValues) override;

  /// Call the appropriate runtime routine to initialize it before start
  /// of loop.
  ///
  /// This is used only in case of static schedule, when the user did not
  /// specify an ordered clause on the loop construct.
  /// Depending on the loop schedule, it is necessary to call some runtime
  /// routine before start of the OpenMP loop to get the loop upper / lower
  /// bounds LB and UB and stride ST.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param DKind Kind of the directive.
  /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
  /// \param Values Input arguments for the construct.
  ///
  void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
                         OpenMPDirectiveKind DKind,
                         const OpenMPScheduleTy &ScheduleKind,
                         const StaticRTInput &Values) override;

  /// Call the appropriate runtime routine to initialize it before start
  /// of the distribute loop.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
  /// \param Values Input arguments for the construct.
  ///
  void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
                                OpenMPDistScheduleClauseKind SchedKind,
                                const StaticRTInput &Values) override;

  /// Call the appropriate runtime routine to notify that we finished
  /// iteration of the ordered loop with the dynamic scheduling.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param IVSize Size of the iteration variable in bits.
  /// \param IVSigned Sign of the iteration variable.
  ///
  void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
                                  unsigned IVSize, bool IVSigned) override;

  /// Call the appropriate runtime routine to notify that we finished
  /// all the work with current loop.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param DKind Kind of the directive for which the static finish is
  /// emitted.
  ///
  void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
                           OpenMPDirectiveKind DKind) override;

  /// Call __kmpc_dispatch_next(
  ///          ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
  ///          kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
  ///          kmp_int[32|64] *p_stride);
  /// \param IVSize Size of the iteration variable in bits.
  /// \param IVSigned Sign of the iteration variable.
  /// \param IL Address of the output variable in which the flag of the
  /// last iteration is returned.
  /// \param LB Address of the output variable in which the lower iteration
  /// number is returned.
  /// \param UB Address of the output variable in which the upper iteration
  /// number is returned.
  /// \param ST Address of the output variable in which the stride value is
  /// returned.
  llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
                           unsigned IVSize, bool IVSigned, Address IL,
                           Address LB, Address UB, Address ST) override;

  /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
  /// clause.
  /// \param NumThreads An integer value of threads.
  void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
                            SourceLocation Loc) override;

  /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
  /// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
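  /// Illustrative sketch of the construct the base runtime handles here (the
  /// SIMD-only runtime is not expected to reach this entry point):
  /// \code
  /// #pragma omp parallel proc_bind(close)
  /// { ... }
  /// \endcode
  /// for which __kmpc_push_proc_bind(&loc, gtid, /*proc_bind_close*/ 3) is
  /// emitted before the fork.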
  void emitProcBindClause(CodeGenFunction &CGF,
                          llvm::omp::ProcBindKind ProcBind,
                          SourceLocation Loc) override;

  /// Returns address of the threadprivate variable for the current
  /// thread.
  /// \param VD Threadprivate variable.
  /// \param VDAddr Address of the global variable \a VD.
  /// \param Loc Location of the reference to threadprivate var.
  /// \return Address of the threadprivate variable for the current thread.
  Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
                                 Address VDAddr, SourceLocation Loc) override;

  /// Emit a code for initialization of threadprivate variable. It emits
  /// a call to runtime library which adds initial value to the newly created
  /// threadprivate variable (if it is not constant) and registers destructor
  /// for the variable (if any).
  /// \param VD Threadprivate variable.
  /// \param VDAddr Address of the global variable \a VD.
  /// \param Loc Location of threadprivate declaration.
  /// \param PerformInit true if initialization expression is not constant.
  llvm::Function *
  emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
                                 SourceLocation Loc, bool PerformInit,
                                 CodeGenFunction *CGF = nullptr) override;

  /// Creates artificial threadprivate variable with name \p Name and type
  /// \p VarType.
  /// \param VarType Type of the artificial threadprivate variable.
  /// \param Name Name of the artificial threadprivate variable.
  Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
                                           QualType VarType,
                                           StringRef Name) override;

  /// Emit flush of the variables specified in 'omp flush' directive.
  /// \param Vars List of variables to flush.
  void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
                 SourceLocation Loc, llvm::AtomicOrdering AO) override;

  /// Emit task region for the task directive. The task region is
  /// emitted in several steps:
  /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
  /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
  /// function:
  /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
  ///   TaskFunction(gtid, tt->part_id, tt->shareds);
  ///   return 0;
  /// }
  /// 2. Copy a list of shared variables to field shareds of the resulting
  /// structure kmp_task_t returned by the previous call (if any).
  /// 3. Copy a pointer to destructions function to field destructions of the
  /// resulting structure kmp_task_t.
  /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
  /// kmp_task_t *new_task), where new_task is a resulting structure from
  /// previous items.
  /// \param D Current task directive.
  /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/,
  /// i32 /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared
  /// variables.
  /// \param Shareds Context with the list of shared variables from the \p
  /// TaskFunction.
  /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
  /// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates etc.
  void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                    const OMPExecutableDirective &D,
                    llvm::Function *TaskFunction, QualType SharedsTy,
                    Address Shareds, const Expr *IfCond,
                    const OMPTaskDataTy &Data) override;

  /// Emit task region for the taskloop directive. The taskloop region is
  /// emitted in several steps:
  /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
  /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
  /// function:
  /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
  ///   TaskFunction(gtid, tt->part_id, tt->shareds);
  ///   return 0;
  /// }
  /// 2. Copy a list of shared variables to field shareds of the resulting
  /// structure kmp_task_t returned by the previous call (if any).
  /// 3. Copy a pointer to destructions function to field destructions of the
  /// resulting structure kmp_task_t.
  /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid,
  /// kmp_task_t *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64
  /// st, int nogroup, int sched, kmp_uint64 grainsize, void *task_dup),
  /// where task is a resulting structure from previous items.
  /// \param D Current task directive.
  /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/,
  /// i32 /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared
  /// variables.
  /// \param Shareds Context with the list of shared variables from the \p
  /// TaskFunction.
  /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
  /// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates etc.
  void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
                        const OMPLoopDirective &D,
                        llvm::Function *TaskFunction, QualType SharedsTy,
                        Address Shareds, const Expr *IfCond,
                        const OMPTaskDataTy &Data) override;

  /// Emit a code for reduction clause. Next code should be emitted for
  /// reduction:
  /// \code
  ///
  /// static kmp_critical_name lock = { 0 };
  ///
  /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
  ///   ...
  ///   *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
  ///   ...
  /// }
  ///
  /// ...
  /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
  /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  /// RedList, reduce_func, &<lock>)) {
  /// case 1:
  ///   ...
  ///   <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  ///   ...
  ///   __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  ///   break;
  /// case 2:
  ///   ...
  ///   Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  ///   ...
  ///   break;
  /// default:;
  /// }
  /// \endcode
  ///
  /// \param Privates List of private copies for original reduction
  /// arguments.
  /// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
  /// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
  /// \param ReductionOps List of reduction operations in form 'LHS binop
  /// RHS' or 'operator binop(LHS, RHS)'.
  /// \param Options List of options for reduction codegen:
  ///     WithNowait true if parent directive has also nowait clause, false
  ///     otherwise.
  ///     SimpleReduction Emit reduction operation only. Used for omp simd
  ///     directive on the host.
  ///     ReductionKind The kind of reduction to perform.
  void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
                     ArrayRef<const Expr *> Privates,
                     ArrayRef<const Expr *> LHSExprs,
                     ArrayRef<const Expr *> RHSExprs,
                     ArrayRef<const Expr *> ReductionOps,
                     ReductionOptionsTy Options) override;

  /// Emit a code for initialization of task reduction clause. Next code
  /// should be emitted for reduction:
  /// \code
  ///
  /// _taskred_item_t red_data[n];
  /// ...
  /// red_data[i].shar = &shareds[i];
  /// red_data[i].orig = &origs[i];
  /// red_data[i].size = sizeof(origs[i]);
  /// red_data[i].f_init = (void*)RedInit<i>;
  /// red_data[i].f_fini = (void*)RedDest<i>;
  /// red_data[i].f_comb = (void*)RedOp<i>;
  /// red_data[i].flags = <Flag_i>;
  /// ...
  /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
  /// \endcode
  /// For reduction clause with task modifier it emits the next call:
  /// \code
  ///
  /// _taskred_item_t red_data[n];
  /// ...
  /// red_data[i].shar = &shareds[i];
  /// red_data[i].orig = &origs[i];
  /// red_data[i].size = sizeof(origs[i]);
  /// red_data[i].f_init = (void*)RedInit<i>;
  /// red_data[i].f_fini = (void*)RedDest<i>;
  /// red_data[i].f_comb = (void*)RedOp<i>;
  /// red_data[i].flags = <Flag_i>;
  /// ...
  /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
  /// red_data);
  /// \endcode
  /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction
  /// operations.
  /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction
  /// operations.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates, reductions etc.
  llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
                                     ArrayRef<const Expr *> LHSExprs,
                                     ArrayRef<const Expr *> RHSExprs,
                                     const OMPTaskDataTy &Data) override;

  /// Emits the following code for reduction clause with task modifier:
  /// \code
  /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
  /// \endcode
  void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
                             bool IsWorksharingReduction) override;

  /// Required to resolve existing problems in the runtime. Emits
  /// threadprivate variables to store the size of the VLAs/array sections
  /// for initializer/combiner/finalizer functions + emits threadprivate
  /// variable to store the pointer to the original reduction item for the
  /// custom initializer defined by declare reduction construct.
  /// \param RCG Allows reusing existing data for the reductions.
  /// \param N Reduction item for which fixups must be emitted.
  void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
                               ReductionCodeGen &RCG, unsigned N) override;

  /// Get the address of `void *` type of the private copy of the reduction
  /// item specified by the \p SharedLVal.
  /// \param ReductionsPtr Pointer to the reduction data returned by the
  /// emitTaskReductionInit function.
  /// \param SharedLVal Address of the original reduction item.
  Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
                               llvm::Value *ReductionsPtr,
                               LValue SharedLVal) override;

  /// Emit code for 'taskwait' directive.
  void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;

  /// Emit code for 'cancellation point' construct.
  /// \param CancelRegion Region kind for which the cancellation point must
  /// be emitted.
  ///
  void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
                                 OpenMPDirectiveKind CancelRegion) override;

  /// Emit code for 'cancel' construct.
  /// \param IfCond Condition in the associated 'if' clause, if it was
  /// specified, nullptr otherwise.
  /// \param CancelRegion Region kind for which the cancel must be emitted.
  ///
  void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                      const Expr *IfCond,
                      OpenMPDirectiveKind CancelRegion) override;

  /// Emit outlined function for 'target' directive.
  /// \param D Directive to emit.
  /// \param ParentName Name of the function that encloses the target region.
  /// \param OutlinedFn Outlined function value to be defined by this call.
  /// \param OutlinedFnID Outlined function ID value to be defined by this
  /// call.
  /// \param IsOffloadEntry True if the outlined function is an offload
  /// entry.
  /// \param CodeGen Code generation sequence for the \a D directive.
  /// An outlined function may not be an entry if, e.g. the if clause always
  /// evaluates to false.
  void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
                                  StringRef ParentName,
                                  llvm::Function *&OutlinedFn,
                                  llvm::Constant *&OutlinedFnID,
                                  bool IsOffloadEntry,
                                  const RegionCodeGenTy &CodeGen) override;

  /// Emit the target offloading code associated with \a D. The emitted
  /// code attempts offloading the execution to the device, and in the event
  /// of a failure it executes the host version outlined in \a OutlinedFn.
  /// \param D Directive to emit.
  /// \param OutlinedFn Host version of the code to be offloaded.
  /// \param OutlinedFnID ID of host version of the code to be offloaded.
  /// \param IfCond Expression evaluated in if clause associated with the
  /// target directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive (or null if no device clause is used), plus the device
  /// clause modifier.
  void emitTargetCall(
      CodeGenFunction &CGF, const OMPExecutableDirective &D,
      llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
      const Expr *IfCond,
      llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier>
          Device,
      llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
                                       const OMPLoopDirective &D)>
          SizeEmitter) override;

  /// Emit the target regions enclosed in \a GD function definition or
  /// the function itself in case it is a valid device function. Returns true
  /// if \a GD was dealt with successfully.
  /// \param GD Function to scan.
  bool emitTargetFunctions(GlobalDecl GD) override;

  /// Emit the global variable if it is a valid device global variable.
  /// Returns true if \a GD was dealt with successfully.
  /// \param GD Variable declaration to emit.
  bool emitTargetGlobalVariable(GlobalDecl GD) override;

  /// Emit the global \a GD if it is meaningful for the target. Returns true
  /// if it was emitted successfully.
  /// \param GD Global to scan.
  bool emitTargetGlobal(GlobalDecl GD) override;

  /// Emits code for teams call of the \a OutlinedFn with
  /// variables captured in a record which address is stored in \a
  /// CapturedStruct.
  /// \param OutlinedFn Outlined function to be run by team masters. Type of
  /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
  /// \param CapturedVars A pointer to the record with the references to
  /// variables used in \a OutlinedFn function.
  ///
  void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
                     SourceLocation Loc, llvm::Function *OutlinedFn,
                     ArrayRef<llvm::Value *> CapturedVars) override;

  /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate
  /// code for num_teams clause.
  /// \param NumTeams An integer expression of teams.
  /// \param ThreadLimit An integer expression of threads.
  void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
                          const Expr *ThreadLimit,
                          SourceLocation Loc) override;

  /// Emit the target data mapping code associated with \a D.
  /// \param D Directive to emit.
  /// \param IfCond Expression evaluated in if clause associated with the
  /// target directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive, or null if no device clause is used.
  /// \param Info A record used to store information that needs to be
  /// preserved until the region is closed.
  void emitTargetDataCalls(CodeGenFunction &CGF,
                           const OMPExecutableDirective &D,
                           const Expr *IfCond, const Expr *Device,
                           const RegionCodeGenTy &CodeGen,
                           TargetDataInfo &Info) override;

  /// Emit the data mapping/movement code associated with the directive
  /// \a D that should be of the form 'target [{enter|exit} data | update]'.
  /// \param D Directive to emit.
  /// \param IfCond Expression evaluated in if clause associated with the
  /// target directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive, or null if no device clause is used.
  void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
                                    const OMPExecutableDirective &D,
                                    const Expr *IfCond,
                                    const Expr *Device) override;

  /// Emit initialization for doacross loop nesting support.
  /// \param D Loop-based construct used in doacross nesting construct.
  void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
                        ArrayRef<Expr *> NumIterations) override;

  /// Emit code for doacross ordered directive with 'depend' clause.
  /// \param C 'depend' clause with 'sink|source' dependency kind.
  void emitDoacrossOrdered(CodeGenFunction &CGF,
                           const OMPDependClause *C) override;

  /// Translates the native parameter of outlined function if this is
  /// required for target.
  /// \param FD Field decl from captured record for the parameter.
  /// \param NativeParam Parameter itself.
  const VarDecl *translateParameter(const FieldDecl *FD,
                                    const VarDecl *NativeParam) const
      override;

  /// Gets the address of the native argument based on the address of the
  /// target-specific parameter.
  /// \param NativeParam Parameter itself.
  /// \param TargetParam Corresponding target-specific parameter.
  Address getParameterAddress(CodeGenFunction &CGF,
                              const VarDecl *NativeParam,
                              const VarDecl *TargetParam) const override;

  /// Gets the OpenMP-specific address of the local variable.
  Address getAddressOfLocalVariable(CodeGenFunction &CGF,
                                    const VarDecl *VD) override {
    return Address::invalid();
  }
};

} // namespace CodeGen
} // namespace clang

#endif
draw.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                        DDDD   RRRR    AAA   W   W                           %
%                        D   D  R   R  A   A  W   W                           %
%                        D   D  RRRR   AAAAA  W W W                           %
%                        D   D  R R    A   A  WW WW                           %
%                        DDDD   R  R   A   A  W   W                           %
%                                                                             %
%                                                                             %
%                    MagickCore Image Drawing Methods                         %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                 July 1998                                   %
%                                                                             %
%                                                                             %
%  Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
%  rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
%  Graphics Gems, 1990.  Leonard Rosenthal and David Harr of Appligent
%  (www.appligent.com) contributed the dash pattern, linecap stroking
%  algorithm, and minor rendering improvements.
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"

/*
  Define declarations.
*/
#define BezierQuantum  200
#define PrimitiveExtentPad  2048
#define MaxBezierCoordinates  4194304
#define ThrowPointExpectedException(token,exception) \
{ \
  (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
    "NonconformingDrawingPrimitiveDefinition","`%s'",token); \
  status=MagickFalse; \
  break; \
}

/*
  Typedef declarations.
*/ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo *points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _MVGInfo { PrimitiveInfo **primitive_info; size_t *extent; ssize_t offset; PointInfo point; ExceptionInfo *exception; } MVGInfo; typedef struct _PolygonInfo { EdgeInfo *edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* Forward declarations. */ static Image *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *, ExceptionInfo *); static MagickBooleanType DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *, ExceptionInfo *), RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *), TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo), TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo, const double,const MagickBooleanType,const MagickBooleanType), TraceBezier(MVGInfo *,const size_t), TraceCircle(MVGInfo *,const PointInfo,const PointInfo), TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo), TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo), TraceSquareLinecap(PrimitiveInfo *,const size_t,const double); static PrimitiveInfo *TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *); static size_t TracePath(MVGInfo *,const char *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireDrawInfo() returns a DrawInfo structure properly initialized. % % The format of the AcquireDrawInfo method is: % % DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo *AcquireDrawInfo(void) { DrawInfo *draw_info; draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info)); GetDrawInfo((ImageInfo *) NULL,draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. If NULL % is specified, a new DrawInfo structure is created initialized to default % values. % % The format of the CloneDrawInfo method is: % % DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info. % % o draw_info: the draw info. 
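%
%  A minimal usage sketch (hypothetical variable names).  Because the clone
%  deep-copies strings, patterns, the dash pattern, and gradient stops,
%  mutating the copy leaves the source untouched:
%
%    DrawInfo *source = AcquireDrawInfo();
%    DrawInfo *copy = CloneDrawInfo((ImageInfo *) NULL, source);
%    copy->stroke_width = 3.0;
%    copy = DestroyDrawInfo(copy);
%    source = DestroyDrawInfo(source);
%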
% */ MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info, const DrawInfo *draw_info) { DrawInfo *clone_info; ExceptionInfo *exception; clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetDrawInfo(image_info,clone_info); if (draw_info == (DrawInfo *) NULL) return(clone_info); exception=AcquireExceptionInfo(); if (draw_info->primitive != (char *) NULL) (void) CloneString(&clone_info->primitive,draw_info->primitive); if (draw_info->geometry != (char *) NULL) (void) CloneString(&clone_info->geometry,draw_info->geometry); clone_info->compliance=draw_info->compliance; clone_info->viewbox=draw_info->viewbox; clone_info->affine=draw_info->affine; clone_info->gravity=draw_info->gravity; clone_info->fill=draw_info->fill; clone_info->stroke=draw_info->stroke; clone_info->stroke_width=draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue, exception); if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0, MagickTrue,exception); clone_info->stroke_antialias=draw_info->stroke_antialias; clone_info->text_antialias=draw_info->text_antialias; clone_info->fill_rule=draw_info->fill_rule; clone_info->linecap=draw_info->linecap; clone_info->linejoin=draw_info->linejoin; clone_info->miterlimit=draw_info->miterlimit; clone_info->dash_offset=draw_info->dash_offset; clone_info->decorate=draw_info->decorate; clone_info->compose=draw_info->compose; if (draw_info->text != (char *) NULL) (void) CloneString(&clone_info->text,draw_info->text); if (draw_info->font != (char *) NULL) (void) CloneString(&clone_info->font,draw_info->font); if (draw_info->metrics != (char *) NULL) (void) CloneString(&clone_info->metrics,draw_info->metrics); if (draw_info->family != (char *) NULL) (void) CloneString(&clone_info->family,draw_info->family); clone_info->style=draw_info->style; clone_info->stretch=draw_info->stretch; clone_info->weight=draw_info->weight; if (draw_info->encoding != (char *) NULL) (void) CloneString(&clone_info->encoding,draw_info->encoding); clone_info->pointsize=draw_info->pointsize; clone_info->kerning=draw_info->kerning; clone_info->interline_spacing=draw_info->interline_spacing; clone_info->interword_spacing=draw_info->interword_spacing; clone_info->direction=draw_info->direction; if (draw_info->density != (char *) NULL) (void) CloneString(&clone_info->density,draw_info->density); clone_info->align=draw_info->align; clone_info->undercolor=draw_info->undercolor; clone_info->border_color=draw_info->border_color; if (draw_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) { register ssize_t x; for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ; clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*clone_info->dash_pattern)); if (clone_info->dash_pattern == (double *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)* sizeof(*clone_info->dash_pattern)); (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t) (x+1)*sizeof(*clone_info->dash_pattern)); } clone_info->gradient=draw_info->gradient; if (draw_info->gradient.stops != (StopInfo *) NULL) { size_t number_stops; number_stops=clone_info->gradient.number_stops; clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t) 
number_stops,sizeof(*clone_info->gradient.stops)); if (clone_info->gradient.stops == (StopInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops, (size_t) number_stops*sizeof(*clone_info->gradient.stops)); } clone_info->bounds=draw_info->bounds; clone_info->fill_alpha=draw_info->fill_alpha; clone_info->stroke_alpha=draw_info->stroke_alpha; clone_info->element_reference=draw_info->element_reference; clone_info->clip_path=draw_info->clip_path; clone_info->clip_units=draw_info->clip_units; if (draw_info->clip_mask != (char *) NULL) (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask); if (draw_info->clipping_mask != (Image *) NULL) clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0, MagickTrue,exception); if (draw_info->composite_mask != (Image *) NULL) clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0, MagickTrue,exception); clone_info->render=draw_info->render; clone_info->debug=IsEventLogging(); exception=DestroyExceptionInfo(exception); return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P a t h T o P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPathToPolygon() converts a path to the more efficient sorted % rendering form. % % The format of the ConvertPathToPolygon method is: % % PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) % % A description of each parameter follows: % % o Method ConvertPathToPolygon returns the path in a more efficient sorted % rendering form of type PolygonInfo. % % o draw_info: Specifies a pointer to an DrawInfo structure. % % o path_info: Specifies a pointer to an PathInfo structure. % % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int DrawCompareEdges(const void *p_edge,const void *q_edge) { #define DrawCompareEdge(p,q) \ { \ if (((p)-(q)) < 0.0) \ return(-1); \ if (((p)-(q)) > 0.0) \ return(1); \ } register const PointInfo *p, *q; /* Edge sorting for right-handed coordinate system. */ p=((const EdgeInfo *) p_edge)->points; q=((const EdgeInfo *) q_edge)->points; DrawCompareEdge(p[0].y,q[0].y); DrawCompareEdge(p[0].x,q[0].x); DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)* (q[1].x-q[0].x)); DrawCompareEdge(p[1].y,q[1].y); DrawCompareEdge(p[1].x,q[1].x); return(0); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static void LogPolygonInfo(const PolygonInfo *polygon_info) { register EdgeInfo *p; register ssize_t i, j; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge"); p=polygon_info->edges; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:", (double) i); (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s", p->direction != MagickFalse ? "down" : "up"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s", p->ghostline != MagickFalse ? 
"transparent" : "opaque"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1, p->bounds.x2,p->bounds.y2); for (j=0; j < (ssize_t) p->number_points; j++) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g", p->points[j].x,p->points[j].y); p++; } (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge"); } static void ReversePoints(PointInfo *points,const size_t number_points) { PointInfo point; register ssize_t i; for (i=0; i < (ssize_t) (number_points >> 1); i++) { point=points[i]; points[i]=points[number_points-(i+1)]; points[number_points-(i+1)]=point; } } static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) { long direction, next_direction; PointInfo point, *points; PolygonInfo *polygon_info; SegmentInfo bounds; register ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* Convert a path to the more efficient sorted rendering form. */ polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) return((PolygonInfo *) NULL); number_edges=16; polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); (void) memset(polygon_info->edges,0,number_edges* sizeof(*polygon_info->edges)); direction=0; edge=0; ghostline=MagickFalse; n=0; number_points=0; points=(PointInfo *) NULL; (void) memset(&point,0,sizeof(point)); (void) memset(&bounds,0,sizeof(bounds)); polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=0.0; polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) direction; polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->number_edges=0; for (i=0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* Move to. */ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; points=(PointInfo *) NULL; ghostline=MagickFalse; edge++; } if (points == (PointInfo *) NULL) { number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point=path_info[i].point; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; direction=0; n=1; continue; } /* Line to. */ next_direction=((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) && (path_info[i].point.x > point.x))) ? 
1 : -1; if ((points != (PointInfo *) NULL) && (direction != 0) && (direction != next_direction)) { /* New edge. */ point=points[n-1]; if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); n=1; ghostline=MagickFalse; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; edge++; } direction=next_direction; if (points == (PointInfo *) NULL) continue; if (n == (ssize_t) number_points) { number_points<<=1; points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } point=path_info[i].point; points[n]=point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.x > bounds.x2) bounds.x2=point.x; n++; } if (points != (PointInfo *) NULL) { if (n < 2) points=(PointInfo *) RelinquishMagickMemory(points); else { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; ghostline=MagickFalse; edge++; } } polygon_info->number_edges=edge; qsort(polygon_info->edges,(size_t) polygon_info->number_edges, sizeof(*polygon_info->edges),DrawCompareEdges); if (IsEventLogging() != MagickFalse) LogPolygonInfo(polygon_info); return(polygon_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P r i m i t i v e T o P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector % path structure. % % The format of the ConvertPrimitiveToPath method is: % % PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info, % const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o Method ConvertPrimitiveToPath returns a vector path structure of type % PathInfo. % % o draw_info: a structure of type DrawInfo. % % o primitive_info: Specifies a pointer to an PrimitiveInfo structure. 
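%
%  Note: each input coordinate may expand to as many as three path entries
%  (the point itself plus, for an open subpath, a ghostline moveto and a
%  closing lineto back to the start), with one terminating EndCode entry;
%  hence the implementation below conservatively allocates 3*n+1 entries and
%  trims the path once its true length is known.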
% % */ static void LogPathInfo(const PathInfo *path_info) { register const PathInfo *p; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path"); for (p=path_info; p->code != EndCode; p++) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ? "moveto ghostline" : p->code == OpenCode ? "moveto open" : p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" : "?"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path"); } static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info) { MagickBooleanType closed_subpath; PathInfo *path_info; PathInfoCode code; PointInfo p, q; register ssize_t i, n; ssize_t coordinates, start; /* Converts a PrimitiveInfo structure into a vector path structure. */ switch (primitive_info->primitive) { case AlphaPrimitive: case ColorPrimitive: case ImagePrimitive: case PointPrimitive: case TextPrimitive: return((PathInfo *) NULL); default: break; } for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL), sizeof(*path_info)); if (path_info == (PathInfo *) NULL) return((PathInfo *) NULL); coordinates=0; closed_subpath=MagickFalse; n=0; p.x=(-1.0); p.y=(-1.0); q.x=(-1.0); q.y=(-1.0); start=0; for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { code=LineToCode; if (coordinates <= 0) { /* New subpath. */ coordinates=(ssize_t) primitive_info[i].coordinates; p=primitive_info[i].point; start=n; code=MoveToCode; closed_subpath=primitive_info[i].closed_subpath; } coordinates--; if ((code == MoveToCode) || (coordinates <= 0) || (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) || (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon)) { /* Eliminate duplicate points. */ path_info[n].code=code; path_info[n].point=primitive_info[i].point; q=primitive_info[i].point; n++; } if (coordinates > 0) continue; /* next point in current subpath */ if (closed_subpath != MagickFalse) { closed_subpath=MagickFalse; continue; } /* Mark the p point as open if the subpath is not closed. */ path_info[start].code=OpenCode; path_info[n].code=GhostlineCode; path_info[n].point=primitive_info[i].point; n++; path_info[n].code=LineToCode; path_info[n].point=p; n++; } path_info[n].code=EndCode; path_info[n].point.x=0.0; path_info[n].point.y=0.0; if (IsEventLogging() != MagickFalse) LogPathInfo(path_info); path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1), sizeof(*path_info)); return(path_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyDrawInfo() deallocates memory associated with an DrawInfo structure. % % The format of the DestroyDrawInfo method is: % % DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) % % A description of each parameter follows: % % o draw_info: the draw info. 
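%
%  The method returns NULL (the result of relinquishing the structure), so
%  the conventional call site clears its reference in the same statement:
%
%    draw_info = DestroyDrawInfo(draw_info);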
% */ MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) { assert(draw_info != (DrawInfo *) NULL); if (draw_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info->signature == MagickCoreSignature); if (draw_info->primitive != (char *) NULL) draw_info->primitive=DestroyString(draw_info->primitive); if (draw_info->text != (char *) NULL) draw_info->text=DestroyString(draw_info->text); if (draw_info->geometry != (char *) NULL) draw_info->geometry=DestroyString(draw_info->geometry); if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); if (draw_info->stroke_pattern != (Image *) NULL) draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern); if (draw_info->font != (char *) NULL) draw_info->font=DestroyString(draw_info->font); if (draw_info->metrics != (char *) NULL) draw_info->metrics=DestroyString(draw_info->metrics); if (draw_info->family != (char *) NULL) draw_info->family=DestroyString(draw_info->family); if (draw_info->encoding != (char *) NULL) draw_info->encoding=DestroyString(draw_info->encoding); if (draw_info->density != (char *) NULL) draw_info->density=DestroyString(draw_info->density); if (draw_info->server_name != (char *) NULL) draw_info->server_name=(char *) RelinquishMagickMemory(draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) draw_info->dash_pattern=(double *) RelinquishMagickMemory( draw_info->dash_pattern); if (draw_info->gradient.stops != (StopInfo *) NULL) draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory( draw_info->gradient.stops); if (draw_info->clip_mask != (char *) NULL) draw_info->clip_mask=DestroyString(draw_info->clip_mask); if (draw_info->clipping_mask != (Image *) NULL) draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask); if (draw_info->composite_mask != (Image *) NULL) draw_info->composite_mask=DestroyImage(draw_info->composite_mask); draw_info->signature=(~MagickCoreSignature); draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y E d g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyEdge() destroys the specified polygon edge. % % The format of the DestroyEdge method is: % % ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge) % % A description of each parameter follows: % % o polygon_info: Specifies a pointer to an PolygonInfo structure. % % o edge: the polygon edge number to destroy. % */ static size_t DestroyEdge(PolygonInfo *polygon_info, const size_t edge) { assert(edge < polygon_info->number_edges); polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory( polygon_info->edges[edge].points); polygon_info->number_edges--; if (edge < polygon_info->number_edges) (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1, (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges)); return(polygon_info->number_edges); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P o l y g o n I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPolygonInfo() destroys the PolygonInfo data structure. 
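%  It relinquishes the point array of every edge, then the edge array, and
%  finally the PolygonInfo structure itself, returning NULL so the caller
%  can reset its pointer.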
% % The format of the DestroyPolygonInfo method is: % % PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info) % % A description of each parameter follows: % % o polygon_info: Specifies a pointer to an PolygonInfo structure. % */ static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info) { register ssize_t i; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) polygon_info->edges[i].points=(PointInfo *) RelinquishMagickMemory(polygon_info->edges[i].points); polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges); return((PolygonInfo *) RelinquishMagickMemory(polygon_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w A f f i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawAffineImage() composites the source over the destination image as % dictated by the affine transform. % % The format of the DrawAffineImage method is: % % MagickBooleanType DrawAffineImage(Image *image,const Image *source, % const AffineMatrix *affine,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o source: the source image. % % o affine: the affine transform. % % o exception: return any errors or warnings in this structure. % */ static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine, const double y,const SegmentInfo *edge) { double intercept, z; register double x; SegmentInfo inverse_edge; /* Determine left and right edges. */ inverse_edge.x1=edge->x1; inverse_edge.y1=edge->y1; inverse_edge.x2=edge->x2; inverse_edge.y2=edge->y2; z=affine->ry*y+affine->tx; if (affine->sx >= MagickEpsilon) { intercept=(-z/affine->sx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->sx < -MagickEpsilon) { intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->sx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns)) { inverse_edge.x2=edge->x1; return(inverse_edge); } /* Determine top and bottom edges. 
*/ z=affine->sy*y+affine->ty; if (affine->rx >= MagickEpsilon) { intercept=(-z/affine->rx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->rx < -MagickEpsilon) { intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->rx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows)) { inverse_edge.x2=edge->x2; return(inverse_edge); } return(inverse_edge); } static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine) { AffineMatrix inverse_affine; double determinant; determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx* affine->ry); inverse_affine.sx=determinant*affine->sy; inverse_affine.rx=determinant*(-affine->rx); inverse_affine.ry=determinant*(-affine->ry); inverse_affine.sy=determinant*affine->sx; inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty* inverse_affine.ry; inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty* inverse_affine.sy; return(inverse_affine); } MagickExport MagickBooleanType DrawAffineImage(Image *image, const Image *source,const AffineMatrix *affine,ExceptionInfo *exception) { AffineMatrix inverse_affine; CacheView *image_view, *source_view; MagickBooleanType status; PixelInfo zero; PointInfo extent[4], min, max; register ssize_t i; SegmentInfo edge; ssize_t start, stop, y; /* Determine bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(source != (const Image *) NULL); assert(source->signature == MagickCoreSignature); assert(affine != (AffineMatrix *) NULL); extent[0].x=0.0; extent[0].y=0.0; extent[1].x=(double) source->columns-1.0; extent[1].y=0.0; extent[2].x=(double) source->columns-1.0; extent[2].y=(double) source->rows-1.0; extent[3].x=0.0; extent[3].y=(double) source->rows-1.0; for (i=0; i < 4; i++) { PointInfo point; point=extent[i]; extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx; extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty; } min=extent[0]; max=extent[0]; for (i=1; i < 4; i++) { if (min.x > extent[i].x) min.x=extent[i].x; if (min.y > extent[i].y) min.y=extent[i].y; if (max.x < extent[i].x) max.x=extent[i].x; if (max.y < extent[i].y) max.y=extent[i].y; } /* Affine transform image. 
*/ if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; edge.x1=MagickMax(min.x,0.0); edge.y1=MagickMax(min.y,0.0); edge.x2=MagickMin(max.x,(double) image->columns-1.0); edge.y2=MagickMin(max.y,(double) image->rows-1.0); inverse_affine=InverseAffineMatrix(affine); GetPixelInfo(image,&zero); start=(ssize_t) ceil(edge.y1-0.5); stop=(ssize_t) floor(edge.y2+0.5); source_view=AcquireVirtualCacheView(source,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source,image,stop-start,1) #endif for (y=start; y <= stop; y++) { PixelInfo composite, pixel; PointInfo point; register ssize_t x; register Quantum *magick_restrict q; SegmentInfo inverse_edge; ssize_t x_offset; inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge); if (inverse_edge.x2 < inverse_edge.x1) continue; q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1- 0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1), 1,exception); if (q == (Quantum *) NULL) continue; pixel=zero; composite=zero; x_offset=0; for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++) { point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+ inverse_affine.tx; point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+ inverse_affine.ty; status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel, point.x,point.y,&pixel,exception); if (status == MagickFalse) break; GetPixelInfoPixel(image,q,&composite); CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha, &composite); SetPixelViaPixelInfo(image,&composite,q); x_offset++; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w B o u n d i n g R e c t a n g l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawBoundingRectangles() draws the bounding rectangles on the image. This % is only useful for developers debugging the rendering algorithm. % % The format of the DrawBoundingRectangles method is: % % MagickBooleanType DrawBoundingRectangles(Image *image, % const DrawInfo *draw_info,PolygonInfo *polygon_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o polygon_info: Specifies a pointer to a PolygonInfo structure. % % o exception: return any errors or warnings in this structure. 
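%
%  As a reading aid for the implementation below: down-directed edges are
%  outlined in red (#f00), up-directed edges in green (#0f0), and the
%  overall bounding box in blue (#00f), each rectangle inflated by half the
%  effective stroke width.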
% */ static inline double SaneStrokeWidth(const Image *image, const DrawInfo *draw_info) { return(MagickMin((double) draw_info->stroke_width, (2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows))); } static MagickBooleanType DrawBoundingRectangles(Image *image, const DrawInfo *draw_info,const PolygonInfo *polygon_info, ExceptionInfo *exception) { double mid; DrawInfo *clone_info; MagickStatusType status; PointInfo end, resolution, start; PrimitiveInfo primitive_info[6]; register ssize_t i; SegmentInfo bounds; ssize_t coordinates; (void) memset(primitive_info,0,sizeof(primitive_info)); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill, exception); if (status == MagickFalse) { clone_info=DestroyDrawInfo(clone_info); return(MagickFalse); } resolution.x=96.0; resolution.y=96.0; if (clone_info->density != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; flags=ParseGeometry(clone_info->density,&geometry_info); resolution.x=geometry_info.rho; resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == MagickFalse) resolution.y=resolution.x; } mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)* SaneStrokeWidth(image,clone_info)/2.0; bounds.x1=0.0; bounds.y1=0.0; bounds.x2=0.0; bounds.y2=0.0; if (polygon_info != (PolygonInfo *) NULL) { bounds=polygon_info->edges[0].bounds; for (i=1; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1) bounds.x1=polygon_info->edges[i].bounds.x1; if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1) bounds.y1=polygon_info->edges[i].bounds.y1; if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2) bounds.x2=polygon_info->edges[i].bounds.x2; if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2) bounds.y2=polygon_info->edges[i].bounds.y2; } bounds.x1-=mid; bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns ? (double) image->columns-1 : bounds.x1; bounds.y1-=mid; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows ? (double) image->rows-1 : bounds.y1; bounds.x2+=mid; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns ? (double) image->columns-1 : bounds.x2; bounds.y2+=mid; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows ? (double) image->rows-1 : bounds.y2; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].direction != 0) status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke, exception); else status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke, exception); if (status == MagickFalse) break; start.x=(double) (polygon_info->edges[i].bounds.x1-mid); start.y=(double) (polygon_info->edges[i].bounds.y1-mid); end.x=(double) (polygon_info->edges[i].bounds.x2+mid); end.y=(double) (polygon_info->edges[i].bounds.y2+mid); primitive_info[0].primitive=RectanglePrimitive; status&=TraceRectangle(primitive_info,start,end); primitive_info[0].method=ReplaceMethod; coordinates=(ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive=UndefinedPrimitive; status=DrawPrimitive(image,clone_info,primitive_info,exception); if (status == MagickFalse) break; } if (i < (ssize_t) polygon_info->number_edges) { clone_info=DestroyDrawInfo(clone_info); return(status == 0 ? 
MagickFalse : MagickTrue); } } status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke, exception); if (status == MagickFalse) { clone_info=DestroyDrawInfo(clone_info); return(MagickFalse); } start.x=(double) (bounds.x1-mid); start.y=(double) (bounds.y1-mid); end.x=(double) (bounds.x2+mid); end.y=(double) (bounds.y2+mid); primitive_info[0].primitive=RectanglePrimitive; status&=TraceRectangle(primitive_info,start,end); primitive_info[0].method=ReplaceMethod; coordinates=(ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive=UndefinedPrimitive; status=DrawPrimitive(image,clone_info,primitive_info,exception); clone_info=DestroyDrawInfo(clone_info); return(status == 0 ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C l i p P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawClipPath() draws the clip path on the image mask. % % The format of the DrawClipPath method is: % % MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info, % const char *id,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o id: the clip path id. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType DrawClipPath(Image *image, const DrawInfo *draw_info,const char *id,ExceptionInfo *exception) { const char *clip_path; Image *clipping_mask; MagickBooleanType status; clip_path=GetImageArtifact(image,id); if (clip_path == (const char *) NULL) return(MagickFalse); clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path, exception); if (clipping_mask == (Image *) NULL) return(MagickFalse); status=SetImageMask(image,WritePixelMask,clipping_mask,exception); clipping_mask=DestroyImage(clipping_mask); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C l i p p i n g M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawClippingMask() draws the clip path and returns it as an image clipping % mask. % % The format of the DrawClippingMask method is: % % Image *DrawClippingMask(Image *image,const DrawInfo *draw_info, % const char *id,const char *clip_path,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o id: the clip path id. % % o clip_path: the clip path. % % o exception: return any errors or warnings in this structure. % */ static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info, const char *id,const char *clip_path,ExceptionInfo *exception) { DrawInfo *clone_info; Image *clip_mask, *separate_mask; MagickStatusType status; /* Draw a clip path. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); clip_mask=AcquireImage((const ImageInfo *) NULL,exception); status=SetImageExtent(clip_mask,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImage(clip_mask)); status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception); status=QueryColorCompliance("#0000",AllCompliance, &clip_mask->background_color,exception); clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha; clip_mask->background_color.alpha_trait=BlendPixelTrait; status=SetImageBackgroundColor(clip_mask,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s", id); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->primitive,clip_path); status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill, exception); if (clone_info->clip_mask != (char *) NULL) clone_info->clip_mask=DestroyString(clone_info->clip_mask); status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke, exception); clone_info->stroke_width=0.0; clone_info->alpha=OpaqueAlpha; clone_info->clip_path=MagickTrue; status=RenderMVGContent(clip_mask,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); separate_mask=SeparateImage(clip_mask,AlphaChannel,exception); if (separate_mask != (Image *) NULL) { clip_mask=DestroyImage(clip_mask); clip_mask=separate_mask; status=NegateImage(clip_mask,MagickFalse,exception); if (status == MagickFalse) clip_mask=DestroyImage(clip_mask); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path"); return(clip_mask); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C o m p o s i t e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawCompositeMask() draws the mask path and returns it as an image mask. % % The format of the DrawCompositeMask method is: % % Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info, % const char *id,const char *mask_path,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o id: the mask path id. % % o mask_path: the mask path. % % o exception: return any errors or warnings in this structure. % */ static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info, const char *id,const char *mask_path,ExceptionInfo *exception) { Image *composite_mask, *separate_mask; DrawInfo *clone_info; MagickStatusType status; /* Draw a mask path. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); composite_mask=AcquireImage((const ImageInfo *) NULL,exception); status=SetImageExtent(composite_mask,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImage(composite_mask)); status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL, exception); status=QueryColorCompliance("#0000",AllCompliance, &composite_mask->background_color,exception); composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha; composite_mask->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(composite_mask,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s", id); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->primitive,mask_path); status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill, exception); status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke, exception); clone_info->stroke_width=0.0; clone_info->alpha=OpaqueAlpha; status=RenderMVGContent(composite_mask,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); separate_mask=SeparateImage(composite_mask,AlphaChannel,exception); if (separate_mask != (Image *) NULL) { composite_mask=DestroyImage(composite_mask); composite_mask=separate_mask; status=NegateImage(composite_mask,MagickFalse,exception); if (status == MagickFalse) composite_mask=DestroyImage(composite_mask); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path"); return(composite_mask); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w D a s h P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the % image while respecting the dash offset and dash pattern attributes. % % The format of the DrawDashPolygon method is: % % MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, % const PrimitiveInfo *primitive_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
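%
%  A sketch of the semantics (lengths here are illustrative): a dash pattern
%  of 3,1 alternates three units of stroke with one unit of gap along the
%  path, and a nonzero dash offset shifts the starting point into that
%  cycle; all pattern lengths are scaled by ExpandAffine() of the current
%  transform before use.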
% */ static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception) { double length, maximum_length, offset, scale, total_length; DrawInfo *clone_info; MagickStatusType status; PrimitiveInfo *dash_polygon; register double dx, dy; register ssize_t i; size_t number_vertices; ssize_t j, n; assert(draw_info != (const DrawInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash"); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; number_vertices=(size_t) i; dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (2UL*number_vertices+32UL),sizeof(*dash_polygon)); if (dash_polygon == (PrimitiveInfo *) NULL) return(MagickFalse); (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)* sizeof(*dash_polygon)); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->miterlimit=0; dash_polygon[0]=primitive_info[0]; scale=ExpandAffine(&draw_info->affine); length=scale*draw_info->dash_pattern[0]; offset=fabs(draw_info->dash_offset) >= MagickEpsilon ? scale*draw_info->dash_offset : 0.0; j=1; for (n=0; offset > 0.0; j=0) { if (draw_info->dash_pattern[n] <= 0.0) break; length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5)); if (offset > length) { offset-=length; n++; length=scale*draw_info->dash_pattern[n]; continue; } if (offset < length) { length-=offset; offset=0.0; break; } offset=0.0; n++; } status=MagickTrue; maximum_length=0.0; total_length=0.0; for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++) { dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot(dx,dy); if (maximum_length > MaxBezierCoordinates) break; if (fabs(length) < MagickEpsilon) { if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon) n++; if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon) n=0; length=scale*draw_info->dash_pattern[n]; } for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); ) { total_length+=length; if ((n & 0x01) != 0) { dash_polygon[0]=primitive_info[0]; dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy* total_length*PerceptibleReciprocal(maximum_length)); j=1; } else { if ((j+1) > (ssize_t) number_vertices) break; dash_polygon[j]=primitive_info[i-1]; dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception); } if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon) n++; if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon) n=0; length=scale*draw_info->dash_pattern[n]; } length-=(maximum_length-total_length); if ((n & 0x01) != 0) continue; dash_polygon[j]=primitive_info[i]; dash_polygon[j].coordinates=1; j++; } if ((total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1)) { dash_polygon[j]=primitive_info[i-1]; dash_polygon[j].point.x+=MagickEpsilon; dash_polygon[j].point.y+=MagickEpsilon; dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; 
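/*
  Stroke the final dash segment left open (pattern still in its "on"
  phase) when the vertex list ran out mid-dash.
*/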
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception); } dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGradientImage() draws a linear gradient on the image. % % The format of the DrawGradientImage method is: % % MagickBooleanType DrawGradientImage(Image *image, % const DrawInfo *draw_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o exception: return any errors or warnings in this structure. % */ static inline double GetStopColorOffset(const GradientInfo *gradient, const ssize_t x,const ssize_t y) { switch (gradient->type) { case UndefinedGradient: case LinearGradient: { double gamma, length, offset, scale; PointInfo p, q; const SegmentInfo *gradient_vector; gradient_vector=(&gradient->gradient_vector); p.x=gradient_vector->x2-gradient_vector->x1; p.y=gradient_vector->y2-gradient_vector->y1; q.x=(double) x-gradient_vector->x1; q.y=(double) y-gradient_vector->y1; length=sqrt(q.x*q.x+q.y*q.y); gamma=sqrt(p.x*p.x+p.y*p.y)*length; gamma=PerceptibleReciprocal(gamma); scale=p.x*q.x+p.y*q.y; offset=gamma*scale*length; return(offset); } case RadialGradient: { PointInfo v; if (gradient->spread == RepeatSpread) { v.x=(double) x-gradient->center.x; v.y=(double) y-gradient->center.y; return(sqrt(v.x*v.x+v.y*v.y)); } v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians( gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians( gradient->angle))))*PerceptibleReciprocal(gradient->radii.x); v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians( gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians( gradient->angle))))*PerceptibleReciprocal(gradient->radii.y); return(sqrt(v.x*v.x+v.y*v.y)); } } return(0.0); } static int StopInfoCompare(const void *x,const void *y) { StopInfo *stop_1, *stop_2; stop_1=(StopInfo *) x; stop_2=(StopInfo *) y; if (stop_1->offset > stop_2->offset) return(1); if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon) return(0); return(-1); } MagickExport MagickBooleanType DrawGradientImage(Image *image, const DrawInfo *draw_info,ExceptionInfo *exception) { CacheView *image_view; const GradientInfo *gradient; const SegmentInfo *gradient_vector; double length; MagickBooleanType status; PixelInfo zero; PointInfo point; RectangleInfo bounding_box; ssize_t y; /* Draw linear or radial gradient on image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); gradient=(&draw_info->gradient); qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo), StopInfoCompare); gradient_vector=(&gradient->gradient_vector); point.x=gradient_vector->x2-gradient_vector->x1; point.y=gradient_vector->y2-gradient_vector->y1; length=sqrt(point.x*point.x+point.y*point.y); bounding_box=gradient->bounding_box; status=MagickTrue; GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,bounding_box.height-bounding_box.y,1) #endif for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++) { PixelInfo composite, pixel; double alpha, offset; register Quantum *magick_restrict q; register ssize_t i, x; ssize_t j; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; composite=zero; offset=GetStopColorOffset(gradient,0,y); if (gradient->type != RadialGradient) offset*=PerceptibleReciprocal(length); for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++) { GetPixelInfoPixel(image,q,&pixel); switch (gradient->spread) { case UndefinedSpread: case PadSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type != RadialGradient) offset*=PerceptibleReciprocal(length); } for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if ((offset < 0.0) || (i == 0)) composite=gradient->stops[0].color; else if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops)) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } case ReflectSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type != RadialGradient) offset*=PerceptibleReciprocal(length); } if (offset < 0.0) offset=(-offset); if ((ssize_t) fmod(offset,2.0) == 0) offset=fmod(offset,1.0); else offset=1.0-fmod(offset,1.0); for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite=gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } case RepeatSpread: { MagickBooleanType antialias; double repeat; antialias=MagickFalse; repeat=0.0; if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type == LinearGradient) { repeat=fmod(offset,length); if (repeat < 0.0) repeat=length-fmod(-repeat,length); else repeat=fmod(offset,length); 
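/*
  Antialias the seam: blend stop colors when this pixel lands within one
  unit of the point where the gradient pattern repeats.
*/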
antialias=(repeat < length) && ((repeat+1.0) > length) ? MagickTrue : MagickFalse; offset=PerceptibleReciprocal(length)*repeat; } else { repeat=fmod(offset,gradient->radius); if (repeat < 0.0) repeat=gradient->radius-fmod(-repeat,gradient->radius); else repeat=fmod(offset,gradient->radius); antialias=repeat+1.0 > gradient->radius ? MagickTrue : MagickFalse; offset=repeat/gradient->radius; } } for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite=gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); if (antialias != MagickFalse) { if (gradient->type == LinearGradient) alpha=length-repeat; else alpha=gradient->radius-repeat; i=0; j=(ssize_t) gradient->number_stops-1L; } CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } } CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha, &pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawImage() draws a graphic primitive on your image. The primitive % may be represented as a string or filename. Precede the filename with an % "at" sign (@) and the contents of the file are drawn on the image. You % can affect how text is drawn by setting one or more members of the draw % info structure. % % The format of the DrawImage method is: % % MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info, const size_t pad) { double extent; size_t quantum; /* Check if there is enough storage for drawing pimitives. */ extent=(double) mvg_info->offset+pad+PrimitiveExtentPad; quantum=sizeof(**mvg_info->primitive_info); if (((extent*quantum) < (double) SSIZE_MAX) && ((extent*quantum) < (double) GetMaxMemoryRequest())) { if (extent <= (double) *mvg_info->extent) return(MagickTrue); *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory( *mvg_info->primitive_info,(size_t) extent,quantum); if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL) { register ssize_t i; *mvg_info->extent=(size_t) extent; for (i=mvg_info->offset+1; i < (ssize_t) extent; i++) (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive; return(MagickTrue); } } /* Reallocation failed, allocate a primitive to facilitate unwinding. 
*/ (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL) *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory( *mvg_info->primitive_info); *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory( PrimitiveExtentPad*quantum); (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum); *mvg_info->extent=1; return(MagickFalse); } MagickExport int MVGMacroCompare(const void *target,const void *source) { const char *p, *q; p=(const char *) target; q=(const char *) source; return(strcmp(p,q)); } static SplayTreeInfo *GetMVGMacros(const char *primitive) { char *macro, *token; const char *q; size_t extent; SplayTreeInfo *macros; /* Scan graphic primitives for definitions and classes. */ if (primitive == (const char *) NULL) return((SplayTreeInfo *) NULL); macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory, RelinquishMagickMemory); macro=AcquireString(primitive); token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; for (q=primitive; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (*token == '\0') break; if (LocaleCompare("push",token) == 0) { register const char *end, *start; GetNextToken(q,&q,extent,token); if (*q == '"') { char name[MagickPathExtent]; const char *p; ssize_t n; /* Named macro (e.g. push graphic-context "wheel"). */ GetNextToken(q,&q,extent,token); start=q; end=q; (void) CopyMagickString(name,token,MagickPathExtent); n=1; for (p=q; *p != '\0'; ) { GetNextToken(p,&p,extent,token); if (*token == '\0') break; if (LocaleCompare(token,"pop") == 0) { end=p-strlen(token)-1; n--; } if (LocaleCompare(token,"push") == 0) n++; if ((n == 0) && (end > start)) { /* Extract macro. */ GetNextToken(p,&p,extent,token); (void) CopyMagickString(macro,start,(size_t) (end-start)); (void) AddValueToSplayTree(macros,ConstantString(name), ConstantString(macro)); break; } } } } } token=DestroyString(token); macro=DestroyString(macro); return(macros); } static inline MagickBooleanType IsPoint(const char *point) { char *p; double value; value=StringToDouble(point,&p); return((fabs(value) < MagickEpsilon) && (p == point) ? 
MagickFalse : MagickTrue); } static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->closed_subpath=MagickFalse; primitive_info->point=point; return(MagickTrue); } static MagickBooleanType RenderMVGContent(Image *image, const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, coordinates, cursor, factor, primitive_extent; DrawInfo *clone_info, **graphic_context; MagickBooleanType proceed; MagickStatusType status; MVGInfo mvg_info; PointInfo point; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_points, number_stops; SplayTreeInfo *macros; ssize_t defsDepth, j, k, n, symbolDepth; StopInfo *stops; TypeMetric metrics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (depth > MagickMaxRecursionDepth) ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) { status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); if (status == MagickFalse) return(MagickFalse); } primitive=(char *) NULL; if (*draw_info->primitive != '@') primitive=AcquireString(draw_info->primitive); else if ((strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-')) primitive=FileToString(draw_info->primitive+1,~0UL,exception); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"mvg:vector-graphics",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. 
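    The vector starts at PrimitiveExtentPad entries and is grown on demand
    by CheckPrimitiveExtent() as primitives are parsed.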
*/ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) number_points* sizeof(*primitive_info)); (void) memset(&mvg_info,0,sizeof(mvg_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.exception=exception; graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; defsDepth=0; symbolDepth=0; cursor=0.0; macros=GetMVGMacros(primitive); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. */ GetNextToken(q,&q,MagickPathExtent,keyword); if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.rx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ry=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if 
(LocaleCompare("class",keyword) == 0) { const char *mvg_class; GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } mvg_class=(const char *) GetValueFromSplayTree(macros,token); if (mvg_class != (const char *) NULL) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. */ GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,exception); if (draw_info->compliance != SVGCompliance) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image, graphic_context[n]->clip_mask,clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. 
*/ GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
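    /* a trailing '%' marks the token as a percentage, scaled by 1/100 */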
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); graphic_context[n]->fill_alpha*=opacity; if (graphic_context[n]->fill.alpha != TransparentAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; else graphic_context[n]->fill.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { 
GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics,exception); graphic_context[n]->kerning=metrics.width* StringToDouble(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. */ GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,exception); if (draw_info->compliance != SVGCompliance) status=SetImageMask(image,CompositePixelMask, graphic_context[n]->composite_mask,exception); } break; } break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); graphic_context[n]->fill_alpha*=opacity; graphic_context[n]->stroke_alpha*=opacity; break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (draw_info->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageMask(image,WritePixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { GetNextToken(q,&q,extent,token); for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); GetNextToken(q,&q,extent,token); segment.x1=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y1=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.x2=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y2=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (LocaleCompare(type,"radial") == 0) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); 
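    /*
      The gradient is not rendered here: its body, type, and transformed
      geometry are stashed as image artifacts (name, name-type, and
      name-geometry) to be materialized later by DrawPatternPath().  For
      illustration, a hypothetical definition such as

        push gradient wheel linear 0,0 100,100
        stop-color red 0
        stop-color blue 1
        pop gradient

      yields the artifacts wheel, wheel-type, and wheel-geometry.
    */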
(void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("mask",token) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo bounds; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); GetNextToken(q,&q,extent,token); bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); bounds.width=(size_t) floor(StringToDouble(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); bounds.height=(size_t) floor(StringToDouble(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double) bounds.height,(double) bounds.x,(double) bounds.y); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("skewX",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops == (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; stops[number_stops-1].offset=factor*StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; GetNextToken(r,&r,extent,token); if (*token == ',') GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { GetNextToken(r,&r,extent,token); if (*token == ',') GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+2)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); graphic_context[n]->stroke_alpha*=opacity; if (graphic_context[n]->stroke.alpha != TransparentAlpha) graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha; else graphic_context[n]->stroke.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("stroke-width",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; cursor=0.0; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); cursor=0.0; break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. 
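    The macro body is rendered through a cloned DrawInfo by a recursive
    RenderMVGContent() call; recursion depth is bounded by the
    MagickMaxRecursionDepth check on entry.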
*/ GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1,exception); clone_info=DestroyDrawInfo(clone_info); } break; } break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) || (fabs(affine.sy-1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. 
*/ if (IsPoint(q) == MagickFalse) break; GetNextToken(q,&q,extent,token); point.x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); point.y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,number_points); } if (status == MagickFalse) break; if ((primitive_info[j].primitive == TextPrimitive) || (primitive_info[j].primitive == ImagePrimitive)) if (primitive_info[j].text != (char *) NULL) primitive_info[j].text=DestroyString(primitive_info[j].text); primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. */ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(double) (BezierQuantum*primitive_info[j].coordinates); if (primitive_info[j].coordinates > (107*BezierQuantum)) { (void) ThrowMagickException(exception,GetMagickModule(),DrawError, "TooManyBezierCoordinates","`%s'",token); status=MagickFalse; break; } break; } case PathPrimitive: { char *s, *t; GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=StringToDouble(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0; break; } default: break; } if (coordinates > MaxBezierCoordinates) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"TooManyBezierCoordinates","`%s'",token); status=MagickFalse; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. 
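    The estimates above are deliberately generous: a rectangle expands to 5
    points per coordinate pair, while circles, arcs, and ellipses reserve
    2*ceil(MagickPI*radius)+6*BezierQuantum+360 entries, so that tracing
    cannot overrun the vector.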
*/ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type=UndefinedPrimitive; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(&mvg_info,token,exception); if (coordinates == 0.0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); 
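    /* the token just read names the paint method: point, replace,
       floodfill, filltoborder, or reset */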
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { char geometry[MagickPathExtent]; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics,exception); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1), p); if (status == MagickFalse) break; primitive_info[i].primitive=UndefinedPrimitive; if (i == 0) continue; /* Transform points. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (draw_info->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,graphic_context[n]->clip_mask, clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
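    Free the macro splay-tree, the token and primitive buffers (including
    any text payloads), the gradient stops, and the whole graphic context
    stack.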
*/ macros=DestroySplayTree(macros); token=DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) { for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info); } primitive=DestroyString(primitive); if (stops != (StopInfo *) NULL) stops=(StopInfo *) RelinquishMagickMemory(stops); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); if (status == MagickFalse) ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition", keyword); return(status != 0 ? MagickTrue : MagickFalse); } MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info, ExceptionInfo *exception) { return(RenderMVGContent(image,draw_info,0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P a t t e r n P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPatternPath() renders the MVG definition of a named pattern or % gradient, previously stored as an image artifact, onto a newly allocated % pattern image. % % The format of the DrawPatternPath method is: % % MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info, % const char *name,Image **pattern,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o name: the pattern name. % % o pattern: the pattern image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType DrawPatternPath(Image *image, const DrawInfo *draw_info,const char *name,Image **pattern, ExceptionInfo *exception) { char property[MagickPathExtent]; const char *geometry, *path, *type; DrawInfo *clone_info; ImageInfo *image_info; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); assert(name != (const char *) NULL); (void) FormatLocaleString(property,MagickPathExtent,"%s",name); path=GetImageArtifact(image,property); if (path == (const char *) NULL) return(MagickFalse); (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name); geometry=GetImageArtifact(image,property); if (geometry == (const char *) NULL) return(MagickFalse); if ((*pattern) != (Image *) NULL) *pattern=DestroyImage(*pattern); image_info=AcquireImageInfo(); image_info->size=AcquireString(geometry); *pattern=AcquireImage(image_info,exception); image_info=DestroyImageInfo(image_info); (void) QueryColorCompliance("#000000ff",AllCompliance, &(*pattern)->background_color,exception); (void) SetImageBackgroundColor(*pattern,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), "begin pattern-path %s %s",name,geometry); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill_pattern=NewImageList(); clone_info->stroke_pattern=NewImageList(); (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name); type=GetImageArtifact(image,property); if (type != (const char *) NULL) clone_info->gradient.type=(GradientType) ParseCommandOption( MagickGradientOptions,MagickFalse,type); (void)
CloneString(&clone_info->primitive,path); status=RenderMVGContent(*pattern,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w P o l y g o n P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The format of the DrawPolygonPrimitive method is: % % MagickBooleanType DrawPolygonPrimitive(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i]=DestroyPolygonInfo(polygon_info[i]); polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info); return(polygon_info); } static PolygonInfo **AcquirePolygonThreadSet( const PrimitiveInfo *primitive_info) { PathInfo *magick_restrict path_info; PolygonInfo **polygon_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return((PolygonInfo **) NULL); (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info)); path_info=ConvertPrimitiveToPath(primitive_info); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); for (i=0; i < (ssize_t) number_threads; i++) { polygon_info[i]=ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); } path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static double GetFillAlpha(PolygonInfo *polygon_info,const double mid, const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x, const ssize_t y,double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; register const PointInfo *q; register EdgeInfo *p; register ssize_t i; ssize_t j, winding_number; /* Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha=0.0; subpath_alpha=0.0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= (p->bounds.y1-mid-0.5)) break; if ((double) y > (p->bounds.y2+mid+0.5)) { (void) DestroyEdge(polygon_info,(size_t) j); continue; } if (((double) x <= (p->bounds.x1-mid-0.5)) || ((double) x > (p->bounds.x2+mid+0.5))) continue; i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) { if ((double) y <= (p->points[i-1].y-mid-0.5)) break; if ((double) y > (p->points[i].y+mid+0.5)) continue; if (p->scanline != (double) y) { p->scanline=(double) y; p->highwater=(size_t) i; } /* Compute distance between a point and an edge. 
*/ q=p->points+i-1; delta.x=(q+1)->x-q->x; delta.y=(q+1)->y-q->y; beta=delta.x*(x-q->x)+delta.y*(y-q->y); if (beta <= 0.0) { delta.x=(double) x-q->x; delta.y=(double) y-q->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=delta.x*delta.x+delta.y*delta.y; if (beta >= alpha) { delta.x=(double) x-(q+1)->x; delta.y=(double) y-(q+1)->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=PerceptibleReciprocal(alpha); beta=delta.x*(y-q->y)-delta.y*(x-q->x); distance=alpha*beta*beta; } } /* Compute stroke & subpath opacity. */ beta=0.0; if (p->ghostline == MagickFalse) { alpha=mid+0.5; if ((*stroke_alpha < 1.0) && (distance <= ((alpha+0.25)*(alpha+0.25)))) { alpha=mid-0.5; if (distance <= ((alpha+0.25)*(alpha+0.25))) *stroke_alpha=1.0; else { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt((double) distance); alpha=beta-mid-0.5; if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25))) *stroke_alpha=(alpha-0.25)*(alpha-0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0)) continue; if (distance <= 0.0) { subpath_alpha=1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < MagickEpsilon) { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt(distance); } alpha=beta-1.0; if (subpath_alpha < (alpha*alpha)) subpath_alpha=alpha*alpha; } } /* Compute fill opacity. */ if (fill == MagickFalse) return(0.0); if (subpath_alpha >= 1.0) return(1.0); /* Determine winding number. */ winding_number=0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= p->bounds.y1) break; if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1)) continue; if ((double) x > p->bounds.x2) { winding_number+=p->direction ? 1 : -1; continue; } i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) (p->number_points-1); i++) if ((double) y <= p->points[i].y) break; q=p->points+i-1; if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x))) winding_number+=p->direction ? 1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return(1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return(1.0); return(subpath_alpha); } static MagickBooleanType DrawPolygonPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType fill, status; double mid; PolygonInfo **magick_restrict polygon_info; register EdgeInfo *p; register ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates <= 1) return(MagickTrue); /* Compute bounding box. */ polygon_info=AcquirePolygonThreadSet(primitive_info); if (polygon_info == (PolygonInfo **) NULL) return(MagickFalse); DisableMSCWarning(4127) if (0) { status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception); if (status == MagickFalse) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(status); } } RestoreMSCWarning if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon"); fill=(primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? 
MagickTrue : MagickFalse; mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; bounds=polygon_info[0]->edges[0].bounds; for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p=polygon_info[0]->edges+i; if (p->bounds.x1 < bounds.x1) bounds.x1=p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1=p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2=p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2=p->bounds.y2; } bounds.x1-=(mid+1.0); bounds.y1-=(mid+1.0); bounds.x2+=(mid+1.0); bounds.y2+=(mid+1.0); if ((bounds.x1 >= (double) image->columns) || (bounds.y1 >= (double) image->rows) || (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0)) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(MagickTrue); /* virtual polygon */ } bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x1; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y1; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x2; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y2; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* Draw point. */ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); x=start_x; q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for ( ; x <= stop_x; x++) { if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) && (y == (ssize_t) ceil(primitive_info->point.y-0.5))) { GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-polygon"); return(status); } /* Draw polygon or line. */ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+ 1),1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=start_x; x <= stop_x; x++) { double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; /* Fill and/or stroke. 
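    GetFillAlpha() returns the fill coverage for this pixel and, through
    stroke_alpha, the stroke coverage; with stroke antialiasing disabled
    both coverages are thresholded at 0.25, and the fill color is
    composited before the stroke color.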
        */
        fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
          x,y,&stroke_alpha);
        if (draw_info->stroke_antialias == MagickFalse)
          {
            fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
            stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
          }
        GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
        CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
          (double) GetPixelAlpha(image,q),q);
        GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
        CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,
          q,(double) GetPixelAlpha(image,q),q);
        q+=GetPixelChannels(image);
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-polygon");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P r i m i t i v e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
%  The format of the DrawPrimitive method is:
%
%      MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
%        PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double ConstrainCoordinate(double x)
{
  if (x < (double) -SSIZE_MAX)
    return((double) -SSIZE_MAX);
  if (x > (double) SSIZE_MAX)
    return((double) SSIZE_MAX);
  return(x);
}

static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
}; PointInfo p, point, q; register ssize_t i, x; ssize_t coordinates, y; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); switch (primitive_info->primitive) { case AlphaPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if ((fabs(q.x-point.x) >= MagickEpsilon) || (fabs(q.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y); else (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y); q=point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x-point.x) >= MagickEpsilon) || (fabs(p.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)", (double) coordinates); else (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } status=MagickTrue; if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) status=SetImageColorspace(image,sRGBColorspace,exception); if (draw_info->compliance == SVGCompliance) { status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask, exception); status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask, exception); } x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5)); y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5)); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; 
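          /*
            Point method: update the alpha of the single pixel at (x,y)
            from the fill color.
          */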
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } channel_mask=SetImageChannelMask(image,AlphaChannel); status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue,exception); (void) SetImageChannelMask(image,channel_mask); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image,&pixel); GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { PixelInfo target; (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { 
target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue,exception); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image *composite_image, *composite_images; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_images=ReadInlineImage(clone_info,primitive_info->text, exception); else { (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); composite_images=ReadImage(clone_info,exception); } clone_info=DestroyImageInfo(clone_info); if (composite_images == (Image *) NULL) { status=0; break; } composite_image=RemoveFirstImageFromList(&composite_images); composite_images=DestroyImageList(composite_images); (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=(ssize_t) ceil(primitive_info[1].point.x-0.5); y1=(ssize_t) ceil(primitive_info[1].point.y-0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* Resize image. 
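            The second point of an image primitive carries the requested
            width and height; TransformImage() resizes the composite image
            to that geometry (using this image's filter) before it is
            composited at (x,y).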
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          (void) TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        (void) SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,
        exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        (void) DrawAffineImage(image,composite_image,&affine,exception);
      else
        (void) CompositeImage(image,composite_image,draw_info->compose,
          MagickTrue,geometry.x,geometry.y,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelInfo
        fill_color;

      register Quantum
        *q;

      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/
        2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
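            For wide strokes the fill and the stroke are rendered in two
            passes: the fill with a transparent stroke, then the stroke
            outline via DrawStrokePolygon().  A path that is closed (or
            effectively closed) with round caps and round joins can be
            rendered directly by DrawPolygonPrimitive() instead.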
*/ closed_path=primitive_info[0].closed_subpath; i=(ssize_t) primitive_info[0].coordinates; x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x); y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) closed_path=MagickTrue; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { status=DrawPolygonPrimitive(image,draw_info,primitive_info, exception); break; } clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); status&=DrawStrokePolygon(image,draw_info,primitive_info,exception); break; } status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception); break; } } image_view=DestroyCacheView(image_view); if (draw_info->compliance == SVGCompliance) { status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception); status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w S t r o k e P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on % the image while respecting the line cap and join attributes. % % The format of the DrawStrokePolygon method is: % % MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % */ static MagickBooleanType DrawRoundLinecap(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { PrimitiveInfo linecap[5]; register ssize_t i; for (i=0; i < 4; i++) linecap[i]=(*primitive_info); linecap[0].coordinates=4; linecap[1].point.x+=2.0*MagickEpsilon; linecap[2].point.x+=2.0*MagickEpsilon; linecap[2].point.y+=2.0*MagickEpsilon; linecap[3].point.y+=2.0*MagickEpsilon; linecap[4].primitive=UndefinedPrimitive; return(DrawPolygonPrimitive(image,draw_info,linecap,exception)); } static MagickBooleanType DrawStrokePolygon(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { DrawInfo *clone_info; MagickBooleanType closed_path; MagickStatusType status; PrimitiveInfo *stroke_polygon; register const PrimitiveInfo *p, *q; /* Draw stroked polygon. 
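    Each subpath is converted by TraceStrokePolygon() into a closed polygon
    that outlines the stroke; that polygon is then filled with the stroke
    color (fill and stroke roles are swapped in clone_info).  Open subpaths
    get round caps appended when requested.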
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "    begin draw-stroke-polygon");
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive;
       p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(
          stroke_polygon);
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "    end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A f f i n e M a t r i x                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAffineMatrix() returns an AffineMatrix initialized to the identity
%  matrix.
%
%  The format of the GetAffineMatrix method is:
%
%      void GetAffineMatrix(AffineMatrix *affine_matrix)
%
%  A description of each parameter follows:
%
%    o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t D r a w I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetDrawInfo() initializes draw_info to default values from image_info.
%
%  The format of the GetDrawInfo method is:
%
%      void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info != (DrawInfo *) NULL); (void) memset(draw_info,0,sizeof(*draw_info)); clone_info=CloneImageInfo(image_info); GetAffineMatrix(&draw_info->affine); exception=AcquireExceptionInfo(); (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill, exception); (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke, exception); draw_info->stroke_antialias=clone_info->antialias; draw_info->stroke_width=1.0; draw_info->fill_rule=EvenOddRule; draw_info->alpha=OpaqueAlpha; draw_info->fill_alpha=OpaqueAlpha; draw_info->stroke_alpha=OpaqueAlpha; draw_info->linecap=ButtCap; draw_info->linejoin=MiterJoin; draw_info->miterlimit=10; draw_info->decorate=NoDecoration; draw_info->pointsize=12.0; draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha; draw_info->compose=OverCompositeOp; draw_info->render=MagickTrue; draw_info->clip_path=MagickFalse; draw_info->debug=IsEventLogging(); if (clone_info->font != (char *) NULL) draw_info->font=AcquireString(clone_info->font); if (clone_info->density != (char *) NULL) draw_info->density=AcquireString(clone_info->density); draw_info->text_antialias=clone_info->antialias; if (fabs(clone_info->pointsize) >= MagickEpsilon) draw_info->pointsize=clone_info->pointsize; draw_info->border_color=clone_info->border_color; if (clone_info->server_name != (char *) NULL) draw_info->server_name=AcquireString(clone_info->server_name); option=GetImageOption(clone_info,"direction"); if (option != (const char *) NULL) draw_info->direction=(DirectionType) ParseCommandOption( MagickDirectionOptions,MagickFalse,option); else draw_info->direction=UndefinedDirection; option=GetImageOption(clone_info,"encoding"); if (option != (const char *) NULL) (void) CloneString(&draw_info->encoding,option); option=GetImageOption(clone_info,"family"); if (option != (const char *) NULL) (void) CloneString(&draw_info->family,option); option=GetImageOption(clone_info,"fill"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill, exception); option=GetImageOption(clone_info,"gravity"); if (option != (const char *) NULL) draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(clone_info,"interline-spacing"); if (option != (const char *) NULL) draw_info->interline_spacing=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"interword-spacing"); if (option != (const char *) NULL) draw_info->interword_spacing=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"kerning"); if (option != (const char *) NULL) draw_info->kerning=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"stroke"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke, exception); option=GetImageOption(clone_info,"strokewidth"); if (option != (const char *) NULL) draw_info->stroke_width=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"style"); if (option != (const char *) NULL) draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions, MagickFalse,option); option=GetImageOption(clone_info,"undercolor"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor, exception); option=GetImageOption(clone_info,"weight"); if (option != (const char *) NULL) { ssize_t weight; weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option); if (weight == -1) 
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r m u t a t e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the permutation of (n,k), i.e. the binomial
%  coefficient "n choose k"; TraceBezier() uses these values as Bernstein
%  basis coefficients.
%
%  The format of the Permutate method is:
%
%      double Permutate(ssize_t n,ssize_t k)
%
%  A description of each parameter follows:
%
%    o n: the number of elements.
%
%    o k: the number of elements chosen.
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  register ssize_t
    i;

  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a c e P r i m i t i v e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
%
*/

static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}

static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
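  /*
    This is the usual endpoint-to-center arc conversion (as in the SVG
    specification): the center and start angle alpha are now known, and the
    signed sweep theta is computed next.  Each slice of the sweep of at most
    a quarter turn is then approximated by one cubic Bezier segment whose
    control-point distance is gamma.
  */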
theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta+=2.0*MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta-=2.0*MagickPI; arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+ MagickEpsilon)))); status=MagickTrue; p=primitive_info; for (i=0; i < (ssize_t) arc_segments; i++) { beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments)); gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))* sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/ sin(fmod((double) beta,DegreesToRadians(360.0))); points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x; p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y; (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y* points[0].y); (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y* points[0].y); (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y* points[1].y); (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y* points[1].y); (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y* points[2].y); (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y* points[2].y); if (i == (ssize_t) (arc_segments-1)) (p+3)->point=end; status&=TraceBezier(mvg_info,4); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; p+=p->coordinates; } mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(status == 0 ? MagickFalse : MagickTrue); } static MagickBooleanType TraceBezier(MVGInfo *mvg_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, point, *points; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i, j; size_t control_points, quantum; /* Allocate coefficients. 
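    The coefficients are the binomial values Permutate(n-1,i), i.e. the
    Bernstein basis weights; the curve is then sampled at control_points
    evenly spaced parameter values and emitted as a polyline.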
*/ primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; quantum=number_coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { for (j=i+1; j < (ssize_t) number_coordinates; j++) { alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x); if (alpha > (double) quantum) quantum=(size_t) alpha; alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y); if (alpha > (double) quantum) quantum=(size_t) alpha; } } quantum=(size_t) MagickMin((double) quantum/number_coordinates, (double) BezierQuantum); control_points=quantum*number_coordinates; if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse) return(MagickFalse); primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; coefficients=(double *) AcquireQuantumMemory((size_t) number_coordinates,sizeof(*coefficients)); points=(PointInfo *) AcquireQuantumMemory((size_t) control_points, sizeof(*points)); if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL)) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); /* Compute bezier points. */ end=primitive_info[number_coordinates-1].point; for (i=0; i < (ssize_t) number_coordinates; i++) coefficients[i]=Permutate((ssize_t) number_coordinates-1,i); weight=0.0; for (i=0; i < (ssize_t) control_points; i++) { p=primitive_info; point.x=0.0; point.y=0.0; alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0); for (j=0; j < (ssize_t) number_coordinates; j++) { point.x+=alpha*coefficients[j]*p->point.x; point.y+=alpha*coefficients[j]*p->point.y; alpha*=weight/(1.0-weight); p++; } points[i]=point; weight+=1.0/control_points; } /* Bezier curves are just short segmented polys. */ p=primitive_info; for (i=0; i < (ssize_t) control_points; i++) { if (TracePoint(p,points[i]) == MagickFalse) return(MagickFalse); p+=p->coordinates; } if (TracePoint(p,end) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickTrue); } static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start, const PointInfo end) { double alpha, beta, radius; PointInfo offset, degrees; alpha=end.x-start.x; beta=end.y-start.y; radius=hypot((double) alpha,(double) beta); offset.x=(double) radius; offset.y=(double) radius; degrees.x=0.0; degrees.y=360.0; return(TraceEllipse(mvg_info,start,offset,degrees)); } static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center, const PointInfo radii,const PointInfo arc) { double coordinates, delta, step, x, y; PointInfo angle, point; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i; /* Ellipses are just short segmented polys. 
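    The angular step is derived from the reciprocal of the larger radius so
    bigger ellipses get proportionally more segments; the resulting
    coordinate count is bounds-checked before any points are emitted.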
*/ primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=0; if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon)) return(MagickTrue); delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y)); step=MagickPI/8.0; if ((delta >= 0.0) && (delta < (MagickPI/8.0))) step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0); angle.x=DegreesToRadians(arc.x); y=arc.y; while (y < arc.x) y+=360.0; angle.y=DegreesToRadians(y); coordinates=ceil((angle.y-angle.x)/step+1.0); if ((coordinates > (double) SSIZE_MAX) || (coordinates > (double) GetMaxMemoryRequest())) { (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse) return(MagickFalse); primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; for (p=primitive_info; angle.x < angle.y; angle.x+=step) { point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x; point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; } point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x; point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; x=fabs(primitive_info[0].point.x- primitive_info[primitive_info->coordinates-1].point.x); y=fabs(primitive_info[0].point.y- primitive_info[primitive_info->coordinates-1].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end) { if (TracePoint(primitive_info,start) == MagickFalse) return(MagickFalse); if ((fabs(start.x-end.x) < MagickEpsilon) && (fabs(start.y-end.y) < MagickEpsilon)) { primitive_info->primitive=PointPrimitive; primitive_info->coordinates=1; return(MagickTrue); } if (TracePoint(primitive_info+1,end) == MagickFalse) return(MagickFalse); (primitive_info+1)->primitive=primitive_info->primitive; primitive_info->coordinates=2; primitive_info->closed_subpath=MagickFalse; return(MagickTrue); } static size_t TracePath(MVGInfo *mvg_info,const char *path, ExceptionInfo *exception) { char *next_token, token[MagickPathExtent]; const char *p; double x, y; int attribute, last_attribute; MagickBooleanType status; PointInfo end = {0.0, 0.0}, points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} }, point = {0.0, 0.0}, start = {0.0, 0.0}; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register PrimitiveInfo *q; register ssize_t i; size_t number_coordinates, z_count; ssize_t subpath_offset; subpath_offset=mvg_info->offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; status=MagickTrue; attribute=0; number_coordinates=0; z_count=0; primitive_type=primitive_info->primitive; q=primitive_info; for (p=path; *p != '\0'; ) { if (status == MagickFalse) break; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == '\0') break; last_attribute=attribute; attribute=(int) (*p++); switch (attribute) { case 'a': case 'A': { double angle = 0.0; MagickBooleanType large_arc = MagickFalse, sweep = 
MagickFalse; PointInfo arc = {0.0, 0.0}; /* Elliptical arc. */ do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); arc.x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); arc.y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse; if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'A' ? x : point.x+x); end.y=(double) (attribute == (int) 'A' ? y : point.y+y); if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* Cubic Bézier curve. */ do { points[0]=point; for (i=1; i < 4; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'C' ? x : point.x+x); end.y=(double) (attribute == (int) 'C' ? y : point.y+y); points[i]=end; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'H' ? 
x: point.x+x); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(0); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { /* Line to. */ do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'L' ? x : point.x+x); point.y=(double) (attribute == (int) 'L' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(0); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { /* Move to. */ if (mvg_info->offset != subpath_offset) { primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; } i=0; do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'M' ? x : point.x+x); point.y=(double) (attribute == (int) 'M' ? y : point.y+y); if (i == 0) start=point; i++; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(0); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* Quadratic Bézier curve. */ do { points[0]=point; for (i=1; i < 3; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (*p == ',') p++; end.x=(double) (attribute == (int) 'Q' ? x : point.x+x); end.y=(double) (attribute == (int) 'Q' ? 
y : point.y+y); points[i]=end; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* Cubic Bézier curve. */ do { points[0]=points[3]; points[1].x=2.0*points[3].x-points[2].x; points[1].y=2.0*points[3].y-points[2].y; for (i=2; i < 4; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (*p == ',') p++; end.x=(double) (attribute == (int) 'S' ? x : point.x+x); end.y=(double) (attribute == (int) 'S' ? y : point.y+y); points[i]=end; } if (strchr("CcSs",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* Quadratic Bézier curve. */ do { points[0]=points[2]; points[1].x=2.0*points[2].x-points[1].x; points[1].y=2.0*points[2].y-points[1].y; for (i=2; i < 3; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'T' ? x : point.x+x); end.y=(double) (attribute == (int) 'T' ? y : point.y+y); points[i]=end; } if (status == MagickFalse) break; if (strchr("QqTt",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { /* Line to. */ do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.y=(double) (attribute == (int) 'V' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(0); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { /* Close path. 
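            'z'/'Z' appends the subpath's starting point, marks the subpath
            closed, and begins a new subpath; when a path contains more than
            one closepath, the fill method is later switched to
            FillToBorderMethod (see the end of TracePath()).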
*/ point=start; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(0); mvg_info->offset+=q->coordinates; q+=q->coordinates; primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); primitive_info->closed_subpath=MagickTrue; number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; z_count++; break; } default: { ThrowPointExpectedException(token,exception); break; } } } if (status == MagickFalse) return(0); primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive=primitive_type; if (z_count > 1) q->method=FillToBorderMethod; } q=primitive_info; return(number_coordinates); } static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end) { PointInfo point; register PrimitiveInfo *p; register ssize_t i; if ((fabs(start.x-end.x) < MagickEpsilon) || (fabs(start.y-end.y) < MagickEpsilon)) { primitive_info->coordinates=0; return(MagickTrue); } p=primitive_info; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=start.x; point.y=end.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,end) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=end.x; point.y=start.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info, const PointInfo start,const PointInfo end,PointInfo arc) { PointInfo degrees, point, segment; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i; ssize_t offset; offset=mvg_info->offset; segment.x=fabs(end.x-start.x); segment.y=fabs(end.y-start.y); if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon)) { (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0; return(MagickTrue); } if (arc.x > (0.5*segment.x)) arc.x=0.5*segment.x; if (arc.y > (0.5*segment.y)) arc.y=0.5*segment.y; point.x=start.x+segment.x-arc.x; point.y=start.y+arc.y; degrees.x=270.0; degrees.y=360.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+segment.x-arc.x; point.y=start.y+segment.y-arc.y; degrees.x=0.0; degrees.y=90.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+segment.y-arc.y; degrees.x=90.0; degrees.y=180.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+arc.y; degrees.x=180.0; degrees.y=270.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); 
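  /*
    The four quarter ellipses above trace the rounded corners clockwise
    starting at the top right; the point appended below closes the path by
    returning to the first emitted point.
  */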
p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse) return(MagickFalse); p+=p->coordinates; mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info, const size_t number_vertices,const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx=0.0; dy=0.0; for (i=1; i < (ssize_t) number_vertices; i++) { dx=primitive_info[0].point.x-primitive_info[i].point.x; dy=primitive_info[0].point.y-primitive_info[i].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } if (i == (ssize_t) number_vertices) i=(ssize_t) number_vertices-1L; distance=hypot((double) dx,(double) dy); primitive_info[0].point.x=(double) (primitive_info[i].point.x+ dx*(distance+offset)/distance); primitive_info[0].point.y=(double) (primitive_info[i].point.y+ dy*(distance+offset)/distance); for (j=(ssize_t) number_vertices-2; j >= 0; j--) { dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x; dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); return(MagickTrue); } static PrimitiveInfo *TraceStrokePolygon(const Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { #define CheckPathExtent(pad) \ if ((ssize_t) (q+(pad)) >= (ssize_t) max_strokes) \ { \ if (~max_strokes < (pad)) \ { \ path_p=(PointInfo *) RelinquishMagickMemory(path_p); \ path_q=(PointInfo *) RelinquishMagickMemory(path_q); \ } \ else \ { \ max_strokes+=(pad); \ path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \ sizeof(*path_p)); \ path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \ sizeof(*path_q)); \ } \ if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \ { \ if (path_p != (PointInfo *) NULL) \ path_p=(PointInfo *) RelinquishMagickMemory(path_p); \ if (path_q != (PointInfo *) NULL) \ path_q=(PointInfo *) RelinquishMagickMemory(path_q); \ polygon_primitive=(PrimitiveInfo *) \ RelinquishMagickMemory(polygon_primitive); \ return((PrimitiveInfo *) NULL); \ } \ } typedef struct _LineSegment { double p, q; } LineSegment; double delta_theta, dot_product, mid, miterlimit; LineSegment dx = {0,0}, dy = {0,0}, inverse_slope = {0,0}, slope = {0,0}, theta = {0,0}; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *path_p, *path_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, max_strokes, number_vertices; ssize_t j, n, p, q; /* Allocate paths. 
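    path_p and path_q accumulate the two offset polylines that run on either
    side of the centerline at distance mid; after all joins are emitted they
    are stitched together (path_p forward, path_q backward) into the single
    closed stroke polygon that is returned.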
*/ number_vertices=primitive_info->coordinates; max_strokes=2*number_vertices+6*BezierQuantum+360; polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if (polygon_primitive == (PrimitiveInfo *) NULL) return((PrimitiveInfo *) NULL); (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices* sizeof(*polygon_primitive)); closed_path=primitive_info[0].closed_subpath; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon)) break; } if (n == (ssize_t) number_vertices) { if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse)) { /* Zero length subpath. */ stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory( sizeof(*stroke_polygon)); stroke_polygon[0]=polygon_primitive[0]; stroke_polygon[0].coordinates=0; polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } n=(ssize_t) number_vertices-1L; } path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_p)); if (path_p == (PointInfo *) NULL) { polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return((PrimitiveInfo *) NULL); } path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_q)); if (path_q == (PointInfo *) NULL) { path_p=(PointInfo *) RelinquishMagickMemory(path_p); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return((PrimitiveInfo *) NULL); } slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < MagickEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.p) < MagickEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.p=dx.p < 0.0 ? 
1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; path_q[p++]=box_q[0]; path_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < MagickEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.q) < MagickEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.q=dx.q < 0.0 ? 
1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < MagickEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } CheckPathExtent(6*BezierQuantum+360); dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/ (2.0*sqrt((double) (1.0/mid))))); CheckPathExtent(arc_segments+6*BezierQuantum+360); path_q[q].x=box_q[1].x; path_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_q[q].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); path_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } path_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; 
path_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/ (2.0*sqrt((double) (1.0/mid))))); CheckPathExtent(arc_segments+6*BezierQuantum+360); path_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); path_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } path_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } path_p[p++]=box_p[1]; path_q[q++]=box_q[1]; /* Trace stroked polygon. */ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); } path_p=(PointInfo *) RelinquishMagickMemory(path_p); path_q=(PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
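The offset arithmetic above (offset.x=sqrt(mid*mid/(inverse_slope*inverse_slope+1.0)), offset.y=offset.x*inverse_slope) is a slope-based way of stepping half the stroke width along the segment normal; the MagickEpsilon clamping guards the near-vertical and near-horizontal cases where a slope would overflow. A minimal standalone sketch of the same geometry using an explicit unit normal (illustrative names, not ImageMagick API):

#include <math.h>

typedef struct { double x, y; } PointXY;

/* For segment a->b and half stroke width mid, return the two points at
   distance mid on either side of a, along the segment's unit normal.
   Equivalent to the offset.x/offset.y arithmetic in the stroke tracer
   above, but assumes the segment has nonzero length. */
static void stroke_offsets(PointXY a, PointXY b, double mid,
                           PointXY *left, PointXY *right)
{
  double dx = b.x - a.x, dy = b.y - a.y;
  double len = hypot(dx, dy);            /* assumed > 0 */
  double nx = -dy / len, ny = dx / len;  /* unit normal */
  left->x  = a.x + mid * nx;  left->y  = a.y + mid * ny;
  right->x = a.x - mid * nx;  right->y = a.y - mid * ny;
}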
target-data-1c.c
// ----------------------------------------------------------------------------------------
// Implementation of Example target.3c (Section 52.3, page 196) from the
// OpenMP 4.0.2 Examples document:
// http://openmp.org/mp-documents/openmp-examples-4.0.2.pdf
// ----------------------------------------------------------------------------------------
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#ifdef _OPENMP
#include <omp.h>
#endif

#include "BenchmarksUtil.h"

// define the error threshold for the results "not matching"
#define ERROR_THRESHOLD 0.05

/* Problem size */
#define N 8192

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

void init(DATA_TYPE *A, DATA_TYPE *B) {
  int i;
  for (i = 0; i < N; i++) {
    A[i] = i / 2.0;
    B[i] = ((N - 1) - i) / 3.0;
  }
  return;
}

void vec_mult(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) {
  int i;
  for (i = 0; i < N; i++)
    C[i] = A[i] * B[i];
}

void vec_mult_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) {
  int i;
#pragma omp target data map(to : A[ : N], B[ : N]) map(from : C[ : N])        \
    device(DEVICE_ID)
  {
#pragma omp target device(DEVICE_ID)
#pragma omp parallel for
    for (i = 0; i < N; i++)
      C[i] = A[i] * B[i];
  }
}

int compareResults(DATA_TYPE *B, DATA_TYPE *B_GPU) {
  int i, fail;
  fail = 0;

  // Compare B and B_GPU
  for (i = 0; i < N; i++) {
    if (percentDiff(B[i], B_GPU[i]) > ERROR_THRESHOLD) {
      fail++;
    }
  }

  // Print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, fail);

  return fail;
}

int main(int argc, char *argv[]) {
  double t_start, t_end, t_start_OMP, t_end_OMP;
  int fail = 0;

  DATA_TYPE *A;
  DATA_TYPE *B;
  DATA_TYPE *C;
  DATA_TYPE *C_OMP;

  A = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
  B = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
  C = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
  C_OMP = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));

  fprintf(stdout, ">> Two vector multiplication <<\n");

  // initialize the arrays
  init(A, B);

  t_start_OMP = rtclock();
  vec_mult_OMP(A, B, C_OMP);
  t_end_OMP = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end_OMP - t_start_OMP);

#ifdef RUN_TEST
  t_start = rtclock();
  vec_mult(A, B, C);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

  fail = compareResults(C, C_OMP);
#endif

  free(A);
  free(B);
  free(C);
  free(C_OMP);

  return fail;
}
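DEVICE_ID, rtclock() and percentDiff() above come from BenchmarksUtil.h and are assumed, not shown. Stripped of the benchmark harness, the target-data idiom that vec_mult_OMP relies on reduces to this sketch (default device, fixed size):

#include <stdio.h>

#define VN 1024

int main(void) {
  static float a[VN], b[VN], c[VN];
  int i;
  for (i = 0; i < VN; i++) { a[i] = (float)i; b[i] = 2.0f * i; }
  /* map a and b to the device once; c is mapped back when the
     data region ends */
#pragma omp target data map(to : a[ : VN], b[ : VN]) map(from : c[ : VN])
  {
#pragma omp target
#pragma omp parallel for
    for (i = 0; i < VN; i++)
      c[i] = a[i] * b[i];
  }
  printf("c[1] = %f\n", c[1]); /* expect 2.000000 */
  return 0;
}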
9429.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) { int t10; int t8; int t6; int t4; int t2; for (t2 = 0; t2 <= tmax - 1; t2 += 1) { for (t4 = 0; t4 <= ny - 1; t4 += 1) ey[0][t4] = _fict_[t2]; #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 1; t4 <= nx - 1; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1); t6 += 1) for (t8 = 0; t8 <= ny - 1; t8 += 16) for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1) ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]); #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 0; t4 <= nx - 1; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1); t6 += 1) for (t8 = 1; t8 <= ny - 1; t8 += 16) for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1) ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]); #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 0; t4 <= nx - 2; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 2 ? t4 + 127 : nx - 2); t6 += 1) for (t8 = 0; t8 <= ny - 2; t8 += 16) for (t10 = t8; t10 <= (ny - 2 < t8 + 15 ? ny - 2 : t8 + 15); t10 += 1) hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]); } }
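The loops above are a 128x16-tiled, OpenMP-parallelized form of the PolyBench fdtd-2d kernel; for reference, an untiled sketch of one time step t with the same bounds and coefficients (array extents follow the declarations above):

void fdtd_2d_step(int t, int nx, int ny,
                  double ex[1000][1200], double ey[1000][1200],
                  double hz[1000][1200], double _fict_[500])
{
  int i, j;
  for (j = 0; j < ny; j++)            /* boundary row of ey */
    ey[0][j] = _fict_[t];
  for (i = 1; i < nx; i++)            /* the tiled t4/t6/t8/t10 loops above */
    for (j = 0; j < ny; j++)
      ey[i][j] -= 0.5 * (hz[i][j] - hz[i - 1][j]);
  for (i = 0; i < nx; i++)
    for (j = 1; j < ny; j++)
      ex[i][j] -= 0.5 * (hz[i][j] - hz[i][j - 1]);
  for (i = 0; i < nx - 1; i++)
    for (j = 0; j < ny - 1; j++)
      hz[i][j] -= 0.7 * (ex[i][j + 1] - ex[i][j] +
                         ey[i + 1][j] - ey[i][j]);
}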
t_cholmod_gpu.c
/* ========================================================================== */ /* === GPU/t_cholmod_gpu ==================================================== */ /* ========================================================================== */ /* ----------------------------------------------------------------------------- * CHOLMOD/GPU Module. Copyright (C) 2005-2012, Timothy A. Davis * http://www.suitesparse.com * -------------------------------------------------------------------------- */ /* GPU BLAS template routine for cholmod_super_numeric. */ /* ========================================================================== */ /* === include files and definitions ======================================== */ /* ========================================================================== */ #ifdef GPU_BLAS #include <string.h> #include "cholmod_template.h" #include "cholmod_gpu_kernels.h" #include "tq84-tsearch.h" #include <fenv.h> #include <cuda.h> #include <cuda_runtime.h> #undef L_ENTRY #ifdef REAL #define L_ENTRY 1 #else #define L_ENTRY 2 #endif /* ========================================================================== */ /* === gpu_clear_memory ===================================================== */ /* ========================================================================== */ /* * Ensure the Lx is zeroed before forming factor. This is a significant cost * in the GPU case - so using this parallel memset code for efficiency. */ void TEMPLATE2 (CHOLMOD (gpu_clear_memory)) ( double* buff, size_t size, int num_threads ) { int chunk_multiplier = 5; int num_chunks = chunk_multiplier * num_threads; size_t chunksize = size / num_chunks; size_t i; #pragma omp parallel for num_threads(num_threads) private(i) schedule(dynamic) for(i = 0; i < num_chunks; i++) { size_t chunkoffset = i * chunksize; if(i == num_chunks - 1) { memset(buff + chunkoffset, 0, (size - chunksize*(num_chunks - 1)) * sizeof(double)); } else { memset(buff + chunkoffset, 0, chunksize * sizeof(double)); } } } /* ========================================================================== */ /* === gpu_init ============================================================= */ /* ========================================================================== */ /* * Performs required initialization for GPU computing. * * Returns 0 if there is an error, so the intended use is * * useGPU = CHOLMOD(gpu_init) * * which would locally turn off gpu processing if the initialization failed. */ int TEMPLATE2 (CHOLMOD (gpu_init)) ( void *Cwork, cholmod_factor *L, cholmod_common *Common, Int nsuper, Int n, Int nls, cholmod_gpu_pointers *gpu_p ) { Int i, k, maxSize ; cublasStatus_t cublasError ; cudaError_t cudaErr ; size_t maxBytesSize, HostPinnedSize ; feenableexcept (FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW ); maxSize = L->maxcsize; /* #define PAGE_SIZE (4*1024) */ CHOLMOD_GPU_PRINTF (("gpu_init : %p\n", (void *) ((size_t) Cwork & ~(4*1024-1)))) ; /* make sure the assumed buffer sizes are large enough */ if ( (nls+2*n+4)*sizeof(Int) > Common->devBuffSize ) { ERROR (CHOLMOD_GPU_PROBLEM,"\n\n" "GPU Memory allocation error. Ls, Map and RelativeMap exceed\n" "devBuffSize. It is not clear if this is due to insufficient\n" "device or host memory or both. 
You can try:\n"
            " 1) increasing the amount of GPU memory requested\n"
            " 2) reducing CHOLMOD_NUM_HOST_BUFFERS\n"
            " 3) using a GPU & host with more memory\n"
            "This issue is a known limitation and should be fixed in a \n"
            "future release of CHOLMOD.\n") ;
        return (0) ;
    }

    /* divvy up the memory in dev_mempool */
    gpu_p->d_Lx[0] = Common->dev_mempool;
    gpu_p->d_Lx[1] = (char *)Common->dev_mempool + Common->devBuffSize;
    gpu_p->d_C = (char*)Common->dev_mempool + 2*Common->devBuffSize;
    gpu_p->d_A[0] = (char*)Common->dev_mempool + 3*Common->devBuffSize;
    gpu_p->d_A[1] = (char*)Common->dev_mempool + 4*Common->devBuffSize;
    gpu_p->d_Ls = (char*)Common->dev_mempool + 5*Common->devBuffSize;
    gpu_p->d_Map = (char*)gpu_p->d_Ls + (nls+1)*sizeof(Int) ;
    gpu_p->d_RelativeMap = (char*)gpu_p->d_Map + (n+1)*sizeof(Int) ;

    /* Copy all of the Ls and Lpi data to the device.  If any supernodes are
     * to be computed on the device then this will be needed, so might as
     * well do it now. */
    cudaErr = cudaMemcpy ( gpu_p->d_Ls, L->s, nls*sizeof(Int),
                           cudaMemcpyHostToDevice );
    CHOLMOD_HANDLE_CUDA_ERROR(cudaErr,"cudaMemcpy(d_Ls)");

    if (!(Common->gpuStream[0])) {
        /* ------------------------------------------------------------------ */
        /* create each CUDA stream */
        /* ------------------------------------------------------------------ */
        for ( i=0; i<CHOLMOD_HOST_SUPERNODE_BUFFERS; i++ ) {
            cudaErr = cudaStreamCreate ( &(Common->gpuStream[i]) );
            if (cudaErr != cudaSuccess) {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA stream") ;
                return (0) ;
            }
        }
        /* ------------------------------------------------------------------ */
        /* create each CUDA event */
        /* ------------------------------------------------------------------ */
        for (i = 0 ; i < 3 ; i++) {
            cudaErr = cudaEventCreateWithFlags
                (&(Common->cublasEventPotrf [i]), cudaEventDisableTiming) ;
            if (cudaErr != cudaSuccess) {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event") ;
                return (0) ;
            }
        }
        for (i = 0 ; i < CHOLMOD_HOST_SUPERNODE_BUFFERS ; i++) {
            cudaErr = cudaEventCreateWithFlags
                (&(Common->updateCBuffersFree[i]), cudaEventDisableTiming) ;
            if (cudaErr != cudaSuccess) {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event") ;
                return (0) ;
            }
        }
        cudaErr = cudaEventCreateWithFlags
            ( &(Common->updateCKernelsComplete), cudaEventDisableTiming );
        if (cudaErr != cudaSuccess) {
            ERROR (CHOLMOD_GPU_PROBLEM, "CUDA updateCKernelsComplete event") ;
            return (0) ;
        }
    }

    gpu_p->h_Lx[0] = (double*)(Common->host_pinned_mempool);
    for ( k=1; k<CHOLMOD_HOST_SUPERNODE_BUFFERS; k++ ) {
        gpu_p->h_Lx[k] = (double*)((char *)(Common->host_pinned_mempool) +
                                   k*Common->devBuffSize);
    }

    return (1);  /* initialization successful, useGPU = 1 */
}


/* ========================================================================== */
/* === gpu_reorder_descendants ============================================== */
/* ========================================================================== */

/* Reorder the descendant supernodes as:
 *  1st - descendant supernodes eligible for processing on the GPU
 *        in increasing (by flops) order
 *  2nd - supernodes whose processing is to remain on the CPU
 *        in any order
 *
 *  All of the GPU-eligible supernodes will be scheduled first.  All
 *  CPU-eligible descendants will overlap with the last (largest)
 *  CHOLMOD_HOST_SUPERNODE_BUFFERS GPU-eligible descendants.
*/ void TEMPLATE2 (CHOLMOD (gpu_reorder_descendants)) ( cholmod_common *Common, Int *Super, Int *locals, Int *Lpi, Int *Lpos, Int *Head, Int *Next, Int *Previous, Int *ndescendants, Int *tail, Int *mapCreatedOnGpu, cholmod_gpu_pointers *gpu_p ) { Int prevd, nextd, firstcpu, d, k, kd1, kd2, ndcol, pdi, pdend, pdi1; Int dnext, ndrow2, p; Int n_descendant = 0; double score; /* use h_Lx[0] to buffer the GPU-eligible descendants */ struct cholmod_descendant_score_t* scores = (struct cholmod_descendant_score_t*) gpu_p->h_Lx[0]; double cpuref = 0.0; int nreverse = 1; int previousd; d = Head[*locals]; prevd = -1; firstcpu = -1; *mapCreatedOnGpu = 0; while ( d != EMPTY ) { /* Get the parameters for the current descendant supernode */ kd1 = Super [d] ; /* d contains cols kd1 to kd2-1 of L */ kd2 = Super [d+1] ; ndcol = kd2 - kd1 ; /* # of columns in all of d */ pdi = Lpi [d] ; /* pointer to first row of d in Ls */ pdend = Lpi [d+1] ; /* pointer just past last row of d in Ls */ p = Lpos [d] ; /* offset of 1st row of d affecting s */ pdi1 = pdi + p ; /* ptr to 1st row of d affecting s in Ls */ ndrow2 = pdend - pdi1; nextd = Next[d]; /* compute a rough flops 'score' for this descendant supernode */ score = ndrow2 * ndcol; if ( ndrow2*L_ENTRY >= CHOLMOD_ND_ROW_LIMIT && ndcol*L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) { score += Common->devBuffSize; } /* place in sort buffer */ scores[n_descendant].score = score; scores[n_descendant].d = d; n_descendant++; d = nextd; } /* Sort the GPU-eligible supernodes */ qsort ( scores, n_descendant, sizeof(struct cholmod_descendant_score_t), (tq84__compar_fn_t) CHOLMOD(score_comp) ); /* Place sorted data back in descendant supernode linked list*/ if ( n_descendant > 0 ) { Head[*locals] = scores[0].d; if ( n_descendant > 1 ) { #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ if (n_descendant > 64) for ( k=1; k<n_descendant; k++ ) { Next[scores[k-1].d] = scores[k].d; } } Next[scores[n_descendant-1].d] = firstcpu; } /* reverse the first CHOLMOD_HOST_SUPERNODE_BUFFERS to better hide PCIe communications */ if ( Head[*locals] != EMPTY && Next[Head[*locals]] != EMPTY ) { previousd = Head[*locals]; d = Next[Head[*locals]]; while ( d!=EMPTY && nreverse < CHOLMOD_HOST_SUPERNODE_BUFFERS ) { kd1 = Super [d] ; /* d contains cols kd1 to kd2-1 of L */ kd2 = Super [d+1] ; ndcol = kd2 - kd1 ; /* # of columns in all of d */ pdi = Lpi [d] ; /* pointer to first row of d in Ls */ pdend = Lpi [d+1] ; /* pointer just past last row of d in Ls */ p = Lpos [d] ; /* offset of 1st row of d affecting s */ pdi1 = pdi + p ; /* ptr to 1st row of d affecting s in Ls */ ndrow2 = pdend - pdi1; nextd = Next[d]; nreverse++; if ( ndrow2*L_ENTRY >= CHOLMOD_ND_ROW_LIMIT && ndcol*L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) { /* place this supernode at the front of the list */ Next[previousd] = Next[d]; Next[d] = Head[*locals]; Head[*locals] = d; } else { previousd = d; } d = nextd; } } /* create a 'previous' list so we can traverse backwards */ *ndescendants = 0; if ( Head[*locals] != EMPTY ) { Previous[Head[*locals]] = EMPTY; for (d = Head [*locals] ; d != EMPTY ; d = dnext) { (*ndescendants)++; dnext = Next[d]; if ( dnext != EMPTY ) { Previous[dnext] = d; } else { *tail = d; } } } return; } /* ========================================================================== */ /* === gpu_initialize_supernode ============================================= */ /* ========================================================================== */ /* C = L (k1:n-1, kd1:kd2-1) * L (k1:k2-1, kd1:kd2-1)', except that k1:n-1 */ 
void TEMPLATE2 (CHOLMOD (gpu_initialize_supernode))
(
    cholmod_common *Common,
    Int nscol,
    Int nsrow,
    Int psi,
    cholmod_gpu_pointers *gpu_p
)
{
    cudaError_t cuErr;

    /* initialize the device supernode assembly memory to zero */
    cuErr = cudaMemset ( gpu_p->d_A[0], 0, nscol*nsrow*L_ENTRY*sizeof(double) );
    CHOLMOD_HANDLE_CUDA_ERROR(cuErr,"cudaMemset(d_A)");

    /* Create the Map on the device */
    createMapOnDevice ( (Int *)(gpu_p->d_Map), (Int *)(gpu_p->d_Ls),
                        psi, nsrow );

    return;
}


/* ========================================================================== */
/* === gpu_updateC ========================================================== */
/* ========================================================================== */

/* C = L (k1:n-1, kd1:kd2-1) * L (k1:k2-1, kd1:kd2-1)', except that k1:n-1
 * refers to all of the rows in L, but many of the rows are all zero.
 * Supernode d holds columns kd1 to kd2-1 of L.  Nonzero rows in the range
 * k1:k2-1 are in the list Ls [pdi1 ... pdi2-1], of size ndrow1.  Nonzero rows
 * in the range k2:n-1 are in the list Ls [pdi2 ... pdend], of size ndrow2.
 * Let L1 = L (Ls [pdi1 ... pdi2-1], kd1:kd2-1), and let L2 = L (Ls [pdi2 ...
 * pdend], kd1:kd2-1).  C is ndrow2-by-ndrow1.  Let C1 be the first ndrow1
 * rows of C and let C2 be the last ndrow2-ndrow1 rows of C.  Only the lower
 * triangular part of C1 needs to be computed since C1 is symmetric.
 *
 * UpdateC is completely asynchronous w.r.t. the GPU.  Once the input buffer
 * d_Lx[] has been filled, all of the device operations are issued, and the
 * host can continue with filling the next input buffer / or start processing
 * all of the descendant supernodes which are not eligible for processing on
 * the device (since they are too small - will not fill the device).
 */

int TEMPLATE2 (CHOLMOD (gpu_updateC))
(
    Int ndrow1,         /* C is ndrow2-by-ndrow1 */
    Int ndrow2,
    Int ndrow,          /* leading dimension of Lx */
    Int ndcol,          /* L1 is ndrow1-by-ndcol */
    Int nsrow,
    Int pdx1,           /* L1 starts at Lx + L_ENTRY*pdx1 */
                        /* L2 starts at Lx + L_ENTRY*(pdx1 + ndrow1) */
    Int pdi1,
    double *Lx,
    double *C,
    cholmod_common *Common,
    cholmod_gpu_pointers *gpu_p
)
{
    double *devPtrLx, *devPtrC ;
    double alpha, beta ;
    cublasStatus_t cublasStatus ;
    cudaError_t cudaStat [2] ;
    Int ndrow3 ;
    int icol, irow;
    int iHostBuff, iDevBuff ;

#ifndef NTIMER
    double tstart = 0;
#endif

    if ((ndrow2*L_ENTRY < CHOLMOD_ND_ROW_LIMIT) ||
        (ndcol*L_ENTRY < CHOLMOD_ND_COL_LIMIT))
    {
        /* too small for the CUDA BLAS; use the CPU instead */
        return (0) ;
    }

    ndrow3 = ndrow2 - ndrow1 ;

#ifndef NTIMER
    Common->syrkStart = SuiteSparse_time ( ) ;
    Common->CHOLMOD_GPU_SYRK_CALLS++ ;
#endif

    /* ---------------------------------------------------------------------- */
    /* allocate workspace on the GPU */
    /* ---------------------------------------------------------------------- */

    iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
    iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;

    /* cycle the device Lx buffer, d_Lx, through CHOLMOD_DEVICE_STREAMS,
       usually 2, so we can overlap the copy of this descendant supernode
       with the compute of the previous descendant supernode */
    devPtrLx = (double *)(gpu_p->d_Lx[iDevBuff]);

    /* very little overlap between kernels for different descendant
       supernodes (since we enforce the supernodes must be large enough to
       fill the device) so we only need one C buffer */
    devPtrC = (double *)(gpu_p->d_C);

    /* ---------------------------------------------------------------------- */
    /* copy Lx to the GPU */
    /* ---------------------------------------------------------------------- */

    /*
copy host data to pinned buffer first for better H2D bandwidth */ #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) if (ndcol > 32) for ( icol=0; icol<ndcol; icol++ ) { for ( irow=0; irow<ndrow2*L_ENTRY; irow++ ) { gpu_p->h_Lx[iHostBuff][icol*ndrow2*L_ENTRY+irow] = Lx[pdx1*L_ENTRY+icol*ndrow*L_ENTRY + irow]; } } cudaStat[0] = cudaMemcpyAsync ( devPtrLx, gpu_p->h_Lx[iHostBuff], ndrow2*ndcol*L_ENTRY*sizeof(devPtrLx[0]), cudaMemcpyHostToDevice, Common->gpuStream[iDevBuff] ); if ( cudaStat[0] ) { CHOLMOD_GPU_PRINTF ((" ERROR cudaMemcpyAsync = %d \n", cudaStat[0])); return (0); } /* make the current stream wait for kernels in previous streams */ cudaStreamWaitEvent ( Common->gpuStream[iDevBuff], Common->updateCKernelsComplete, 0 ) ; /* ---------------------------------------------------------------------- */ /* create the relative map for this descendant supernode */ /* ---------------------------------------------------------------------- */ createRelativeMapOnDevice ( (Int *)(gpu_p->d_Map), (Int *)(gpu_p->d_Ls), (Int *)(gpu_p->d_RelativeMap), pdi1, ndrow2, &(Common->gpuStream[iDevBuff]) ); /* ---------------------------------------------------------------------- */ /* do the CUDA SYRK */ /* ---------------------------------------------------------------------- */ cublasStatus = cublasSetStream (Common->cublasHandle, Common->gpuStream[iDevBuff]) ; if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream") ; } alpha = 1.0 ; beta = 0.0 ; #ifdef REAL cublasStatus = cublasDsyrk (Common->cublasHandle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, (int) ndrow1, (int) ndcol, /* N, K: L1 is ndrow1-by-ndcol */ &alpha, /* ALPHA: 1 */ devPtrLx, ndrow2, /* A, LDA: L1, ndrow2 */ &beta, /* BETA: 0 */ devPtrC, ndrow2) ; /* C, LDC: C1 */ #else cublasStatus = cublasZherk (Common->cublasHandle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, (int) ndrow1, (int) ndcol, /* N, K: L1 is ndrow1-by-ndcol*/ &alpha, /* ALPHA: 1 */ (const cuDoubleComplex *) devPtrLx, ndrow2, /* A, LDA: L1, ndrow2 */ &beta, /* BETA: 0 */ (cuDoubleComplex *) devPtrC, ndrow2) ; /* C, LDC: C1 */ #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } #ifndef NTIMER Common->CHOLMOD_GPU_SYRK_TIME += SuiteSparse_time() - Common->syrkStart; #endif /* ---------------------------------------------------------------------- */ /* compute remaining (ndrow2-ndrow1)-by-ndrow1 block of C, C2 = L2*L1' */ /* ---------------------------------------------------------------------- */ #ifndef NTIMER Common->CHOLMOD_GPU_GEMM_CALLS++ ; tstart = SuiteSparse_time(); #endif if (ndrow3 > 0) { #ifndef REAL cuDoubleComplex calpha = {1.0,0.0} ; cuDoubleComplex cbeta = {0.0,0.0} ; #endif /* ------------------------------------------------------------------ */ /* do the CUDA BLAS dgemm */ /* ------------------------------------------------------------------ */ #ifdef REAL alpha = 1.0 ; beta = 0.0 ; cublasStatus = cublasDgemm (Common->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, ndrow3, ndrow1, ndcol, /* M, N, K */ &alpha, /* ALPHA: 1 */ devPtrLx + L_ENTRY*(ndrow1), /* A, LDA: L2*/ ndrow2, /* ndrow */ devPtrLx, /* B, LDB: L1 */ ndrow2, /* ndrow */ &beta, /* BETA: 0 */ devPtrC + L_ENTRY*ndrow1, /* C, LDC: C2 */ ndrow2) ; #else cublasStatus = cublasZgemm (Common->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_C, ndrow3, ndrow1, ndcol, /* M, N, K */ &calpha, /* ALPHA: 1 */ (const cuDoubleComplex*) devPtrLx + ndrow1, ndrow2, /* ndrow */ (const cuDoubleComplex *) devPtrLx, ndrow2, /* ndrow */ &cbeta, /* 
BETA: 0 */
            (cuDoubleComplex *)devPtrC + ndrow1,
            ndrow2) ;
#endif
        if (cublasStatus != CUBLAS_STATUS_SUCCESS)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
        }
    }
#ifndef NTIMER
    Common->CHOLMOD_GPU_GEMM_TIME += SuiteSparse_time() - tstart;
#endif

    /* ------------------------------------------------------------------ */
    /* Assemble the update C on the device using the d_RelativeMap */
    /* ------------------------------------------------------------------ */

#ifdef REAL
    addUpdateOnDevice ( gpu_p->d_A[0], devPtrC,
                        gpu_p->d_RelativeMap, ndrow1, ndrow2, nsrow,
                        &(Common->gpuStream[iDevBuff]) );
#else
    addComplexUpdateOnDevice ( gpu_p->d_A[0], devPtrC,
                               gpu_p->d_RelativeMap, ndrow1, ndrow2, nsrow,
                               &(Common->gpuStream[iDevBuff]) );
#endif

    /* Record an event indicating that kernels for this descendant are complete */
    cudaEventRecord ( Common->updateCKernelsComplete,
                      Common->gpuStream[iDevBuff]);
    cudaEventRecord ( Common->updateCBuffersFree[iHostBuff],
                      Common->gpuStream[iDevBuff]);

    return (1) ;
}


/* ========================================================================== */
/* === gpu_final_assembly =================================================== */
/* ========================================================================== */

/* If the supernode was assembled on both the CPU and the GPU, this will
 * complete the supernode assembly on both the GPU and CPU.
 */

void TEMPLATE2 (CHOLMOD (gpu_final_assembly))
(
    cholmod_common *Common,
    double *Lx,
    Int psx,
    Int nscol,
    Int nsrow,
    int supernodeUsedGPU,
    int *iHostBuff,
    int *iDevBuff,
    cholmod_gpu_pointers *gpu_p
)
{
    Int iidx, i, j;
    Int iHostBuff2 ;
    Int iDevBuff2 ;

    if ( supernodeUsedGPU ) {

        /* ------------------------------------------------------------------ */
        /* Apply all of the Schur-complement updates, computed on the gpu, to */
        /* the supernode. */
        /* ------------------------------------------------------------------ */

        *iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
        *iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;

        if ( nscol * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) {

            /* If this supernode is going to be factored using the GPU (potrf)
             * then it will need the portion of the update assembled on the
             * CPU.  So copy that to a pinned buffer and do an H2D copy to the
             * device.
             */

            /* wait until a buffer is free */
            cudaEventSynchronize ( Common->updateCBuffersFree[*iHostBuff] );

            /* copy update assembled on CPU to a pinned buffer */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
    private(iidx) if (nscol>32)
            for ( j=0; j<nscol; j++ ) {
                for ( i=j; i<nsrow*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    gpu_p->h_Lx[*iHostBuff][iidx] = Lx[psx*L_ENTRY+iidx];
                }
            }

            /* H2D transfer of update assembled on CPU */
            cudaMemcpyAsync ( gpu_p->d_A[1], gpu_p->h_Lx[*iHostBuff],
                              nscol*nsrow*L_ENTRY*sizeof(double),
                              cudaMemcpyHostToDevice,
                              Common->gpuStream[*iDevBuff] );
        }

        Common->ibuffer++;

        iHostBuff2 = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
        iDevBuff2 = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;

        /* wait for all kernels to complete */
        cudaEventSynchronize( Common->updateCKernelsComplete );

        /* copy assembled Schur-complement updates computed on GPU */
        cudaMemcpyAsync ( gpu_p->h_Lx[iHostBuff2], gpu_p->d_A[0],
                          nscol*nsrow*L_ENTRY*sizeof(double),
                          cudaMemcpyDeviceToHost,
                          Common->gpuStream[iDevBuff2] );

        if ( nscol * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) {

            /* with the current implementation, potrf still uses data from the
             * CPU - so put the fully assembled supernode in a pinned buffer for
             * fastest access */

            /* need both H2D and D2H copies to be complete */
            cudaDeviceSynchronize();

            /* sum updates from cpu and device on device */
#ifdef REAL
            sumAOnDevice ( gpu_p->d_A[1], gpu_p->d_A[0], -1.0, nsrow, nscol );
#else
            sumComplexAOnDevice ( gpu_p->d_A[1], gpu_p->d_A[0], -1.0,
                                  nsrow, nscol );
#endif

            /* place final assembled supernode in pinned buffer */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
    private(iidx) if (nscol>32)
            for ( j=0; j<nscol; j++ ) {
                for ( i=j*L_ENTRY; i<nscol*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    gpu_p->h_Lx[*iHostBuff][iidx] -=
                        gpu_p->h_Lx[iHostBuff2][iidx];
                }
            }

        }
        else
        {
            /* assemble with CPU updates */
            cudaDeviceSynchronize();

#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
    private(iidx) if (nscol>32)
            for ( j=0; j<nscol; j++ ) {
                for ( i=j*L_ENTRY; i<nsrow*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    Lx[psx*L_ENTRY+iidx] -= gpu_p->h_Lx[iHostBuff2][iidx];
                }
            }
        }
    }
    return;
}


/* ========================================================================== */
/* === gpu_lower_potrf ====================================================== */
/* ========================================================================== */

/* Cholesky factorization (dpotrf) of a matrix S, operating on the lower
 * triangular part only.  S is nscol2-by-nscol2 with leading dimension nsrow.
 *
 * S is the top part of the supernode (the lower triangular matrix).
 * This function also copies the bottom rectangular part of the supernode (B)
 * onto the GPU, in preparation for gpu_triangular_solve.
 */

/*
 * On entry, d_A[1] contains the fully assembled supernode
 */

int TEMPLATE2 (CHOLMOD (gpu_lower_potrf))
(
    Int nscol2,     /* S is nscol2-by-nscol2 */
    Int nsrow,      /* leading dimension of S */
    Int psx,        /* S is located at Lx + L_ENTRY*psx */
    double *Lx,     /* contains S; overwritten with Cholesky factor */
    Int *info,      /* BLAS info return value */
    cholmod_common *Common,
    cholmod_gpu_pointers *gpu_p
)
{
    double *devPtrA, *devPtrB, *A ;
    double alpha, beta ;
    cudaError_t cudaStat ;
    cublasStatus_t cublasStatus ;
    Int j, nsrow2, nb, n, gpu_lda, lda, gpu_ldb ;
    int ilda, ijb, iinfo ;
#ifndef NTIMER
    double tstart ;
#endif

    if (nscol2 * L_ENTRY < CHOLMOD_POTRF_LIMIT)
    {
        /* too small for the CUDA BLAS; use the CPU instead */
        return (0) ;
    }

#ifndef NTIMER
    tstart = SuiteSparse_time ( ) ;
    Common->CHOLMOD_GPU_POTRF_CALLS++ ;
#endif

    nsrow2 = nsrow - nscol2 ;

    /* ---------------------------------------------------------------------- */
    /* heuristic to get the block size depending on the problem size */
    /* ---------------------------------------------------------------------- */

    nb = 128 ;
    if (nscol2 > 4096) nb = 256 ;
    if (nscol2 > 8192) nb = 384 ;
    n  = nscol2 ;
    gpu_lda = ((nscol2+31)/32)*32 ;
    lda = nsrow ;

    A = gpu_p->h_Lx[(Common->ibuffer+CHOLMOD_HOST_SUPERNODE_BUFFERS-1)%
                    CHOLMOD_HOST_SUPERNODE_BUFFERS];

    /* ---------------------------------------------------------------------- */
    /* determine the GPU leading dimension of B */
    /* ---------------------------------------------------------------------- */

    gpu_ldb = 0 ;
    if (nsrow2 > 0)
    {
        gpu_ldb = ((nsrow2+31)/32)*32 ;
    }

    /* ---------------------------------------------------------------------- */
    /* remember where device memory is, to be used by triangular solve later */
    /* ---------------------------------------------------------------------- */

    devPtrA = gpu_p->d_Lx[0];
    devPtrB = gpu_p->d_Lx[1];

    /* ---------------------------------------------------------------------- */
    /* copy A from device to device */
    /* ---------------------------------------------------------------------- */

    cudaStat = cudaMemcpy2DAsync ( devPtrA,
       gpu_lda * L_ENTRY * sizeof (devPtrA[0]),
       gpu_p->d_A[1],
       nsrow * L_ENTRY * sizeof (Lx[0]),
       nscol2 * L_ENTRY * sizeof (devPtrA[0]),
       nscol2,
       cudaMemcpyDeviceToDevice,
       Common->gpuStream[0] );

    if ( cudaStat ) {
        ERROR ( CHOLMOD_GPU_PROBLEM, "GPU memcopy device to device");
    }

    /* ---------------------------------------------------------------------- */
    /* copy B in advance, for gpu_triangular_solve */
    /* ---------------------------------------------------------------------- */

    if (nsrow2 > 0)
    {
        cudaStat = cudaMemcpy2DAsync (devPtrB,
            gpu_ldb * L_ENTRY * sizeof (devPtrB [0]),
            gpu_p->d_A[1] + L_ENTRY*nscol2,
            nsrow * L_ENTRY * sizeof (Lx [0]),
            nsrow2 * L_ENTRY * sizeof (devPtrB [0]),
            nscol2,
            cudaMemcpyDeviceToDevice,
            Common->gpuStream[0]) ;
        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
        }
    }

    /* ------------------------------------------------------------------ */
    /* define the dpotrf stream */
    /* ------------------------------------------------------------------ */

    cublasStatus = cublasSetStream (Common->cublasHandle,
                                    Common->gpuStream [0]) ;
    if (cublasStatus != CUBLAS_STATUS_SUCCESS)
    {
        ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream") ;
    }

    /* ---------------------------------------------------------------------- */
    /* block Cholesky factorization of S */
    /* ---------------------------------------------------------------------- */

    for (j = 0 ; j < n ; j += nb)
    {
        Int jb = nb < (n-j) ?
nb : (n-j) ; /* ------------------------------------------------------------------ */ /* do the CUDA BLAS dsyrk */ /* ------------------------------------------------------------------ */ alpha = -1.0 ; beta = 1.0 ; #ifdef REAL cublasStatus = cublasDsyrk (Common->cublasHandle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, jb, j, &alpha, devPtrA + j, gpu_lda, &beta, devPtrA + j + j*gpu_lda, gpu_lda) ; #else cublasStatus = cublasZherk (Common->cublasHandle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, jb, j, &alpha, (cuDoubleComplex*)devPtrA + j, gpu_lda, &beta, (cuDoubleComplex*)devPtrA + j + j*gpu_lda, gpu_lda) ; #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } /* ------------------------------------------------------------------ */ cudaStat = cudaEventRecord (Common->cublasEventPotrf [0], Common->gpuStream [0]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ; } cudaStat = cudaStreamWaitEvent (Common->gpuStream [1], Common->cublasEventPotrf [0], 0) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ; } /* ------------------------------------------------------------------ */ /* copy back the jb columns on two different streams */ /* ------------------------------------------------------------------ */ cudaStat = cudaMemcpy2DAsync (A + L_ENTRY*(j + j*lda), lda * L_ENTRY * sizeof (double), devPtrA + L_ENTRY*(j + j*gpu_lda), gpu_lda * L_ENTRY * sizeof (double), L_ENTRY * sizeof (double)*jb, jb, cudaMemcpyDeviceToHost, Common->gpuStream [1]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy from device") ; } /* ------------------------------------------------------------------ */ /* do the CUDA BLAS dgemm */ /* ------------------------------------------------------------------ */ if ((j+jb) < n) { #ifdef REAL alpha = -1.0 ; beta = 1.0 ; cublasStatus = cublasDgemm (Common->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, (n-j-jb), jb, j, &alpha, devPtrA + (j+jb), gpu_lda, devPtrA + (j) , gpu_lda, &beta, devPtrA + (j+jb + j*gpu_lda), gpu_lda) ; #else cuDoubleComplex calpha = {-1.0,0.0} ; cuDoubleComplex cbeta = { 1.0,0.0} ; cublasStatus = cublasZgemm (Common->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_C, (n-j-jb), jb, j, &calpha, (cuDoubleComplex*)devPtrA + (j+jb), gpu_lda, (cuDoubleComplex*)devPtrA + (j), gpu_lda, &cbeta, (cuDoubleComplex*)devPtrA + (j+jb + j*gpu_lda), gpu_lda ) ; #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } } cudaStat = cudaStreamSynchronize (Common->gpuStream [1]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ; } /* ------------------------------------------------------------------ */ /* compute the Cholesky factorization of the jbxjb block on the CPU */ /* ------------------------------------------------------------------ */ ilda = (int) lda ; ijb = jb ; #ifdef REAL LAPACK_DPOTRF ("L", &ijb, A + L_ENTRY * (j + j*lda), &ilda, &iinfo) ; #else LAPACK_ZPOTRF ("L", &ijb, A + L_ENTRY * (j + j*lda), &ilda, &iinfo) ; #endif *info = iinfo ; if (*info != 0) { *info = *info + j ; break ; } /* ------------------------------------------------------------------ */ /* copy the result back to the GPU */ /* ------------------------------------------------------------------ */ cudaStat = cudaMemcpy2DAsync (devPtrA + L_ENTRY*(j + j*gpu_lda), gpu_lda * L_ENTRY * sizeof (double), A + L_ENTRY * (j + j*lda), lda * L_ENTRY * sizeof (double), L_ENTRY * sizeof (double) * jb, jb, cudaMemcpyHostToDevice, Common->gpuStream [0]) ; if 
(cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ; } /* ------------------------------------------------------------------ */ /* do the CUDA BLAS dtrsm */ /* ------------------------------------------------------------------ */ if ((j+jb) < n) { #ifdef REAL alpha = 1.0 ; cublasStatus = cublasDtrsm (Common->cublasHandle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT, (n-j-jb), jb, &alpha, devPtrA + (j + j*gpu_lda), gpu_lda, devPtrA + (j+jb + j*gpu_lda), gpu_lda) ; #else cuDoubleComplex calpha = {1.0,0.0}; cublasStatus = cublasZtrsm (Common->cublasHandle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_C, CUBLAS_DIAG_NON_UNIT, (n-j-jb), jb, &calpha, (cuDoubleComplex *)devPtrA + (j + j*gpu_lda), gpu_lda, (cuDoubleComplex *)devPtrA + (j+jb + j*gpu_lda), gpu_lda) ; #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } /* -------------------------------------------------------------- */ /* Copy factored column back to host. */ /* -------------------------------------------------------------- */ cudaStat = cudaEventRecord (Common->cublasEventPotrf[2], Common->gpuStream[0]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ; } cudaStat = cudaStreamWaitEvent (Common->gpuStream[1], Common->cublasEventPotrf[2], 0) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ; } cudaStat = cudaMemcpy2DAsync (A + L_ENTRY*(j + jb + j * lda), lda * L_ENTRY * sizeof (double), devPtrA + L_ENTRY* (j + jb + j * gpu_lda), gpu_lda * L_ENTRY * sizeof (double), L_ENTRY * sizeof (double)* (n - j - jb), jb, cudaMemcpyDeviceToHost, Common->gpuStream[1]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ; } } } #ifndef NTIMER Common->CHOLMOD_GPU_POTRF_TIME += SuiteSparse_time ( ) - tstart ; #endif return (1) ; } /* ========================================================================== */ /* === gpu_triangular_solve ================================================= */ /* ========================================================================== */ /* The current supernode is columns k1 to k2-1 of L. Let L1 be the diagonal * block (factorized by dpotrf/zpotrf above; rows/cols k1:k2-1), and L2 be rows * k2:n-1 and columns k1:k2-1 of L. The triangular system to solve is L2*L1' = * S2, where S2 is overwritten with L2. More precisely, L2 = S2 / L1' in * MATLAB notation. 
*/ /* Version with pre-allocation in POTRF */ int TEMPLATE2 (CHOLMOD (gpu_triangular_solve)) ( Int nsrow2, /* L1 and S2 are nsrow2-by-nscol2 */ Int nscol2, /* L1 is nscol2-by-nscol2 */ Int nsrow, /* leading dimension of L1, L2, and S2 */ Int psx, /* L1 is at Lx+L_ENTRY*psx; * L2 at Lx+L_ENTRY*(psx+nscol2)*/ double *Lx, /* holds L1, L2, and S2 */ cholmod_common *Common, cholmod_gpu_pointers *gpu_p ) { double *devPtrA, *devPtrB ; cudaError_t cudaStat ; cublasStatus_t cublasStatus ; Int gpu_lda, gpu_ldb, gpu_rowstep ; Int gpu_row_start = 0 ; Int gpu_row_max_chunk, gpu_row_chunk; int ibuf = 0; int iblock = 0; int iHostBuff = (Common->ibuffer+CHOLMOD_HOST_SUPERNODE_BUFFERS-1) % CHOLMOD_HOST_SUPERNODE_BUFFERS; int i, j; Int iidx; int iwrap; #ifndef NTIMER double tstart ; #endif #ifdef REAL double alpha = 1.0 ; gpu_row_max_chunk = 768; #else cuDoubleComplex calpha = {1.0,0.0} ; gpu_row_max_chunk = 256; #endif if ( nsrow2 <= 0 ) { return (0) ; } #ifndef NTIMER tstart = SuiteSparse_time ( ) ; Common->CHOLMOD_GPU_TRSM_CALLS++ ; #endif gpu_lda = ((nscol2+31)/32)*32 ; gpu_ldb = ((nsrow2+31)/32)*32 ; devPtrA = gpu_p->d_Lx[0]; devPtrB = gpu_p->d_Lx[1]; /* make sure the copy of B has completed */ cudaStreamSynchronize( Common->gpuStream[0] ); /* ---------------------------------------------------------------------- */ /* do the CUDA BLAS dtrsm */ /* ---------------------------------------------------------------------- */ while ( gpu_row_start < nsrow2 ) { gpu_row_chunk = nsrow2 - gpu_row_start; if ( gpu_row_chunk > gpu_row_max_chunk ) { gpu_row_chunk = gpu_row_max_chunk; } cublasStatus = cublasSetStream ( Common->cublasHandle, Common->gpuStream[ibuf] ); if ( cublasStatus != CUBLAS_STATUS_SUCCESS ) { ERROR ( CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream"); } #ifdef REAL cublasStatus = cublasDtrsm (Common->cublasHandle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT, gpu_row_chunk, nscol2, &alpha, devPtrA, gpu_lda, devPtrB + gpu_row_start, gpu_ldb) ; #else cublasStatus = cublasZtrsm (Common->cublasHandle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_C, CUBLAS_DIAG_NON_UNIT, gpu_row_chunk, nscol2, &calpha, (const cuDoubleComplex *) devPtrA, gpu_lda, (cuDoubleComplex *)devPtrB + gpu_row_start , gpu_ldb) ; #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } /* ------------------------------------------------------------------ */ /* copy result back to the CPU */ /* ------------------------------------------------------------------ */ cudaStat = cudaMemcpy2DAsync ( gpu_p->h_Lx[iHostBuff] + L_ENTRY*(nscol2+gpu_row_start), nsrow * L_ENTRY * sizeof (Lx [0]), devPtrB + L_ENTRY*gpu_row_start, gpu_ldb * L_ENTRY * sizeof (devPtrB [0]), gpu_row_chunk * L_ENTRY * sizeof (devPtrB [0]), nscol2, cudaMemcpyDeviceToHost, Common->gpuStream[ibuf]); if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy from device") ; } cudaEventRecord ( Common->updateCBuffersFree[ibuf], Common->gpuStream[ibuf] ); gpu_row_start += gpu_row_chunk; ibuf++; ibuf = ibuf % CHOLMOD_HOST_SUPERNODE_BUFFERS; iblock ++; if ( iblock >= CHOLMOD_HOST_SUPERNODE_BUFFERS ) { Int gpu_row_start2 ; Int gpu_row_end ; /* then CHOLMOD_HOST_SUPERNODE_BUFFERS worth of work has been * scheduled, so check for completed events and copy result into * Lx before continuing. 
             */

            cudaEventSynchronize ( Common->updateCBuffersFree
                                   [iblock%CHOLMOD_HOST_SUPERNODE_BUFFERS] );

            /* copy into Lx */
            gpu_row_start2 = nscol2 + (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS)
                *gpu_row_max_chunk;
            gpu_row_end = gpu_row_start2+gpu_row_max_chunk;
            if ( gpu_row_end > nsrow ) gpu_row_end = nsrow;

#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
    private(iidx) if ( nscol2 > 32 )
            for ( j=0; j<nscol2; j++ ) {
                for ( i=gpu_row_start2*L_ENTRY; i<gpu_row_end*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
                }
            }
        }
    }

    /* Convenient to copy the L1 block here */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
    private ( iidx ) if ( nscol2 > 32 )
    for ( j=0; j<nscol2; j++ ) {
        for ( i=j*L_ENTRY; i<nscol2*L_ENTRY; i++ ) {
            iidx = j*nsrow*L_ENTRY + i;
            Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
        }
    }

    /* now account for the last HSTREAMS buffers */
    for ( iwrap=0; iwrap<CHOLMOD_HOST_SUPERNODE_BUFFERS; iwrap++ ) {
        int i, j;
        Int gpu_row_start2 = nscol2 + (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS)
            *gpu_row_max_chunk;
        if (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS >= 0 &&
            gpu_row_start2 < nsrow )
        {
            Int iidx;
            Int gpu_row_end = gpu_row_start2+gpu_row_max_chunk;
            if ( gpu_row_end > nsrow ) gpu_row_end = nsrow;

            cudaEventSynchronize ( Common->updateCBuffersFree
                                   [iblock%CHOLMOD_HOST_SUPERNODE_BUFFERS] );

            /* copy into Lx */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
    private(iidx) if ( nscol2 > 32 )
            for ( j=0; j<nscol2; j++ ) {
                for ( i=gpu_row_start2*L_ENTRY; i<gpu_row_end*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
                }
            }
        }
        iblock++;
    }

    /* ---------------------------------------------------------------------- */
    /* return */
    /* ---------------------------------------------------------------------- */

#ifndef NTIMER
    Common->CHOLMOD_GPU_TRSM_TIME += SuiteSparse_time ( ) - tstart ;
#endif

    return (1) ;
}

/* ========================================================================== */
/* === gpu_copy_supernode =================================================== */
/* ========================================================================== */

/*
 * In the event gpu_triangular_solve is not needed / called, this routine
 * copies the factored diagonal block from the GPU to the CPU.
 */

void TEMPLATE2 (CHOLMOD (gpu_copy_supernode))
(
    cholmod_common *Common,
    double *Lx,
    Int psx,
    Int nscol,
    Int nscol2,
    Int nsrow,
    int supernodeUsedGPU,
    int iHostBuff,
    cholmod_gpu_pointers *gpu_p
)
{
    Int iidx, i, j;
    if ( supernodeUsedGPU && nscol2 * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) {
        cudaDeviceSynchronize();
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
    private(iidx,i,j) if (nscol>32)
        for ( j=0; j<nscol; j++ ) {
            for ( i=j*L_ENTRY; i<nscol*L_ENTRY; i++ ) {
                iidx = j*nsrow*L_ENTRY+i;
                Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
            }
        }
    }
    return;
}

#endif

#undef REAL
#undef COMPLEX
#undef ZOMPLEX
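The file above hides H2D traffic by cycling pinned host buffers across streams and gating buffer reuse with events (h_Lx, updateCBuffersFree). Reduced to its essentials, the idiom is roughly this sketch (illustrative names, not the CHOLMOD API):

#include <string.h>
#include <cuda_runtime.h>

#define NBUF 2

/* Stage src into dev in nchunks pieces of chunk doubles each, cycling
   NBUF pinned buffers/streams so the copy of chunk k+1 can overlap
   work queued on chunk k's stream. */
static void staged_h2d(const double *src, double *dev,
                       size_t chunk, int nchunks)
{
  cudaStream_t stream[NBUF];
  cudaEvent_t  buf_free[NBUF];
  double      *host[NBUF];
  int b, k;
  for (b = 0; b < NBUF; b++) {
    cudaStreamCreate(&stream[b]);
    cudaEventCreateWithFlags(&buf_free[b], cudaEventDisableTiming);
    cudaMallocHost((void **)&host[b], chunk * sizeof(double));
  }
  for (k = 0; k < nchunks; k++) {
    b = k % NBUF;
    cudaEventSynchronize(buf_free[b]);   /* wait until buffer b is reusable */
    memcpy(host[b], src + (size_t)k * chunk, chunk * sizeof(double));
    cudaMemcpyAsync(dev + (size_t)k * chunk, host[b],
                    chunk * sizeof(double), cudaMemcpyHostToDevice,
                    stream[b]);
    /* ... launch kernels on stream[b] that consume this chunk ... */
    cudaEventRecord(buf_free[b], stream[b]);  /* buffer b free after copy */
  }
  for (b = 0; b < NBUF; b++) {
    cudaStreamSynchronize(stream[b]);
    cudaFreeHost(host[b]);
    cudaEventDestroy(buf_free[b]);
    cudaStreamDestroy(stream[b]);
  }
}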
Stmt.h
//===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <string> namespace llvm { class FoldingSetNodeID; } namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class IdentifierInfo; class LabelDecl; class ParmVarDecl; class PrinterHelper; struct PrintingPolicy; class QualType; class RecordDecl; class SourceManager; class StringLiteral; class SwitchStmt; class Token; class VarDecl; //===--------------------------------------------------------------------===// // ExprIterator - Iterators for iterating over Stmt* arrays that contain // only Expr*. This is needed because AST nodes use Stmt* arrays to store // references to children (to be compatible with StmtIterator). //===--------------------------------------------------------------------===// class Stmt; class Expr; class ExprIterator { Stmt** I; public: ExprIterator(Stmt** i) : I(i) {} ExprIterator() : I(nullptr) {} ExprIterator& operator++() { ++I; return *this; } ExprIterator operator-(size_t i) { return I-i; } ExprIterator operator+(size_t i) { return I+i; } Expr* operator[](size_t idx); // FIXME: Verify that this will correctly return a signed distance. signed operator-(const ExprIterator& R) const { return I - R.I; } Expr* operator*() const; Expr* operator->() const; bool operator==(const ExprIterator& R) const { return I == R.I; } bool operator!=(const ExprIterator& R) const { return I != R.I; } bool operator>(const ExprIterator& R) const { return I > R.I; } bool operator>=(const ExprIterator& R) const { return I >= R.I; } }; class ConstExprIterator { const Stmt * const *I; public: ConstExprIterator(const Stmt * const *i) : I(i) {} ConstExprIterator() : I(nullptr) {} ConstExprIterator& operator++() { ++I; return *this; } ConstExprIterator operator+(size_t i) const { return I+i; } ConstExprIterator operator-(size_t i) const { return I-i; } const Expr * operator[](size_t idx) const; signed operator-(const ConstExprIterator& R) const { return I - R.I; } const Expr * operator*() const; const Expr * operator->() const; bool operator==(const ConstExprIterator& R) const { return I == R.I; } bool operator!=(const ConstExprIterator& R) const { return I != R.I; } bool operator>(const ConstExprIterator& R) const { return I > R.I; } bool operator>=(const ConstExprIterator& R) const { return I >= R.I; } }; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: void* operator new(size_t bytes) throw() { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void* data) throw() { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } class StmtBitfields { friend class Stmt; /// \brief The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class CompoundStmtBitfields { friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; }; class ExprBitfields { friend class Expr; friend class DeclRefExpr; // computeDependence friend class InitListExpr; // ctor friend class DesignatedInitExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class ASTStmtReader; // deserialization friend class CXXNewExpr; // ctor friend class DependentScopeDeclRefExpr; // ctor friend class CXXConstructExpr; // ctor friend class CallExpr; // ctor friend class OffsetOfExpr; // ctor friend class ObjCMessageExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ShuffleVectorExpr; // ctor friend class ParenListExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class OverloadExpr; // ctor friend class PseudoObjectExpr; // ctor friend class AtomicExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 2; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = 16 }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 2; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 2; unsigned IsType : 1; // true if operand is a type, false if an expression. 
}; class DeclRefExprBitfields { friend class DeclRefExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingLocal : 1; }; class CastExprBitfields { friend class CastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned BasePathSize : 32 - 6 - NumExprBits; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; }; class ExprWithCleanupsBitfields { friend class ExprWithCleanups; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned NumObjects : 32 - NumExprBits; }; class PseudoObjectExprBitfields { friend class PseudoObjectExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class TypeTraitExprBitfields { friend class TypeTraitExpr; friend class ASTStmtReader; friend class ASTStmtWriter; unsigned : NumExprBits; /// \brief The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// \brief If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// \brief The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; union { // FIXME: this is wasteful on 64-bit platforms. void *Aligner; StmtBitfields StmtBits; CompoundStmtBitfields CompoundStmtBits; ExprBitfields ExprBits; CharacterLiteralBitfields CharacterLiteralBits; FloatingLiteralBitfields FloatingLiteralBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; DeclRefExprBitfields DeclRefExprBits; CastExprBitfields CastExprBits; CallExprBitfields CallExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; PseudoObjectExprBitfields PseudoObjectExprBits; ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; InitListExprBitfields InitListExprBits; TypeTraitExprBitfields TypeTraitExprBits; }; friend class ASTStmtReader; friend class ASTStmtWriter; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void* operator new(size_t bytes, void* mem) throw() { return mem; } void operator delete(void*, const ASTContext&, unsigned) throw() { } void operator delete(void*, const ASTContext*, unsigned) throw() { } void operator delete(void*, size_t) throw() { } void operator delete(void*, void*) throw() { } public: /// \brief A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell { }; private: /// \brief Whether statistic collection is enabled. 
static bool StatisticsEnabled; protected: /// \brief Construct an empty statement. explicit Stmt(StmtClass SC, EmptyShell) { StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } public: Stmt(StmtClass SC) { StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getLocStart() const LLVM_READONLY; SourceLocation getLocEnd() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// \brief Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip past any implicit AST nodes which might surround this /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes. Stmt *IgnoreImplicit(); const Stmt *stripLabelLikeStatements() const; Stmt *stripLabelLikeStatements() { return const_cast<Stmt*>( const_cast<const Stmt*>(this)->stripLabelLikeStatements()); } /// Child Iterators: All subclasses must implement 'children' /// to permit easy iteration over the substatements/subexpressions of an /// AST node. This permits easy iteration over all nodes in the AST. typedef StmtIterator child_iterator; typedef ConstStmtIterator const_child_iterator; typedef StmtRange child_range; typedef ConstStmtRange const_child_range; child_range children(); const_child_range children() const { return const_cast<Stmt*>(this)->children(); } child_iterator child_begin() { return children().first; } child_iterator child_end() { return children().second; } const_child_iterator child_begin() const { return children().first; } const_child_iterator child_end() const { return children().second; } /// \brief Produce a unique representation of the given statement. /// /// \param ID once the profiling operation is complete, will contain /// the unique representation of the given statement. /// /// \param Context the AST context in which the statement resides /// /// \param Canonical whether the profile should be based on the canonical /// representation of this statement (e.g., where non-type template /// parameters are identified by index/level rather than their /// declaration pointers) or the exact representation of the statement as /// written in the source. void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool Canonical) const; }; /// DeclStmt - Adaptor class for mixing declarations with statements and /// expressions.
For example, CompoundStmt mixes statements, expressions /// and declarations (variables, types). Another example is ForStmt, where /// the first statement can be an expression or a declaration. /// class DeclStmt : public Stmt { DeclGroupRef DG; SourceLocation StartLoc, EndLoc; public: DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {} /// \brief Build an empty declaration statement. explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { } /// isSingleDecl - This method returns true if this DeclStmt refers /// to a single Decl. bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } SourceLocation getStartLoc() const { return StartLoc; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } typedef DeclGroupRef::iterator decl_iterator; typedef DeclGroupRef::const_iterator const_decl_iterator; typedef llvm::iterator_range<decl_iterator> decl_range; typedef llvm::iterator_range<const_decl_iterator> decl_const_range; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { SourceLocation SemiLoc; /// \brief True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode bool HasLeadingEmptyMacro; public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass), SemiLoc(L), HasLeadingEmptyMacro(hasLeadingEmptyMacro) {} /// \brief Build an empty null statement. explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty), HasLeadingEmptyMacro(false) { } SourceLocation getSemiLoc() const { return SemiLoc; } void setSemiLoc(SourceLocation L) { SemiLoc = L; } bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; } SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(); } friend class ASTStmtReader; friend class ASTStmtWriter; }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. 
/// class CompoundStmt : public Stmt { Stmt** Body; SourceLocation LBracLoc, RBracLoc; public: CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts, SourceLocation LB, SourceLocation RB); // \brief Build an empty compound statement with a location. explicit CompoundStmt(SourceLocation Loc) : Stmt(CompoundStmtClass), Body(nullptr), LBracLoc(Loc), RBracLoc(Loc) { CompoundStmtBits.NumStmts = 0; } // \brief Build an empty compound statement. explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty), Body(nullptr) { CompoundStmtBits.NumStmts = 0; } void setStmts(const ASTContext &C, Stmt **Stmts, unsigned NumStmts); bool body_empty() const { return CompoundStmtBits.NumStmts == 0; } unsigned size() const { return CompoundStmtBits.NumStmts; } typedef Stmt** body_iterator; typedef llvm::iterator_range<body_iterator> body_range; body_range body() { return body_range(body_begin(), body_end()); } body_iterator body_begin() { return Body; } body_iterator body_end() { return Body + size(); } Stmt *body_back() { return !body_empty() ? Body[size()-1] : nullptr; } void setLastStmt(Stmt *S) { assert(!body_empty() && "setLastStmt"); Body[size()-1] = S; } typedef Stmt* const * const_body_iterator; typedef llvm::iterator_range<const_body_iterator> body_const_range; body_const_range body() const { return body_const_range(body_begin(), body_end()); } const_body_iterator body_begin() const { return Body; } const_body_iterator body_end() const { return Body + size(); } const Stmt *body_back() const { return !body_empty() ? Body[size() - 1] : nullptr; } typedef std::reverse_iterator<body_iterator> reverse_body_iterator; reverse_body_iterator body_rbegin() { return reverse_body_iterator(body_end()); } reverse_body_iterator body_rend() { return reverse_body_iterator(body_begin()); } typedef std::reverse_iterator<const_body_iterator> const_reverse_body_iterator; const_reverse_body_iterator body_rbegin() const { return const_reverse_body_iterator(body_end()); } const_reverse_body_iterator body_rend() const { return const_reverse_body_iterator(body_begin()); } SourceLocation getLocStart() const LLVM_READONLY { return LBracLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RBracLoc; } SourceLocation getLBracLoc() const { return LBracLoc; } void setLBracLoc(SourceLocation L) { LBracLoc = L; } SourceLocation getRBracLoc() const { return RBracLoc; } void setRBracLoc(SourceLocation L) { RBracLoc = L; } static bool classof(const Stmt *T) { return T->getStmtClass() == CompoundStmtClass; } // Iterators child_range children() { return child_range(Body, Body + CompoundStmtBits.NumStmts); } const_child_range children() const { return child_range(Body, Body + CompoundStmtBits.NumStmts); } }; // SwitchCase is the base class for CaseStmt and DefaultStmt, class SwitchCase : public Stmt { protected: // A pointer to the following CaseStmt or DefaultStmt class, // used by SwitchStmt. 
SwitchCase *NextSwitchCase; SourceLocation KeywordLoc; SourceLocation ColonLoc; SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc) : Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc), ColonLoc(ColonLoc) { } SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC), NextSwitchCase(nullptr) {} public: const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; } SwitchCase *getNextSwitchCase() { return NextSwitchCase; } void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; } SourceLocation getKeywordLoc() const { return KeywordLoc; } void setKeywordLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Stmt *getSubStmt(); const Stmt *getSubStmt() const { return const_cast<SwitchCase*>(this)->getSubStmt(); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY; static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass || T->getStmtClass() == DefaultStmtClass; } }; class CaseStmt : public SwitchCase { enum { LHS, RHS, SUBSTMT, END_EXPR }; Stmt* SubExprs[END_EXPR]; // The expression for the RHS is Non-null for // GNU "case 1 ... 4" extension SourceLocation EllipsisLoc; public: CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc) : SwitchCase(CaseStmtClass, caseLoc, colonLoc) { SubExprs[SUBSTMT] = nullptr; SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs); SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs); EllipsisLoc = ellipsisLoc; } /// \brief Build an empty switch case statement. explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { } SourceLocation getCaseLoc() const { return KeywordLoc; } void setCaseLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getEllipsisLoc() const { return EllipsisLoc; } void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); } Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); } Stmt *getSubStmt() { return SubExprs[SUBSTMT]; } const Expr *getLHS() const { return reinterpret_cast<const Expr*>(SubExprs[LHS]); } const Expr *getRHS() const { return reinterpret_cast<const Expr*>(SubExprs[RHS]); } const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; } void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; } void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); } void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. const CaseStmt *CS = this; while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[END_EXPR]); } }; class DefaultStmt : public SwitchCase { Stmt* SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// \brief Build an empty default statement. 
explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) { } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *S) { SubStmt = S; } SourceLocation getDefaultLoc() const { return KeywordLoc; } void setDefaultLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} static bool classof(const Stmt *T) { return T->getStmtClass() == DefaultStmtClass; } // Iterators child_range children() { return child_range(&SubStmt, &SubStmt+1); } }; inline SourceLocation SwitchCase::getLocEnd() const { if (const CaseStmt *CS = dyn_cast<CaseStmt>(this)) return CS->getLocEnd(); return cast<DefaultStmt>(this)->getLocEnd(); } /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; /// class LabelStmt : public Stmt { LabelDecl *TheDecl; Stmt *SubStmt; SourceLocation IdentLoc; public: LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : Stmt(LabelStmtClass), TheDecl(D), SubStmt(substmt), IdentLoc(IL) { } // \brief Build an empty label statement. explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { } SourceLocation getIdentLoc() const { return IdentLoc; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setIdentLoc(SourceLocation L) { IdentLoc = L; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } }; /// \brief Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... } /// class AttributedStmt : public Stmt { Stmt *SubStmt; SourceLocation AttrLoc; unsigned NumAttrs; friend class ASTStmtReader; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt) : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc), NumAttrs(Attrs.size()) { memcpy(getAttrArrayPtr(), Attrs.data(), Attrs.size() * sizeof(Attr *)); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) { memset(getAttrArrayPtr(), 0, NumAttrs * sizeof(Attr *)); } Attr *const *getAttrArrayPtr() const { return reinterpret_cast<Attr *const *>(this + 1); } Attr **getAttrArrayPtr() { return reinterpret_cast<Attr **>(this + 1); } public: static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); // \brief Build an empty attributed statement. 
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs); SourceLocation getAttrLoc() const { return AttrLoc; } ArrayRef<const Attr*> getAttrs() const { return ArrayRef<const Attr*>(getAttrArrayPtr(), NumAttrs); } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == AttributedStmtClass; } }; /// IfStmt - This represents an if/then/else. /// class IfStmt : public Stmt { enum { VAR, COND, THEN, ELSE, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation IfLoc; SourceLocation ElseLoc; public: IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond, Stmt *then, SourceLocation EL = SourceLocation(), Stmt *elsev = nullptr); /// \brief Build an empty if/then/else statement. explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "if" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// if (int x = foo()) { /// printf("x is %d", x); /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this IfStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } const Stmt *getThen() const { return SubExprs[THEN]; } void setThen(Stmt *S) { SubExprs[THEN] = S; } const Stmt *getElse() const { return SubExprs[ELSE]; } void setElse(Stmt *S) { SubExprs[ELSE] = S; } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Stmt *getThen() { return SubExprs[THEN]; } Stmt *getElse() { return SubExprs[ELSE]; } SourceLocation getIfLoc() const { return IfLoc; } void setIfLoc(SourceLocation L) { IfLoc = L; } SourceLocation getElseLoc() const { return ElseLoc; } void setElseLoc(SourceLocation L) { ElseLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; } SourceLocation getLocEnd() const LLVM_READONLY { if (SubExprs[ELSE]) return SubExprs[ELSE]->getLocEnd(); else return SubExprs[THEN]->getLocEnd(); } // Iterators over subexpressions. The iterators will include iterating // over the initialization expression referenced by the condition variable. child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == IfStmtClass; } }; /// SwitchStmt - This represents a 'switch' stmt. /// class SwitchStmt : public Stmt { enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // This points to a linked list of case and default statements. SwitchCase *FirstCase; SourceLocation SwitchLoc; /// If the SwitchStmt is a switch on an enum value, this records whether /// all the enum values were covered by CaseStmts. This value is meant to /// be a hint for possible clients. unsigned AllEnumCasesCovered : 1; public: SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond); /// \brief Build an empty switch statement.
explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Stmt *getBody() const { return SubExprs[BODY]; } const SwitchCase *getSwitchCaseList() const { return FirstCase; } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } Stmt *getBody() { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SwitchCase *getSwitchCaseList() { return FirstCase; } /// \brief Set the case list for this switch statement. void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; } SourceLocation getSwitchLoc() const { return SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { SubExprs[BODY] = S; SwitchLoc = SL; } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase); FirstCase = SC; } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. void setAllEnumCasesCovered() { AllEnumCasesCovered = 1; } /// Returns true if the SwitchStmt is a switch of an enum value and all cases /// have been explicitly covered. bool isAllEnumCasesCovered() const { return (bool) AllEnumCasesCovered; } SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == SwitchStmtClass; } }; /// WhileStmt - This represents a 'while' stmt. /// class WhileStmt : public Stmt { enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation WhileLoc; public: WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body, SourceLocation WL); /// \brief Build an empty while statement. explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// DoStmt - This represents a 'do/while' stmt. /// class DoStmt : public Stmt { enum { BODY, COND, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation DoLoc; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) { SubExprs[COND] = reinterpret_cast<Stmt*>(cond); SubExprs[BODY] = body; } /// \brief Build an empty do-while statement. explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getDoLoc() const { return DoLoc; } void setDoLoc(SourceLocation L) { DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. /// class ForStmt : public Stmt { enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation ForLoc; SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// \brief Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { } Stmt *getInit() { return SubExprs[INIT]; } /// \brief Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. /// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... 
/// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForLoc; } void setForLoc(SourceLocation L) { ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// GotoStmt - This represents a direct goto. /// class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation GotoLoc; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {} /// \brief Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { } LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoLoc; } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(); } }; /// IndirectGotoStmt - This represents an indirect goto. /// class IndirectGotoStmt : public Stmt { SourceLocation GotoLoc; SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc), Target((Stmt*)target) {} /// \brief Build an empty indirect goto statement. 
explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) { } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr*>(Target); } const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);} void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt*>(this)->getConstantTarget(); } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target+1); } }; /// ContinueStmt - This represents a continue. /// class ContinueStmt : public Stmt { SourceLocation ContinueLoc; public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {} /// \brief Build an empty continue statement. explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { } SourceLocation getContinueLoc() const { return ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(); } }; /// BreakStmt - This represents a break. /// class BreakStmt : public Stmt { SourceLocation BreakLoc; public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {} /// \brief Build an empty break statement. explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { } SourceLocation getBreakLoc() const { return BreakLoc; } void setBreakLoc(SourceLocation L) { BreakLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. /// class ReturnStmt : public Stmt { Stmt *RetExpr; SourceLocation RetLoc; const VarDecl *NRVOCandidate; public: ReturnStmt(SourceLocation RL) : Stmt(ReturnStmtClass), RetExpr(nullptr), RetLoc(RL), NRVOCandidate(nullptr) {} ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate) : Stmt(ReturnStmtClass), RetExpr((Stmt*) E), RetLoc(RL), NRVOCandidate(NRVOCandidate) {} /// \brief Build an empty return expression. 
explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { } const Expr *getRetValue() const; Expr *getRetValue(); void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); } SourceLocation getReturnLoc() const { return RetLoc; } void setReturnLoc(SourceLocation L) { RetLoc = L; } /// \brief Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. const VarDecl *getNRVOCandidate() const { return NRVOCandidate; } void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; } SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RetExpr ? RetExpr->getLocEnd() : RetLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr+1); return child_range(); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. /// class AsmStmt : public Stmt { protected: SourceLocation AsmLoc; /// \brief True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// \brief If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { } friend class ASTStmtReader; public: /// \brief Build an empty inline-assembly statement. explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty), Exprs(nullptr) { } SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); } SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. 
StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. typedef ExprIterator inputs_iterator; typedef ConstExprIterator const_inputs_iterator; typedef llvm::iterator_range<inputs_iterator> inputs_range; typedef llvm::iterator_range<const_inputs_iterator> inputs_const_range; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. typedef ExprIterator outputs_iterator; typedef ConstExprIterator const_outputs_iterator; typedef llvm::iterator_range<outputs_iterator> outputs_range; typedef llvm::iterator_range<const_outputs_iterator> outputs_const_range; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. /// class GCCAsmStmt : public AsmStmt { SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. StringLiteral **Constraints; StringLiteral **Clobbers; IdentifierInfo **Names; friend class ASTStmtReader; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, SourceLocation rparenloc); /// \brief Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty), Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. 
}; private: Kind MyKind; std::string Str; unsigned OperandNo; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, char Modifier) : MyKind(Operand), Str(), OperandNo(OpNo) { Str += Modifier; } bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { assert(isString()); return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const { assert(isOperand()); return Str[0]; } }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. 
/// class MSAsmStmt : public AsmStmt { SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks; Token *AsmToks; StringRef *Constraints; StringRef *Clobbers; friend class ASTStmtReader; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// \brief Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty), NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { } SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return ArrayRef<StringRef>(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return ArrayRef<StringRef>(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return ArrayRef<Expr*>(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { } public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt 
*getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { } public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getLocEnd(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; int HandlerIndex; int HandlerParentIndex; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler, int HandlerIndex, int HandlerParentIndex); friend class ASTReader; friend class ASTStmtReader; explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { } public: static SEHTryStmt *Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler, int HandlerIndex, int HandlerParentIndex); SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } int getHandlerIndex() const { return HandlerIndex; } int getHandlerParentIndex() const { return HandlerParentIndex; } }; /// Represents a __leave statement. /// class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// \brief Build an empty __leave statement. explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { } SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(); } }; /// \brief This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. 
/// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// \brief The different capture forms: by 'this' or by reference, etc. enum VariableCaptureKind { VCK_This, VCK_ByRef }; /// \brief Describes the capture of either a variable or 'this'. class Capture { llvm::PointerIntPair<VarDecl *, 1, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: /// \brief Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. /// Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr) : VarAndKind(Var, Kind), Loc(Loc) { switch (Kind) { case VCK_This: assert(!Var && "'this' capture cannot have a variable!"); break; case VCK_ByRef: assert(Var && "capturing by reference must have a variable!"); break; } } /// \brief Determine the kind of capture. VariableCaptureKind getCaptureKind() const { return VarAndKind.getInt(); } /// \brief Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// \brief Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// \brief Determine whether this capture handles a variable. bool capturesVariable() const { return getCaptureKind() != VCK_This; } /// \brief Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture does not capture 'this'. VarDecl *getCapturedVar() const { assert(!capturesThis() && "No variable available for 'this' capture"); return VarAndKind.getPointer(); } friend class ASTStmtReader; }; private: /// \brief The number of variables captured, including 'this'. unsigned NumCaptures; /// \brief The pointer part is the implicit outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind; /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl; /// \brief Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// \brief Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() const { return reinterpret_cast<Stmt **>(const_cast<CapturedStmt *>(this) + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// \brief Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return const_cast<CapturedStmt *>(this)->getCapturedStmt(); } /// \brief Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl() { return CapDeclAndKind.getPointer(); } const CapturedDecl *getCapturedDecl() const { return const_cast<CapturedStmt *>(this)->getCapturedDecl(); } /// \brief Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D) { assert(D && "null CapturedDecl"); CapDeclAndKind.setPointer(D); } /// \brief Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const { return CapDeclAndKind.getInt(); } /// \brief Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind) { CapDeclAndKind.setInt(Kind); } /// \brief Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// \brief Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// \brief True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// \brief An iterator that walks over the captures. typedef Capture *capture_iterator; typedef const Capture *const_capture_iterator; typedef llvm::iterator_range<capture_iterator> capture_range; typedef llvm::iterator_range<const_capture_iterator> capture_const_range; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// \brief Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// \brief Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// \brief Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// \brief Iterator that walks over the capture initialization arguments. typedef Expr **capture_init_iterator; typedef llvm::iterator_range<capture_init_iterator> capture_init_range; capture_init_range capture_inits() const { return capture_init_range(capture_init_begin(), capture_init_end()); } /// \brief Retrieve the first initialization argument. capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr **>(getStoredStmts()); } /// \brief Retrieve the iterator pointing one past the last initialization /// argument. capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getLocStart() const LLVM_READONLY { return getCapturedStmt()->getLocStart(); } SourceLocation getLocEnd() const LLVM_READONLY { return getCapturedStmt()->getLocEnd(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); friend class ASTStmtReader; }; } // end namespace clang #endif
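// ---------------------------------------------------------------------------
// Illustrative sketch (an addition, not part of the original header): the
// usual way to consume the interface above is a preorder walk over the
// children() range combined with a getStmtClass() dispatch. The helper name
// countCompoundStmts() is hypothetical; the code assumes only declarations
// that appear in this header.
static unsigned countCompoundStmts(const clang::Stmt *S) {
  if (!S) return 0;                       // children() may yield null statements
  unsigned N = (S->getStmtClass() == clang::Stmt::CompoundStmtClass) ? 1 : 0;
  for (clang::Stmt::const_child_iterator I = S->child_begin(),
                                         E = S->child_end();
       I != E; ++I)
    N += countCompoundStmts(*I);          // recurse into substatements
  return N;
}
// Type-safe downcasts use the classof() hooks declared on each subclass,
// e.g. dyn_cast<clang::CompoundStmt>(S), instead of raw casts.
// ---------------------------------------------------------------------------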
dslash.h
#ifndef _DSLASH_H #define _DSLASH_H #include <iostream> #include <vector> #include <unistd.h> #include <stdlib.h> #include <stdio.h> #include <sys/resource.h> #include <math.h> #include <chrono> extern size_t sites_on_node; extern size_t even_sites_on_node; extern std::vector<int> squaresize; extern unsigned int verbose; extern size_t warmups; #ifndef ITERATIONS # define ITERATIONS 100 #endif #ifndef PRECISION # define PRECISION 2 // 1->single, 2->double #endif #ifndef LDIM # define LDIM 32 // Lattice size = LDIM^4 #endif /* Adapted from su3.h in MILC version 7 */ /* generic precision complex number definition */ /* specific for float complex */ typedef struct { float real; float imag; } fcomplex; /* specific for double complex */ typedef struct { double real; double imag; } dcomplex; typedef struct { fcomplex e[3][3]; } fsu3_matrix; typedef struct { fcomplex c[3]; } fsu3_vector; typedef struct { dcomplex e[3][3]; } dsu3_matrix; typedef struct { dcomplex c[3]; } dsu3_vector; #if (PRECISION==1) #define su3_matrix fsu3_matrix #define su3_vector fsu3_vector #define Real float #define Complx fcomplex #define EPISON 2E-5 #else #define su3_matrix dsu3_matrix #define su3_vector dsu3_vector #define Real double #define Complx dcomplex #define EPISON 2E-6 #endif /* PRECISION */ /* c = a + b */ #define CADD(a,b,c) { (c).real = (a).real + (b).real; \ (c).imag = (a).imag + (b).imag; } /* c = a - b */ #define CSUB(a,b,c) { (c).real = (a).real - (b).real; \ (c).imag = (a).imag - (b).imag; } /* c += a * b */ #define CMULSUM(a,b,c) { (c).real += (a).real*(b).real - (a).imag*(b).imag; \ (c).imag += (a).real*(b).imag + (a).imag*(b).real; } /* c = a * b */ #define CMUL(a,b,c) { (c).real = (a).real*(b).real - (a).imag*(b).imag; \ (c).imag = (a).real*(b).imag + (a).imag*(b).real; } /* a += b */ #define CSUM(a,b) { (a).real += (b).real; (a).imag += (b).imag; } /* b = a* */ #define CONJG(a,b) { (b).real = (a).real; (b).imag = -(a).imag; } #pragma omp declare target //-------------------------------------------------------------------------------- inline void su3_adjoint( const su3_matrix *a, su3_matrix *b ){ int i,j; for(i=0;i<3;i++)for(j=0;j<3;j++){ CONJG( a->e[j][i], b->e[i][j] ); } } //-------------------------------------------------------------------------------- inline void mult_su3_mat_vec( const su3_matrix *a, const su3_vector *b, su3_vector *c ){ int i,j; for(i=0;i<3;i++){ Complx x = {0.0, 0.0}; for(j=0;j<3;j++){ CMULSUM( a->e[i][j] , b->c[j] , x ); } c->c[i] = x; } } //-------------------------------------------------------------------------------- inline void mult_su3_mat_vec_sum( const su3_matrix *a, const su3_vector *b, su3_vector *c ){ int i,j; for(i=0;i<3;i++){ Complx x = {0.0, 0.0}; for(j=0;j<3;j++){ CMULSUM( a->e[i][j] , b->c[j] , x ); } c->c[i].real += x.real; c->c[i].imag += x.imag; } } //-------------------------------------------------------------------------------- inline void add_su3_vector( const su3_vector *a, const su3_vector *b, su3_vector *c ){ int i; for(i=0;i<3;i++){ CADD( a->c[i], b->c[i], c->c[i] ); } } //-------------------------------------------------------------------------------- inline void sub_su3_vector( const su3_vector *a, const su3_vector *b, su3_vector *c ){ int i; for(i=0;i<3;i++){ CSUB( a->c[i], b->c[i], c->c[i] ); } } #pragma omp end declare target //-------------------------------------------------------------------------------- inline void mult_su3_mat_vec_sum_4dir( su3_matrix *a, su3_vector *b0, su3_vector *b1, su3_vector *b2, su3_vector *b3,
su3_vector *c ){ mult_su3_mat_vec( a,b0,c ); mult_su3_mat_vec_sum( ++a,b1,c ); mult_su3_mat_vec_sum( ++a,b2,c ); mult_su3_mat_vec_sum( ++a,b3,c ); } //-------------------------------------------------------------------------------- #include <stdio.h> inline void dumpmat( su3_matrix *m ){ int i,j; for(i=0;i<3;i++){ for(j=0;j<3;j++) printf("(%.2e,%.2e)\t", m->e[i][j].real,m->e[i][j].imag); printf("\n"); } printf("\n"); } static int nx = LDIM; static int ny = LDIM; static int nz = LDIM; static int nt = LDIM; #define XUP 0 #define YUP 1 #define ZUP 2 #define TUP 3 //-------------------------------------------------------------------------------- inline size_t node_index(const int x, const int y, const int z, const int t) { size_t i; int xr,yr,zr,tr; xr = (x+nx)%squaresize[XUP]; yr = (y+ny)%squaresize[YUP]; zr = (z+nz)%squaresize[ZUP]; tr = (t+nt)%squaresize[TUP]; i = xr + squaresize[XUP]*( yr + squaresize[YUP]*( zr + squaresize[ZUP]*tr)); if( (x+y+z+t)%2==0 ){ /* even site */ return( i/2 ); } else { return( (i + sites_on_node)/2 ); } } //-------------------------------------------------------------------------------- // Set the indices for gathers from neighbors inline void set_neighbors( size_t *fwd, size_t *bck, size_t *fwd3, size_t *bck3 ) { for(int x = 0; x < nx; x++) for(int y = 0; y < ny; y++) for(int z = 0; z < nz; z++) for(int t = 0; t < nt; t++) { int i = node_index(x,y,z,t); fwd[4*i+0] = node_index(x+1,y,z,t); bck[4*i+0] = node_index(x-1,y,z,t); fwd[4*i+1] = node_index(x,y+1,z,t); bck[4*i+1] = node_index(x,y-1,z,t); fwd[4*i+2] = node_index(x,y,z+1,t); bck[4*i+2] = node_index(x,y,z-1,t); fwd[4*i+3] = node_index(x,y,z,t+1); bck[4*i+3] = node_index(x,y,z,t-1); fwd3[4*i+0] = node_index(x+3,y,z,t); bck3[4*i+0] = node_index(x-3,y,z,t); fwd3[4*i+1] = node_index(x,y+3,z,t); bck3[4*i+1] = node_index(x,y-3,z,t); fwd3[4*i+2] = node_index(x,y,z+3,t); bck3[4*i+2] = node_index(x,y,z-3,t); fwd3[4*i+3] = node_index(x,y,z,t+3); bck3[4*i+3] = node_index(x,y,z,t-3); } } // Used for validation void dslash_fn_field(su3_vector *src, su3_vector *dst, int parity, su3_matrix *fat, su3_matrix *lng, su3_matrix *fatbck, su3_matrix *lngbck ); double dslash_fn( const std::vector<su3_vector> &src, std::vector<su3_vector> &dst, const std::vector<su3_matrix> &fat, const std::vector<su3_matrix> &lng, std::vector<su3_matrix> &fatbck, std::vector<su3_matrix> &lngbck, size_t *fwd, size_t *bck, size_t *fwd3, size_t *bck3, const size_t iterations, size_t wgsize ); typedef std::chrono::system_clock Clock; #endif
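For orientation, a minimal smoke test of the SU(3) kernels declared above. This driver is an illustrative addition, not part of the benchmark; it assumes the header is compiled as C++ (it includes <iostream> and <vector>) and that multiplying a vector by the identity matrix returns the vector unchanged.

// dslash_smoke.cpp -- illustrative only; main() is an assumption, not
// part of the original benchmark sources.
#include "dslash.h"

int main() {
  su3_matrix a = {};              // zero-initialize, then set the identity
  su3_vector b = {}, c;
  for (int i = 0; i < 3; i++) a.e[i][i].real = 1.0;
  for (int i = 0; i < 3; i++) { b.c[i].real = i + 1; b.c[i].imag = -1.0; }

  mult_su3_mat_vec(&a, &b, &c);   // c = a * b; with a = I, expect c == b
  for (int i = 0; i < 3; i++)
    printf("c[%d] = (%g, %g)\n", i, c.c[i].real, c.c[i].imag);
  return 0;
}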
sha256.c
/*
 * Copyright (c) 2010, Michal Tomlein
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Original code by Angel Marin, Paul Johnston.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>

#include "sha256.h"

typedef struct {
    uint32_t * data;
    unsigned int length;
} uint32_a;

void uint32_a_init(uint32_a * a, unsigned int length)
{
    a->data = (uint32_t *)calloc(length, sizeof(uint32_t));
    a->length = length;
}

void uint32_a_resize(uint32_a * a, unsigned int length)
{
    if (a->length == length)
        return;
    a->data = (uint32_t *)realloc(a->data, length * sizeof(uint32_t));
    for (unsigned int i = a->length; i < length; i++)
        a->data[i] = 0;
    a->length = length;
}

void uint32_a_free(uint32_a * a)
{
    free(a->data);
    a->data = NULL;
    a->length = 0;
}

int uint32_a_eq(uint32_a a, uint32_a b)
{
    if (a.length != b.length)
        return 0;
    for (unsigned int i = 0; i < a.length; ++i)
        if (a.data[i] != b.data[i])
            return 0;
    return 1;
}

uint32_a bytes_to_binb(const char * bytes, unsigned int length)
{
    uint32_t mask = (1 << 8) - 1;
    length *= 8;
    uint32_a bin;
    uint32_a_init(&bin, ((length - 1) >> 5) + 1);
    for (uint32_t i = 0; i < length; i += 8) {
        bin.data[i >> 5] |= (bytes[i / 8] & mask) << (32 - 8 - i % 32);
    }
    return bin;
}

char * binb_to_hex(const uint32_t binarray[], uint32_t length, char * result)
{
    const char * hex_tab = "0123456789abcdef";
    if (!result)
        result = (char *)malloc(65 * sizeof(char));
    result[64] = '\0';
    length *= 4;
    for (uint32_t i = 0; i < length; i++) {
        result[2 * i]     = hex_tab[(binarray[i >> 2] >> ((3 - i % 4) * 8 + 4)) & 0xF];
        result[2 * i + 1] = hex_tab[(binarray[i >> 2] >> ((3 - i % 4) * 8)) & 0xF];
    }
    return result;
}

uint32_t rotr(uint32_t x, int n)
{
    if (n < 32)
        return (x >> n) | (x << (32 - n));
    return x;
}

uint32_t ch(uint32_t x, uint32_t y, uint32_t z)  { return (x & y) ^ (~x & z); }
uint32_t maj(uint32_t x, uint32_t y, uint32_t z) { return (x & y) ^ (x & z) ^ (y & z); }
uint32_t sigma0(uint32_t x) { return rotr(x, 2) ^ rotr(x, 13) ^ rotr(x, 22); }
uint32_t sigma1(uint32_t x) { return rotr(x, 6) ^ rotr(x, 11) ^ rotr(x, 25); }
uint32_t gamma0(uint32_t x) { return rotr(x, 7) ^ rotr(x, 18) ^ (x >> 3); }
uint32_t gamma1(uint32_t x) { return rotr(x, 17) ^ rotr(x, 19) ^ (x >> 10); }

void sha256core(uint32_a message_a, uint32_t source_binlength, uint32_t H[])
{
    uint32_t K[] = {
        0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
        0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
        0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
        0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
        0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
        0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
        0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
        0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
        0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
        0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
        0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
        0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
        0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
        0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
        0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
        0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2
    };

    H[0] = 0x6A09E667; H[1] = 0xBB67AE85; H[2] = 0x3C6EF372; H[3] = 0xA54FF53A;
    H[4] = 0x510E527F; H[5] = 0x9B05688C; H[6] = 0x1F83D9AB; H[7] = 0x5BE0CD19;

    uint32_a W_a;
    uint32_a_init(&W_a, 64);
    uint32_t * W = W_a.data;
    uint32_t a, b, c, d, e, f, g, h;
    uint32_t T1, T2;

    uint32_t message_length = (((source_binlength + 1 + 64) >> 9) << 4) + 16;
    uint32_a_resize(&message_a, message_length);
    uint32_t * message = message_a.data;
    message[source_binlength >> 5] |= 0x80 << (24 - source_binlength % 32);
    message[message_length - 1] = source_binlength;

    for (uint32_t i = 0; i < message_length; i += 16) {
        a = H[0]; b = H[1]; c = H[2]; d = H[3];
        e = H[4]; f = H[5]; g = H[6]; h = H[7];

        for (uint32_t t = 0; t < 64; t++) {
            if (t < 16)
                W[t] = message[t + i];
            else
                W[t] = gamma1(W[t - 2]) + W[t - 7] + gamma0(W[t - 15]) + W[t - 16];
            T1 = h + sigma1(e) + ch(e, f, g) + K[t] + W[t];
            T2 = sigma0(a) + maj(a, b, c);
            h = g; g = f; f = e; e = d + T1;
            d = c; c = b; b = a; a = T1 + T2;
        }

        H[0] += a; H[1] += b; H[2] += c; H[3] += d;
        H[4] += e; H[5] += f; H[6] += g; H[7] += h;
    }

    uint32_a_free(&message_a);
    uint32_a_free(&W_a);
}

char * sha256(const char * source, unsigned int length, char * result)
{
    uint32_t H[8];
    sha256core(bytes_to_binb(source, length), length * 8, H);
    return binb_to_hex(H, 8, result);
}

char * reverse_sha256(const char * hash, int min_length, int max_length,
                      const char * character_set)
{
    int character_set_length = strlen(character_set), str_length;
    char * str_hash;
    char * str;
    int i, t, range;
    int64_t id, remainder;
    int64_t max_id = pow(character_set_length, max_length);
    int done = 0;
    char * result = NULL;
    /* str_length is recomputed per iteration, so it must be private as
       well; sharing it across threads would be a data race. */
#pragma omp parallel private(id, i, t, range, remainder, str, str_hash, str_length)
    {
        str_hash = (char *)malloc(65 * sizeof(char));
        str = (char *)malloc((max_length + 1) * sizeof(char));
        str[max_length] = '\0';
#pragma omp for schedule(dynamic)
        for (id = 0; id < max_id; ++id) {
#pragma omp flush (done)
            if (done)
                continue;
            str_length = id == 0 ? 0 : (floor(log(id) / log(character_set_length)) + 1);
            t = 2 * max_length - str_length - 1;
            for (i = 0; i < max_length - str_length; ++i)
                str[i] = character_set[0];
            for (remainder = id; remainder && i < max_length; ++i) {
                str[t - i] = character_set[remainder % character_set_length];
                remainder /= character_set_length;
            }
            range = max_length - (str_length > min_length ? str_length : min_length);
            for (i = 0; i <= range; ++i) {
                if (strcmp(hash, sha256(str + i, max_length - i, str_hash)) == 0) {
                    result = (char *)malloc((max_length - i + 1) * sizeof(char));
                    strncpy(result, str + i, max_length - i);
                    result[max_length - i] = '\0';
                    done = 1;
                    break;
                }
            }
        }
        free(str_hash);
        free(str);
    } /* end parallel */
    return result;
}
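A minimal driver for the API above. The main() is an illustrative assumption, not part of the original file; hashing the standard test vector "abc" should print ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad.

/* sha256_demo.c -- illustrative only; link against sha256.c above. */
#include <stdio.h>

extern char * sha256(const char * source, unsigned int length, char * result);

int main(void)
{
    char digest[65];                 /* 64 hex chars + NUL */
    printf("%s\n", sha256("abc", 3, digest));
    return 0;
}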
hmacSHA1_fmt_plug.c
/*
 * This software is Copyright (c) 2012, 2013 magnum, and it is hereby released
 * to the general public under the following terms: Redistribution and use in
 * source and binary forms, with or without modification, are permitted.
 *
 * Originally based on hmac-md5 by Bartavelle
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_hmacSHA1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_hmacSHA1);
#else

#include <string.h>

#include "arch.h"

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // tuned for i7 using SSE2 and w/o HT
#endif
#endif

#include "misc.h"
#include "common.h"
#include "formats.h"
#include "sha.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "memdbg.h"

#define FORMAT_LABEL            "HMAC-SHA1"
#define FORMAT_NAME             ""

#ifdef SIMD_COEF_32
#define SHA1_N                  (SIMD_PARA_SHA1 * SIMD_COEF_32)
#endif

#define ALGORITHM_NAME          "password is key, SHA1 " SHA1_ALGORITHM_NAME

#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        0

#define PLAINTEXT_LENGTH        125

#define PAD_SIZE                64
#define BINARY_SIZE             20
#define BINARY_ALIGN            sizeof(ARCH_WORD_32)
#define SALT_LENGTH             PAD_SIZE
#define SALT_ALIGN              MEM_ALIGN_NONE
#define CIPHERTEXT_LENGTH       (2 * SALT_LENGTH + 2 * BINARY_SIZE)

#define HEXCHARS                "0123456789abcdef"

#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT      SHA1_N
#define MAX_KEYS_PER_CRYPT      SHA1_N
#define GETPOS(i, index)        ((index & (SIMD_COEF_32 - 1)) * 4 + ((i) & (0xffffffff - 3)) * SIMD_COEF_32 + (3 - ((i) & 3)) + (unsigned int)index/SIMD_COEF_32 * SHA_BUF_SIZ * 4 * SIMD_COEF_32)
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

static struct fmt_tests tests[] = {
	{"The quick brown fox jumps over the lazy dog#de7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9", "key"},
	{"#fbdb1d1b18aa6c08324b7d64b71fb76370690e1d", ""},
	{"Beppe#Grillo#DEBBDB4D549ABE59FAB67D0FB76B76FDBC4431F1", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."},
	{"7oTwG04WUjJ0BTDFFIkTJlgl#293b75c1f28def530c17fc8ae389008179bf4091", "late*night"},
	// from the test suite
	{"D2hIU7fdd78WARm5dt95k6MD#e741a6100ccfd1205a8ffe1321b61fc5aa06f6db", "123"},
	{"6Fv5kYoxuEuroTkagbf3ZRsV#370edad3540b1ad3e96b03ccf3956645306074b7", "123456789"},
	{"3uqqtMBC7vzh9tdVMPJ9bAwE#65ed35cf94e2180d6a797e5ad5e4175891427572", "passWOrd"},
	{NULL}
};

#ifdef SIMD_COEF_32
#define cur_salt hmacsha1_cur_salt
static unsigned char *crypt_key;
static unsigned char *ipad, *prep_ipad;
static unsigned char *opad, *prep_opad;
JTR_ALIGN(MEM_ALIGN_SIMD) unsigned char cur_salt[SHA_BUF_SIZ * 4 * SHA1_N];
static int bufsize;
#else
static unsigned char cur_salt[SALT_LENGTH];
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static unsigned char (*ipad)[PAD_SIZE];
static unsigned char (*opad)[PAD_SIZE];
static SHA_CTX *ipad_ctx;
static SHA_CTX *opad_ctx;
#endif

static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static int new_keys;

#define SALT_SIZE               sizeof(cur_salt)

#ifdef SIMD_COEF_32
static void clear_keys(void)
{
	memset(ipad, 0x36, bufsize);
	memset(opad, 0x5C, bufsize);
}
#endif

static void init(struct fmt_main *self)
{
#ifdef SIMD_COEF_32
	unsigned int i;
#endif
#ifdef _OPENMP
	/* omp_get_num_threads() returns 1 outside a parallel region, which
	 * would make the OMP scaling a no-op; use omp_get_max_threads(). */
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	bufsize = sizeof(*opad) * self->params.max_keys_per_crypt * SHA_BUF_SIZ * 4;
	crypt_key = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	ipad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	opad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	prep_ipad = mem_calloc_align(self->params.max_keys_per_crypt * BINARY_SIZE,
	                             sizeof(*prep_ipad), MEM_ALIGN_SIMD);
	prep_opad = mem_calloc_align(self->params.max_keys_per_crypt * BINARY_SIZE,
	                             sizeof(*prep_opad), MEM_ALIGN_SIMD);
	for (i = 0; i < self->params.max_keys_per_crypt; ++i) {
		crypt_key[GETPOS(BINARY_SIZE, i)] = 0x80;
		((unsigned int*)crypt_key)[15 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32] = (BINARY_SIZE + 64) << 3;
	}
	clear_keys();
#else
	crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key));
	ipad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad));
	opad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad));
	ipad_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad_ctx));
	opad_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad_ctx));
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain));
}

static void done(void)
{
	MEM_FREE(saved_plain);
#ifdef SIMD_COEF_32
	MEM_FREE(prep_opad);
	MEM_FREE(prep_ipad);
#else
	MEM_FREE(opad_ctx);
	MEM_FREE(ipad_ctx);
#endif
	MEM_FREE(opad);
	MEM_FREE(ipad);
	MEM_FREE(crypt_key);
}

static int valid(char *ciphertext, struct fmt_main *self)
{
	int pos, i;
	char *p;

	p = strrchr(ciphertext, '#'); // allow # in salt
	if (!p || p > &ciphertext[strlen(ciphertext) - 1])
		return 0;
	i = (int)(p - ciphertext);
#if SIMD_COEF_32
	if(i > 55)
		return 0;
#else
	if(i > SALT_LENGTH)
		return 0;
#endif
	pos = i + 1;
	if (strlen(ciphertext + pos) != BINARY_SIZE * 2)
		return 0;
	for (i = pos; i < BINARY_SIZE * 2 + pos; i++) {
		if (!((('0' <= ciphertext[i]) && (ciphertext[i] <= '9')) ||
		      (('a' <= ciphertext[i]) && (ciphertext[i] <= 'f')) ||
		      (('A' <= ciphertext[i]) && (ciphertext[i] <= 'F'))))
			return 0;
	}
	return 1;
}

static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CIPHERTEXT_LENGTH + 1];

	strnzcpy(out, ciphertext, CIPHERTEXT_LENGTH + 1);
	strlwr(strrchr(out, '#'));
	return out;
}

static void set_salt(void *salt)
{
	memcpy(&cur_salt, salt, SALT_SIZE);
}

static void set_key(char *key, int index)
{
	int len;
#ifdef SIMD_COEF_32
	ARCH_WORD_32 *ipadp = (ARCH_WORD_32*)&ipad[GETPOS(3, index)];
	ARCH_WORD_32 *opadp = (ARCH_WORD_32*)&opad[GETPOS(3, index)];
	const ARCH_WORD_32 *keyp = (ARCH_WORD_32*)key;
	unsigned int temp;

	len = strlen(key);
	memcpy(saved_plain[index], key, len);
	saved_plain[index][len] = 0;

	if (len > PAD_SIZE) {
		unsigned char k0[BINARY_SIZE];
		SHA_CTX ctx;
		int i;

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, key, len);
		SHA1_Final(k0, &ctx);

		keyp = (unsigned int*)k0;
		for(i = 0; i < BINARY_SIZE / 4; i++, ipadp += SIMD_COEF_32, opadp += SIMD_COEF_32)
		{
			temp = JOHNSWAP(*keyp++);
			*ipadp ^= temp;
			*opadp ^= temp;
		}
	}
	else
	while(((temp = JOHNSWAP(*keyp++)) & 0xff000000)) {
		if (!(temp & 0x00ff0000) || !(temp & 0x0000ff00))
		{
			((unsigned short*)ipadp)[1] ^= (unsigned short)(temp >> 16);
			((unsigned short*)opadp)[1] ^= (unsigned short)(temp >> 16);
			break;
		}
		*ipadp ^= temp;
		*opadp ^= temp;
		if (!(temp & 0x000000ff))
			break;
		ipadp += SIMD_COEF_32;
		opadp += SIMD_COEF_32;
	}
#else
	int i;

	len = strlen(key);
	memcpy(saved_plain[index], key, len);
	saved_plain[index][len] = 0;

	memset(ipad[index], 0x36, PAD_SIZE);
	memset(opad[index], 0x5C, PAD_SIZE);

	if (len > PAD_SIZE) {
		SHA_CTX ctx;
		unsigned char k0[BINARY_SIZE];

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, key, len);
		SHA1_Final(k0, &ctx);

		len = BINARY_SIZE;

		for(i = 0; i < len; i++)
		{
			ipad[index][i] ^= k0[i];
			opad[index][i] ^= k0[i];
		}
	}
	else
	for(i = 0; i < len; i++)
	{
		ipad[index][i] ^= key[i];
		opad[index][i] ^= key[i];
	}
#endif
	new_keys = 1;
}

static char *get_key(int index)
{
	return saved_plain[index];
}

static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_32
	unsigned int x, y = 0;

	for(; y < (unsigned int)(count + SIMD_COEF_32 - 1) / SIMD_COEF_32; y++)
		for(x = 0; x < SIMD_COEF_32; x++)
		{
			// NOTE crypt_key is in input format (4 * SHA_BUF_SIZ * SIMD_COEF_32)
			if(((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[x + y * SIMD_COEF_32 * SHA_BUF_SIZ])
				return 1;
		}
	return 0;
#else
	int index = 0;

#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
	for (index = 0; index < count; index++)
#endif
		if (((ARCH_WORD_32*)binary)[0] == crypt_key[index][0])
			return 1;
	return 0;
#endif
}

static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
	int i;
	for(i = 0; i < (BINARY_SIZE/4); i++)
		// NOTE crypt_key is in input format (4 * SHA_BUF_SIZ * SIMD_COEF_32)
		if (((ARCH_WORD_32*)binary)[i] != ((ARCH_WORD_32*)crypt_key)[i * SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32])
			return 0;
	return 1;
#else
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}

static int cmp_exact(char *source, int index)
{
	return (1);
}

static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#if _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_32
		if (new_keys) {
			SIMDSHA1body(&ipad[index * SHA_BUF_SIZ * 4],
			             (unsigned int*)&prep_ipad[index * BINARY_SIZE],
			             NULL, SSEi_MIXED_IN);
			SIMDSHA1body(&opad[index * SHA_BUF_SIZ * 4],
			             (unsigned int*)&prep_opad[index * BINARY_SIZE],
			             NULL, SSEi_MIXED_IN);
		}
		SIMDSHA1body(cur_salt,
		             (unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],
		             (unsigned int*)&prep_ipad[index * BINARY_SIZE],
		             SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
		SIMDSHA1body(&crypt_key[index * SHA_BUF_SIZ * 4],
		             (unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],
		             (unsigned int*)&prep_opad[index * BINARY_SIZE],
		             SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
#else
		SHA_CTX ctx;

		if (new_keys) {
			SHA1_Init(&ipad_ctx[index]);
			SHA1_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);
			SHA1_Init(&opad_ctx[index]);
			SHA1_Update(&opad_ctx[index], opad[index], PAD_SIZE);
		}

		memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
		SHA1_Update(&ctx, cur_salt, strlen((char*)cur_salt));
		SHA1_Final((unsigned char*) crypt_key[index], &ctx);

		memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
		SHA1_Update(&ctx, crypt_key[index], BINARY_SIZE);
		SHA1_Final((unsigned char*) crypt_key[index], &ctx);
#endif
	}
	new_keys = 0;
	return count;
}

static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD_32 dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	// allow # in salt
	p = strrchr(ciphertext, '#') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

#ifdef SIMD_COEF_32
	alter_endianity(out, BINARY_SIZE);
#endif
	return (void*)out;
}

static void *get_salt(char *ciphertext)
{
	static unsigned char salt[SALT_LENGTH];
#ifdef SIMD_COEF_32
	unsigned int i = 0;
	unsigned int j;
	unsigned total_len = 0;
#endif
	memset(salt, 0, sizeof(salt));
	// allow # in salt
	memcpy(salt, ciphertext, strrchr(ciphertext, '#') - ciphertext);
#ifdef SIMD_COEF_32
	while(((unsigned char*)salt)[total_len])
	{
		for (i = 0; i < SHA1_N; ++i)
			cur_salt[GETPOS(total_len, i)] = ((unsigned char*)salt)[total_len];
		++total_len;
	}
	for (i = 0; i < SHA1_N; ++i)
		cur_salt[GETPOS(total_len, i)] = 0x80;
	for (j = total_len + 1; j < SALT_LENGTH; ++j)
		for (i = 0; i < SHA1_N; ++i)
			cur_salt[GETPOS(j, i)] = 0;
	for (i = 0; i < SHA1_N; ++i)
		((unsigned int*)cur_salt)[15 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32] = (total_len + 64) << 3;
	return cur_salt;
#else
	return salt;
#endif
}

#ifdef SIMD_COEF_32
// NOTE crypt_key is in input format (4 * SHA_BUF_SIZ * SIMD_COEF_32)
#define HASH_OFFSET (index & (SIMD_COEF_32 - 1)) + ((unsigned int)index / SIMD_COEF_32) * SIMD_COEF_32 * SHA_BUF_SIZ
static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_OFFSET] & PH_MASK_0; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_OFFSET] & PH_MASK_1; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_OFFSET] & PH_MASK_2; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_OFFSET] & PH_MASK_3; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_OFFSET] & PH_MASK_4; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_OFFSET] & PH_MASK_5; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_OFFSET] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
#endif

struct fmt_main fmt_hmacSHA1 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
#ifdef SIMD_COEF_32
		clear_keys,
#else
		fmt_default_clear_keys,
#endif
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
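For reference, a straight-line HMAC-SHA1 (RFC 2104) that mirrors what this format computes with its 0x36/0x5C ipad and opad buffers. This standalone sketch is an addition for illustration, not John the Ripper code; it assumes OpenSSL's legacy SHA1_* API and reproduces the first test vector above.

/* hmac_sha1_ref.c -- illustrative only; build with -lcrypto. */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

static void hmac_sha1_ref(const unsigned char *key, size_t key_len,
                          const unsigned char *msg, size_t msg_len,
                          unsigned char out[20])
{
	unsigned char k0[64] = { 0 }, pad[64], hash[20];
	SHA_CTX ctx;
	size_t i;

	/* Keys longer than one SHA1 block are hashed first, exactly as in
	   set_key() above. */
	if (key_len > 64) {
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, key, key_len);
		SHA1_Final(k0, &ctx);
	} else
		memcpy(k0, key, key_len);

	for (i = 0; i < 64; i++) pad[i] = k0[i] ^ 0x36;   /* ipad */
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, pad, 64);
	SHA1_Update(&ctx, msg, msg_len);
	SHA1_Final(hash, &ctx);                            /* inner hash */

	for (i = 0; i < 64; i++) pad[i] = k0[i] ^ 0x5C;   /* opad */
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, pad, 64);
	SHA1_Update(&ctx, hash, 20);
	SHA1_Final(out, &ctx);                             /* outer hash */
}

/* Expected output (first test vector above):
   de7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9 */
int main(void)
{
	const char *m = "The quick brown fox jumps over the lazy dog";
	unsigned char mac[20];
	int i;

	hmac_sha1_ref((const unsigned char *)"key", 3,
	              (const unsigned char *)m, strlen(m), mac);
	for (i = 0; i < 20; i++) printf("%02x", mac[i]);
	printf("\n");
	return 0;
}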
GB_binop__lt_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lt_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_08__lt_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__lt_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_04__lt_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lt_uint32)
// A*D function (colscale):         GB (_AxD__lt_uint32)
// D*A function (rowscale):         GB (_DxB__lt_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__lt_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__lt_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lt_uint32)
// C=scalar+B                       GB (_bind1st__lt_uint32)
// C=scalar+B'                      GB (_bind1st_tran__lt_uint32)
// C=A+scalar                       GB (_bind2nd__lt_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__lt_uint32)

// C type:    bool
// A type:    uint32_t
// A pattern? 0
// B type:    uint32_t
// B pattern? 0

// BinaryOp:  cij = (aij < bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LT || GxB_NO_UINT32 || GxB_NO_LT_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__lt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lt_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lt_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lt_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lt_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lt_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lt_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    uint32_t aij = GBX (Ax, pA, false) ;            \
    Cx [pC] = (x < aij) ;                           \
}

GrB_Info GB (_bind1st_tran__lt_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    uint32_t aij = GBX (Ax, pA, false) ;            \
    Cx [pC] = (aij < y) ;                           \
}

GrB_Info GB (_bind2nd_tran__lt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
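As a usage sketch: the kernel family above is what SuiteSparse:GraphBLAS dispatches to when a user requests an elementwise comparison with the built-in GrB_LT_UINT32 operator. The small driver below is illustrative, not part of the generated file; it assumes a standard GraphBLAS installation providing GraphBLAS.h.

/* lt_uint32_demo.c -- illustrative only; link against libgraphblas. */
#include <stdio.h>
#include <stdbool.h>
#include "GraphBLAS.h"

int main(void)
{
    GrB_init(GrB_NONBLOCKING);

    GrB_Matrix A, B, C;
    GrB_Matrix_new(&A, GrB_UINT32, 2, 2);
    GrB_Matrix_new(&B, GrB_UINT32, 2, 2);
    GrB_Matrix_new(&C, GrB_BOOL, 2, 2);
    GrB_Matrix_setElement_UINT32(A, 1, 0, 0);
    GrB_Matrix_setElement_UINT32(B, 2, 0, 0);

    /* C = A .< B elementwise; for this exact type/op combination the
       library can reach kernels such as GB_AemultB_08__lt_uint32 above. */
    GrB_Matrix_eWiseMult_BinaryOp(C, NULL, NULL, GrB_LT_UINT32, A, B, NULL);

    bool cij = false;
    GrB_Matrix_extractElement_BOOL(&cij, C, 0, 0);
    printf("C(0,0) = %d\n", (int) cij);   /* prints 1, since 1 < 2 */

    GrB_Matrix_free(&A);
    GrB_Matrix_free(&B);
    GrB_Matrix_free(&C);
    GrB_finalize();
    return 0;
}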
proj_EM_step.c
/*
  NAME:
     proj_EM_step
  PURPOSE:
     one proj_EM step
  CALLING SEQUENCE:
     proj_EM_step(struct datapoint * data, int N, struct gaussian * gaussians,
     int K, bool * fixamp, bool * fixmean, bool * fixcovar,
     double * avgloglikedata, bool likeonly, double w, bool noproj,
     bool diagerrs, bool noweight)
  INPUT:
     data         - the data
     N            - number of data points
     gaussians    - model gaussians
     K            - number of model gaussians
     fixamp       - fix the amplitude?
     fixmean      - fix the mean?
     fixcovar     - fix the covar?
     likeonly     - only compute likelihood?
     w            - regularization parameter
     noproj       - don't perform any projections
     diagerrs     - the data->SS errors-squared are diagonal
     noweight     - don't use data-weights
  OUTPUT:
     avgloglikedata - average loglikelihood of the data
  REVISION HISTORY:
     2008-09-21 - Written Bovy
     2010-03-01 Added noproj option - Bovy
     2010-04-01 Added noweight option - Bovy
*/
#ifdef _OPENMP
#include <omp.h>
#endif
#include <math.h>
#include <float.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_blas.h>
#include "proj_gauss_mixtures.h"

#define CHUNKSIZE 1

void proj_EM_step(struct datapoint * data, int N,
                  struct gaussian * gaussians, int K,
                  bool * fixamp, bool * fixmean, bool * fixcovar,
                  double * avgloglikedata, bool likeonly, double w,
                  bool noproj, bool diagerrs, bool noweight){
  *avgloglikedata = 0.0;
  //struct timeval start,time1, time2, time3, time4, time5,end;
  struct datapoint * thisdata;
  struct gaussian * thisgaussian;
  struct gaussian * thisnewgaussian;
  int signum,di;
  double exponent;
  double currqij;
  struct modelbs * thisbs;
  int d = (gaussians->VV)->size1;//dim of mm

  //gettimeofday(&start,NULL);
  //Initialize new parameters
  int kk;
  for (kk=0; kk != K*nthreads; ++kk){
    newgaussians->alpha = 0.0;
    gsl_vector_set_zero(newgaussians->mm);
    gsl_matrix_set_zero(newgaussians->VV);
    ++newgaussians;
  }
  newgaussians= startnewgaussians;

  //gettimeofday(&time1,NULL);
  //check whether for some Gaussians none of the parameters get updated
  double sumfixedamps= 0;
  bool * allfixed = (bool *) calloc(K, sizeof (bool) );
  double ampnorm;
  for (kk=0; kk != K; ++kk){
    if (*fixamp == true){
      sumfixedamps += gaussians->alpha;
    }
    ++gaussians;
    if (*fixamp == true && *fixmean == true && *fixcovar == true)
      *allfixed= true;
    ++allfixed;
    ++fixamp;
    ++fixmean;
    ++fixcovar;
  }
  gaussians -= K;
  allfixed -= K;
  fixamp -= K;
  fixmean -= K;
  fixcovar -= K;

  //gettimeofday(&time2,NULL);
  //now loop over data and gaussians to update the model parameters
  int ii, jj, ll;
  double sumSV;
  int chunk;
  chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk) \
  private(tid,di,signum,exponent,ii,jj,ll,kk,Tij,Tij_inv,wminusRm,p,VRTTinv,sumSV,VRT,TinvwminusRm,Rtrans,thisgaussian,thisdata,thisbs,thisnewgaussian,currqij) \
  shared(newgaussians,gaussians,bs,allfixed,K,d,data,avgloglikedata)
  for (ii = 0 ; ii < N; ++ii){
    thisdata= data+ii;
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    di = (thisdata->SS)->size1;
    //printf("Datapoint has dimension %i\n",di);
    p = gsl_permutation_alloc (di);
    wminusRm = gsl_vector_alloc (di);
    TinvwminusRm = gsl_vector_alloc (di);
    Tij = gsl_matrix_alloc(di,di);
    Tij_inv = gsl_matrix_alloc(di,di);
    if ( ! noproj ) VRT = gsl_matrix_alloc(d,di);
    VRTTinv = gsl_matrix_alloc(d,di);
    if ( ! noproj ) Rtrans = gsl_matrix_alloc(d,di);
    for (jj = 0; jj != K; ++jj){
      //printf("%i,%i\n",(thisdata->ww)->size,wminusRm->size);
      gsl_vector_memcpy(wminusRm,thisdata->ww);
      //fprintf(stdout,"Where is the seg fault?\n");
      thisgaussian= gaussians+jj;
      //prepare...
      if ( ! noproj ) {
        if ( diagerrs ) {
          gsl_matrix_set_zero(Tij);
          for (ll = 0; ll != di; ++ll)
            gsl_matrix_set(Tij,ll,ll,gsl_matrix_get(thisdata->SS,ll,0));}
        else gsl_matrix_memcpy(Tij,thisdata->SS);
      }
      //Calculate Tij
      if ( ! noproj ) {
        gsl_matrix_transpose_memcpy(Rtrans,thisdata->RR);
        gsl_blas_dsymm(CblasLeft,CblasUpper,1.0,thisgaussian->VV,Rtrans,0.0,VRT);//Only the upper right part of VV is calculated --> use only that part
        gsl_blas_dgemm(CblasNoTrans,CblasNoTrans,1.0,thisdata->RR,VRT,1.0,Tij);}//This is Tij
      else {
        if ( diagerrs ) {
          for (kk = 0; kk != d; ++kk){
            gsl_matrix_set(Tij,kk,kk,gsl_matrix_get(thisdata->SS,kk,0)+gsl_matrix_get(thisgaussian->VV,kk,kk));
            for (ll = kk+1; ll != d; ++ll){
              sumSV= gsl_matrix_get(thisgaussian->VV,kk,ll);
              gsl_matrix_set(Tij,kk,ll,sumSV);
              gsl_matrix_set(Tij,ll,kk,sumSV);}}}
        else {
          for (kk = 0; kk != d; ++kk){
            gsl_matrix_set(Tij,kk,kk,gsl_matrix_get(thisdata->SS,kk,kk)+gsl_matrix_get(thisgaussian->VV,kk,kk));
            for (ll = kk+1; ll != d; ++ll){
              sumSV= gsl_matrix_get(thisdata->SS,kk,ll)+gsl_matrix_get(thisgaussian->VV,kk,ll);
              gsl_matrix_set(Tij,kk,ll,sumSV);
              gsl_matrix_set(Tij,ll,kk,sumSV);}}}}
      //gsl_matrix_add(Tij,thisgaussian->VV);}
      //Calculate LU decomp of Tij and Tij inverse
      gsl_linalg_LU_decomp(Tij,p,&signum);
      gsl_linalg_LU_invert(Tij,p,Tij_inv);
      //Calculate Tijinv*(w-Rm)
      if ( ! noproj )
        gsl_blas_dgemv(CblasNoTrans,-1.0,thisdata->RR,thisgaussian->mm,1.0,wminusRm);
      else gsl_vector_sub(wminusRm,thisgaussian->mm);
      //printf("wminusRm = %f\t%f\n",gsl_vector_get(wminusRm,0),gsl_vector_get(wminusRm,1));
      gsl_blas_dsymv(CblasUpper,1.0,Tij_inv,wminusRm,0.0,TinvwminusRm);
      //printf("TinvwminusRm = %f\t%f\n",gsl_vector_get(TinvwminusRm,0),gsl_vector_get(TinvwminusRm,1));
      gsl_blas_ddot(wminusRm,TinvwminusRm,&exponent);
      //printf("Exponent = %f\nDet = %f\n",exponent,gsl_linalg_LU_det(Tij,signum));
      gsl_matrix_set(qij,ii,jj,log(thisgaussian->alpha) - di * halflogtwopi - 0.5 * gsl_linalg_LU_lndet(Tij) -0.5 * exponent);//This is actually the log of qij
      //printf("Here we have = %f\n",gsl_matrix_get(qij,ii,jj));
      //Now calculate bij and Bij
      thisbs= bs+tid*K+jj;
      gsl_vector_memcpy(thisbs->bbij,thisgaussian->mm);
      //printf("%i,%i,%i\n",tid,ii,jj);
      if ( ! noproj )
        gsl_blas_dgemv(CblasNoTrans,1.0,VRT,TinvwminusRm,1.0,thisbs->bbij);
      else gsl_blas_dsymv(CblasUpper,1.0,thisgaussian->VV,TinvwminusRm,1.0,thisbs->bbij);
      //printf("bij = %f\t%f\n",gsl_vector_get(bs->bbij,0),gsl_vector_get(bs->bbij,1));
      gsl_matrix_memcpy(thisbs->BBij,thisgaussian->VV);
      if ( ! noproj ) {
        gsl_blas_dgemm(CblasNoTrans,CblasNoTrans,1.0,VRT,Tij_inv,0.0,VRTTinv);
        gsl_blas_dgemm(CblasNoTrans,CblasTrans,-1.0,VRTTinv,VRT,1.0,thisbs->BBij);}
      else {
        gsl_blas_dsymm(CblasLeft,CblasUpper,1.0,thisgaussian->VV,Tij_inv,0.0,VRTTinv);
        gsl_blas_dsymm(CblasRight,CblasUpper,-1.0,thisgaussian->VV,VRTTinv,1.0,thisbs->BBij);}
      gsl_blas_dsyr(CblasUpper,1.0,thisbs->bbij,thisbs->BBij);//This is bijbijT + Bij, which is the relevant quantity
    }
    gsl_permutation_free (p);
    gsl_vector_free(wminusRm);
    gsl_vector_free(TinvwminusRm);
    gsl_matrix_free(Tij);
    gsl_matrix_free(Tij_inv);
    if ( ! noproj ) gsl_matrix_free(VRT);
    gsl_matrix_free(VRTTinv);
    if ( ! noproj ) gsl_matrix_free(Rtrans);
    //Again loop over the gaussians to update the model(can this be more efficient? in any case this is not so bad since generally K << N)
#pragma omp critical
    {
      //Normalize qij properly
      *avgloglikedata += normalize_row(qij,ii,true,noweight,thisdata->logweight);
    }
    //printf("qij = %f\t%f\n",gsl_matrix_get(qij,ii,0),gsl_matrix_get(qij,ii,1));
    //printf("avgloglgge = %f\n",*avgloglikedata);
    for (jj = 0; jj != K; ++jj){
      currqij = exp(gsl_matrix_get(qij,ii,jj));
      //printf("Current qij = %f\n",currqij);
      thisbs= bs+tid*K+jj;
      thisnewgaussian= newgaussians+tid*K+jj;
      gsl_vector_scale(thisbs->bbij,currqij);
      gsl_vector_add(thisnewgaussian->mm,thisbs->bbij);
      gsl_matrix_scale(thisbs->BBij,currqij);
      gsl_matrix_add(thisnewgaussian->VV,thisbs->BBij);
      //printf("bij = %f\t%f\n",gsl_vector_get(bs->bbij,0),gsl_vector_get(bs->bbij,1));
      //printf("Bij = %f\t%f\t%f\n",gsl_matrix_get(bs->BBij,0,0),gsl_matrix_get(bs->BBij,1,1),gsl_matrix_get(bs->BBij,0,1));
    }
  }
  *avgloglikedata /= N;
  if (likeonly) {
    free(allfixed);
    return;
  }

  //gettimeofday(&time3,NULL);
  //gather newgaussians
  if ( nthreads != 1 )
#pragma omp parallel for schedule(static,chunk) \
  private(ll,jj)
    for (jj = 0; jj < K; ++jj)
      for (ll = 1; ll != nthreads; ++ll) {
        gsl_vector_add((newgaussians+jj)->mm,(newgaussians+ll*K+jj)->mm);
        gsl_matrix_add((newgaussians+jj)->VV,(newgaussians+ll*K+jj)->VV);
      }

  //gettimeofday(&time4,NULL);
  //Now update the parameters
  //Thus, loop over gaussians again!
  double qj;
#pragma omp parallel for schedule(dynamic,chunk) \
  private(jj,qj)
  for (jj = 0; jj < K; ++jj){
    if (*(allfixed+jj)){
      continue;
    }
    else {
      qj = exp(logsum(qij,jj,false));
      (qj < DBL_MIN) ? qj = 0: 0;
      //printf("qj = %f\n",qj);
      if (*(fixamp+jj) != true) {
        (gaussians+jj)->alpha = qj;
        if (qj == 0) {//rethink this
          *(fixamp+jj)=1;
          *(fixmean+jj)=1;
          *(fixcovar+jj)=1;
          continue;
        }
      }
      gsl_vector_scale((newgaussians+jj)->mm,1.0/qj);
      if (*(fixmean+jj) != true){
        gsl_vector_memcpy((gaussians+jj)->mm,(newgaussians+jj)->mm);
      }
      if (*(fixcovar+jj) != true){
        //	if (*(fixmean+jj) != true)
        //	  gsl_blas_dsyr(CblasUpper,-qj,(gaussians+jj)->mm,(newgaussians+jj)->VV);
        //	else {
        gsl_blas_dsyr(CblasUpper,qj,(gaussians+jj)->mm,(newgaussians+jj)->VV);
        gsl_blas_dsyr2(CblasUpper,-qj,(gaussians+jj)->mm,(newgaussians+jj)->mm,(newgaussians+jj)->VV);
        //	}
        if (w > 0.){
          gsl_matrix_add((newgaussians+jj)->VV,I);
          gsl_matrix_scale((newgaussians+jj)->VV,1.0/(qj+1.0));
        }
        else gsl_matrix_scale((newgaussians+jj)->VV,1.0/qj);
        gsl_matrix_memcpy((gaussians+jj)->VV,(newgaussians+jj)->VV);
      }
    }
  }

  //gettimeofday(&time5,NULL);
  //normalize the amplitudes
  if ( sumfixedamps == 0. && noweight ){
    for (kk=0; kk != K; ++kk){
      if ( noweight ) (gaussians++)->alpha /= (double) N;
    }
  }
  else {
    ampnorm= 0;
    for (kk=0; kk != K; ++kk){
      if (*(fixamp++) == false) ampnorm += gaussians->alpha;
      ++gaussians;
    }
    fixamp -= K;
    gaussians -= K;
    for (kk=0; kk != K; ++kk){
      if (*(fixamp++) == false){
        gaussians->alpha /= ampnorm;
        gaussians->alpha *= (1. - sumfixedamps);
      }
      ++gaussians;
    }
    fixamp -= K;
    gaussians -= K;
  }

  //gettimeofday(&end,NULL);
  //double diff, diff1, diff2, diff3, diff4, diff5,diff6;
  //diff= difftime (end.tv_sec,start.tv_sec)+difftime (end.tv_usec,start.tv_usec)/1000000;
  //diff1= (difftime(time1.tv_sec,start.tv_sec)+difftime(time1.tv_usec,start.tv_usec)/1000000)/diff;
  //diff2= (difftime(time2.tv_sec,time1.tv_sec)+difftime(time2.tv_usec,time1.tv_usec)/1000000)/diff;
  //diff3= (difftime(time3.tv_sec,time2.tv_sec)+difftime(time3.tv_usec,time2.tv_usec)/1000000)/diff;
  //diff4= (difftime(time4.tv_sec,time3.tv_sec)+difftime(time4.tv_usec,time3.tv_usec)/1000000)/diff;
  //diff5= (difftime(time5.tv_sec,time4.tv_sec)+difftime(time5.tv_usec,time4.tv_usec)/1000000)/diff;
  //diff6= (difftime(end.tv_sec,time5.tv_sec)+difftime(end.tv_usec,time5.tv_usec)/1000000)/diff;
  //printf("%f,%f,%f,%f,%f,%f,%f\n",diff,diff1,diff2,diff3,diff4,diff5,diff6);

  free(allfixed);

  return;
}
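The heart of the E-step above is the per-point, per-Gaussian log weight written into qij. As an illustrative simplification (an assumption for clarity, not the library's actual code path, which handles projection matrices R and full covariances), the quantity reduces to the following in the no-projection, fully diagonal case where T = S + V is diagonal:

/* log_qij_sketch.c -- illustrative restatement of the qij formula above:
   log q_ij = log(alpha_j) - (d/2) log(2*pi) - (1/2) log det T - (1/2) r^T T^{-1} r,
   with r = w_i - m_j.  All names here are local to this sketch. */
#include <math.h>

static double log_qij_diag(const double *w, const double *m,
                           const double *Tdiag, double alpha, int d)
{
    const double halflogtwopi = 0.5 * log(2.0 * M_PI);
    double logdet = 0.0, expo = 0.0;
    for (int k = 0; k < d; k++) {
        double r = w[k] - m[k];
        logdet += log(Tdiag[k]);     /* log det T, T diagonal */
        expo   += r * r / Tdiag[k];  /* r^T T^{-1} r, T diagonal */
    }
    return log(alpha) - d * halflogtwopi - 0.5 * logdet - 0.5 * expo;
}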
io.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/

#include <stddef.h>

#include "base.h"
#include "io.h"
#include "sptensor.h"
#include "matrix.h"
#include "graph.h"

#include "timer.h"

/******************************************************************************
 * FILE TYPES
 *****************************************************************************/

struct ftype {
  char * extension;
  splatt_file_type type;
};

static struct ftype file_extensions[] = {
  { ".tns", SPLATT_FILE_TEXT_COORD },
  { ".coo", SPLATT_FILE_TEXT_COORD },
  { ".bin", SPLATT_FILE_BIN_COORD },
  { NULL, 0}
};

splatt_file_type get_file_type(
    char const * const fname)
{
  /* find last . in filename */
  char const * const suffix = strrchr(fname, '.');
  if(suffix == NULL) {
    goto NOT_FOUND;
  }

  size_t idx = 0;
  do {
    if(strcmp(suffix, file_extensions[idx].extension) == 0) {
      return file_extensions[idx].type;
    }
  } while(file_extensions[++idx].extension != NULL);

  /* default to text coordinate format */
  NOT_FOUND:
  fprintf(stderr, "SPLATT: extension for '%s' not recognized. "
                  "Defaulting to ASCII coordinate form.\n", fname);
  return SPLATT_FILE_TEXT_COORD;
}

/******************************************************************************
 * PRIVATE FUNCTIONS
 *****************************************************************************/

static sptensor_t * p_tt_read_file(
  FILE * fin)
{
  char * ptr = NULL;

  /* first count nnz in tensor */
  idx_t nnz = 0;
  idx_t nmodes = 0;

  idx_t dims[MAX_NMODES];
  idx_t offsets[MAX_NMODES];
  tt_get_dims(fin, &nmodes, &nnz, dims, offsets);

  if(nmodes > MAX_NMODES) {
    fprintf(stderr, "SPLATT ERROR: maximum %"SPLATT_PF_IDX" modes supported. "
                    "Found %"SPLATT_PF_IDX". Please recompile with "
                    "MAX_NMODES=%"SPLATT_PF_IDX".\n",
            (idx_t) MAX_NMODES, nmodes, nmodes);
    return NULL;
  }

  /* allocate structures */
  sptensor_t * tt = tt_alloc(nnz, nmodes);
  memcpy(tt->dims, dims, nmodes * sizeof(*dims));

  char * line = NULL;
  ssize_t read;
  size_t len = 0;

  /* fill in tensor data */
  rewind(fin);
  nnz = 0;
  while((read = getline(&line, &len, fin)) != -1) {
    /* skip empty and commented lines */
    if(read > 1 && line[0] != '#') {
      ptr = line;
      for(idx_t m=0; m < nmodes; ++m) {
        tt->ind[m][nnz] = strtoull(ptr, &ptr, 10) - offsets[m];
      }
      tt->vals[nnz++] = strtod(ptr, &ptr);
    }
  }

  free(line);
  return tt;
}

/**
 * @brief Write a binary header to an output file.
 *
 * @param fout The file to write to.
 * @param tt The tensor to form a header from.
 * @param[out] header The header to write.
 */
static void p_write_tt_binary_header(
  FILE * fout,
  sptensor_t const * const tt,
  bin_header * header)
{
  int32_t type = SPLATT_BIN_COORD;
  fwrite(&type, sizeof(type), 1, fout);

  /* now see if all indices fit in 32bit values */
  uint64_t idx = tt->nnz < UINT32_MAX ? sizeof(uint32_t) : sizeof(uint64_t);
  for(idx_t m=0; m < tt->nmodes; ++m) {
    if(tt->dims[m] > UINT32_MAX) {
      idx = sizeof(uint64_t);
      break;
    }
  }

  /* now see if every value can exactly be represented as a float */
  uint64_t val = sizeof(float);
  for(idx_t n=0; n < tt->nnz; ++n) {
    float conv = tt->vals[n];
    if((splatt_val_t) conv != tt->vals[n]) {
      val = sizeof(splatt_val_t);
    }
  }

  header->magic = type;
  header->idx_width = idx;
  header->val_width = val;

  fwrite(&idx, sizeof(idx), 1, fout);
  fwrite(&val, sizeof(val), 1, fout);
}

/**
 * @brief Read a COORD tensor from a binary file, converting from smaller idx
 *        or val precision if necessary.
 *
 * @param fin The file to read from.
 *
 * @return The parsed tensor.
 */
static sptensor_t * p_tt_read_binary_file(
  FILE * fin)
{
  bin_header header;
  read_binary_header(fin, &header);

  idx_t nnz = 0;
  idx_t nmodes = 0;
  idx_t dims[MAX_NMODES];

  fill_binary_idx(&nmodes, 1, &header, fin);
  fill_binary_idx(dims, nmodes, &header, fin);
  fill_binary_idx(&nnz, 1, &header, fin);

  if(nmodes > MAX_NMODES) {
    fprintf(stderr, "SPLATT ERROR: maximum %"SPLATT_PF_IDX" modes supported. "
                    "Found %"SPLATT_PF_IDX". Please recompile with "
                    "MAX_NMODES=%"SPLATT_PF_IDX".\n",
            (idx_t) MAX_NMODES, nmodes, nmodes);
    return NULL;
  }

  /* allocate structures */
  sptensor_t * tt = tt_alloc(nnz, nmodes);
  memcpy(tt->dims, dims, nmodes * sizeof(*dims));

  /* fill in tensor data */
  for(idx_t m=0; m < nmodes; ++m) {
    fill_binary_idx(tt->ind[m], nnz, &header, fin);
  }
  fill_binary_val(tt->vals, nnz, &header, fin);

  return tt;
}

/******************************************************************************
 * API FUNCTIONS
 *****************************************************************************/

int splatt_load(
  char const * const fname,
  splatt_idx_t * nmodes,
  splatt_idx_t ** dims,
  splatt_idx_t * nnz,
  splatt_idx_t *** inds,
  splatt_val_t ** vals)
{
  sptensor_t * tt = tt_read_file(fname);
  if(tt == NULL) {
    return SPLATT_ERROR_BADINPUT;
  }

  *nmodes = tt->nmodes;
  *dims = tt->dims;
  *nnz = tt->nnz;
  *vals = tt->vals;
  *inds = tt->ind;

  free(tt);
  return SPLATT_SUCCESS;
}

/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

sptensor_t * tt_read_file(
  char const * const fname)
{
  FILE * fin;
  if((fin = fopen(fname, "r")) == NULL) {
    fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n", fname);
    return NULL;
  }

  sptensor_t * tt = NULL;
  timer_start(&timers[TIMER_IO]);
  switch(get_file_type(fname)) {
  case SPLATT_FILE_TEXT_COORD:
    tt = p_tt_read_file(fin);
    break;
  case SPLATT_FILE_BIN_COORD:
    tt = p_tt_read_binary_file(fin);
    break;
  }
  timer_stop(&timers[TIMER_IO]);
  fclose(fin);
  return tt;
}

sptensor_t * tt_read_binary_file(
  char const * const fname)
{
  FILE * fin;
  if((fin = fopen(fname, "r")) == NULL) {
    fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n", fname);
    return NULL;
  }

  timer_start(&timers[TIMER_IO]);
  sptensor_t * tt = p_tt_read_binary_file(fin);
  timer_stop(&timers[TIMER_IO]);
  fclose(fin);
  return tt;
}

void tt_get_dims(
    FILE * fin,
    idx_t * const outnmodes,
    idx_t * const outnnz,
    idx_t * outdims,
    idx_t * offset)
{
  char * ptr = NULL;
  idx_t nnz = 0;
  char * line = NULL;
  ssize_t read;
  size_t len = 0;

  /* first count modes in tensor */
  idx_t nmodes = 0;
  while((read = getline(&line, &len, fin)) != -1) {
    if(read > 1 && line[0] != '#') {
      /* get nmodes from first nnz line */
      ptr = strtok(line, " \t");
      while(ptr != NULL) {
        ++nmodes;
        ptr = strtok(NULL, " \t");
      }
      break;
    }
  }
  --nmodes;

  for(idx_t m=0; m < nmodes; ++m) {
    outdims[m] = 0;
    offset[m] = 1;
  }

  /* fill in tensor dimensions */
  rewind(fin);
  while((read = getline(&line, &len, fin)) != -1) {
    /* skip empty and commented lines */
    if(read > 1 && line[0] != '#') {
      ptr = line;
      for(idx_t m=0; m < nmodes; ++m) {
        idx_t ind = strtoull(ptr, &ptr, 10);
        /* outdim is maximum */
        outdims[m] = (ind > outdims[m]) ? ind : outdims[m];
        /* offset is minimum */
        offset[m] = (ind < offset[m]) ? ind : offset[m];
      }
      /* skip over tensor val */
      strtod(ptr, &ptr);
      ++nnz;
    }
  }
  *outnnz = nnz;
  *outnmodes = nmodes;

  /* only support 0 or 1 indexing */
  for(idx_t m=0; m < nmodes; ++m) {
    if(offset[m] != 0 && offset[m] != 1) {
      fprintf(stderr, "SPLATT: ERROR tensors must be 0 or 1 indexed. "
                      "Mode %"SPLATT_PF_IDX" is %"SPLATT_PF_IDX" indexed.\n",
              m, offset[m]);
      exit(1);
    }
  }

  /* adjust dims when zero-indexing */
  for(idx_t m=0; m < nmodes; ++m) {
    if(offset[m] == 0) {
      ++outdims[m];
    }
  }

  rewind(fin);
  free(line);
}

void tt_write(
  sptensor_t const * const tt,
  char const * const fname)
{
  FILE * fout;
  if(fname == NULL) {
    fout = stdout;
  } else {
    if((fout = fopen(fname,"w")) == NULL) {
      fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n", fname);
      return;
    }
  }

  tt_write_file(tt, fout);

  if(fname != NULL) {
    fclose(fout);
  }
}

void tt_write_file(
  sptensor_t const * const tt,
  FILE * fout)
{
  timer_start(&timers[TIMER_IO]);
  for(idx_t n=0; n < tt->nnz; ++n) {
    for(idx_t m=0; m < tt->nmodes; ++m) {
      /* files are 1-indexed instead of 0 */
      fprintf(fout, "%"SPLATT_PF_IDX" ", tt->ind[m][n] + 1);
    }
    fprintf(fout, "%"SPLATT_PF_VAL"\n", tt->vals[n]);
  }
  timer_stop(&timers[TIMER_IO]);
}

void tt_write_binary(
  sptensor_t const * const tt,
  char const * const fname)
{
  FILE * fout;
  if(fname == NULL) {
    fout = stdout;
  } else {
    if((fout = fopen(fname,"w")) == NULL) {
      fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n", fname);
      return;
    }
  }

  tt_write_binary_file(tt, fout);

  if(fname != NULL) {
    fclose(fout);
  }
}

void tt_write_binary_file(
  sptensor_t const * const tt,
  FILE * fout)
{
  timer_start(&timers[TIMER_IO]);

  bin_header header;
  p_write_tt_binary_header(fout, tt, &header);

  /* WRITE INDICES */
  /* if we are writing to the same precision they are stored in, just fwrite */
  if(header.idx_width == sizeof(splatt_idx_t)) {
    fwrite(&tt->nmodes, sizeof(tt->nmodes), 1, fout);
    fwrite(tt->dims, sizeof(*tt->dims), tt->nmodes, fout);
    fwrite(&tt->nnz, sizeof(tt->nnz), 1, fout);
    for(idx_t m=0; m < tt->nmodes; ++m) {
      fwrite(tt->ind[m], sizeof(*tt->ind[m]), tt->nnz, fout);
    }

  /* otherwise we convert (downwards) element-wise */
  } else if(header.idx_width < sizeof(splatt_idx_t)) {
    uint32_t buf = tt->nmodes;
    fwrite(&buf, sizeof(buf), 1, fout);
    for(idx_t m=0; m < tt->nmodes; ++m) {
      buf = tt->dims[m];
      fwrite(&buf, sizeof(buf), 1, fout);
    }
    buf = tt->nnz;
    fwrite(&buf, sizeof(buf), 1, fout);

    /* write inds */
    for(idx_t m=0; m < tt->nmodes; ++m) {
      for(idx_t n=0; n < tt->nnz; ++n) {
        buf = tt->ind[m][n];
        fwrite(&buf, sizeof(buf), 1, fout);
      }
    }
  } else {
    /* XXX this should never be reached */
    fprintf(stderr, "SPLATT: the impossible happened, "
                    "idx_width > IDX_TYPEWIDTH.\n");
    abort();
  }

  /* WRITE VALUES */
  if(header.val_width == sizeof(splatt_val_t)) {
    fwrite(tt->vals, sizeof(*tt->vals), tt->nnz, fout);

  /* otherwise we convert (downwards) element-wise */
  } else if(header.val_width < sizeof(splatt_val_t)) {
    for(idx_t n=0; n < tt->nnz; ++n) {
      float buf = tt->vals[n];
      fwrite(&buf, sizeof(buf), 1, fout);
    }
  } else {
    /* XXX this should never be reached */
    fprintf(stderr, "SPLATT: the impossible happened, "
                    "val_width > VAL_TYPEWIDTH.\n");
    abort();
  }

  timer_stop(&timers[TIMER_IO]);
}

void read_binary_header(
    FILE * fin,
    bin_header * header)
{
  fread(&(header->magic), sizeof(header->magic), 1, fin);
  fread(&(header->idx_width), sizeof(header->idx_width), 1, fin);
  fread(&(header->val_width), sizeof(header->val_width), 1, fin);

  if(header->idx_width > SPLATT_IDX_TYPEWIDTH / 8) {
    fprintf(stderr, "SPLATT: ERROR input has %zu-bit integers. "
                    "Build with SPLATT_IDX_TYPEWIDTH %zu\n",
            header->idx_width * 8, header->idx_width * 8);
    exit(EXIT_FAILURE);
  }
  if(header->val_width > SPLATT_VAL_TYPEWIDTH / 8) {
    fprintf(stderr, "SPLATT: WARNING input has %zu-bit floating-point values. "
                    "Build with SPLATT_VAL_TYPEWIDTH %zu for full precision\n",
            header->val_width * 8, header->val_width * 8);
  }
}

void fill_binary_idx(
    idx_t * const buffer,
    idx_t const count,
    bin_header const * const header,
    FILE * fin)
{
  if(header->idx_width == sizeof(splatt_idx_t)) {
    fread(buffer, sizeof(idx_t), count, fin);
  } else {
    /* read in uint32_t in a buffered fashion */
    idx_t const BUF_LEN = 1024*1024;
    uint32_t * ubuf = splatt_malloc(BUF_LEN * sizeof(*ubuf));
    for(idx_t n=0; n < count; n += BUF_LEN) {
      idx_t const read_count = SS_MIN(BUF_LEN, count - n);
      fread(ubuf, sizeof(*ubuf), read_count, fin);
      #pragma omp parallel for schedule(static)
      for(idx_t i=0; i < read_count; ++i) {
        buffer[n + i] = ubuf[i];
      }
    }
    splatt_free(ubuf);
  }
}

void fill_binary_val(
    val_t * const buffer,
    idx_t const count,
    bin_header const * const header,
    FILE * fin)
{
  if(header->val_width == sizeof(splatt_val_t)) {
    fread(buffer, sizeof(val_t), count, fin);
  } else {
    /* read in float in a buffered fashion */
    idx_t const BUF_LEN = 1024*1024;

    /* select whichever SPLATT *is not* configured with. */
#if SPLATT_VAL_TYPEWIDTH == 64
    float * ubuf = splatt_malloc(BUF_LEN * sizeof(*ubuf));
#else
    double * ubuf = splatt_malloc(BUF_LEN * sizeof(*ubuf));
#endif

    for(idx_t n=0; n < count; n += BUF_LEN) {
      idx_t const read_count = SS_MIN(BUF_LEN, count - n);
      fread(ubuf, sizeof(*ubuf), read_count, fin);
      #pragma omp parallel for schedule(static)
      for(idx_t i=0; i < read_count; ++i) {
        buffer[n + i] = ubuf[i];
      }
    }
    splatt_free(ubuf);
  }
}

void hgraph_write(
  hgraph_t const * const hg,
  char const * const fname)
{
  FILE * fout;
  if(fname == NULL || strcmp(fname, "-") == 0) {
    fout = stdout;
  } else {
    if((fout = fopen(fname,"w")) == NULL) {
      fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n", fname);
      return;
    }
  }

  hgraph_write_file(hg, fout);
  fclose(fout);
}

void hgraph_write_file(
  hgraph_t const * const hg,
  FILE * fout)
{
  timer_start(&timers[TIMER_IO]);

  /* print header */
  fprintf(fout, "%"SPLATT_PF_IDX" %"SPLATT_PF_IDX, hg->nhedges, hg->nvtxs);
  if(hg->vwts != NULL) {
    if(hg->hewts != NULL) {
      fprintf(fout, " 11");
    } else {
      fprintf(fout, " 10");
    }
  } else if(hg->hewts != NULL) {
    fprintf(fout, " 1");
  }
  fprintf(fout, "\n");

  /* print hyperedges */
  for(idx_t e=0; e < hg->nhedges; ++e) {
    if(hg->hewts != NULL) {
      fprintf(fout, "%"SPLATT_PF_IDX" ", hg->hewts[e]);
    }
    for(idx_t v=hg->eptr[e]; v < hg->eptr[e+1]; ++v) {
      fprintf(fout, "%"SPLATT_PF_IDX" ", hg->eind[v]+1);
    }
    fprintf(fout, "\n");
  }

  /* print vertex weights */
  if(hg->vwts != NULL) {
    for(idx_t v=0; v < hg->nvtxs; ++v) {
      fprintf(fout, "%"SPLATT_PF_IDX"\n", hg->vwts[v]);
    }
  }
  timer_stop(&timers[TIMER_IO]);
}

void graph_write_file(
    splatt_graph const * const graph,
    FILE * fout)
{
  timer_start(&timers[TIMER_IO]);

  /* print header */
  fprintf(fout, "%"SPLATT_PF_IDX" %"SPLATT_PF_IDX" 0%d%d",
          graph->nvtxs, graph->nedges/2,
          graph->vwgts != NULL, graph->ewgts != NULL);
  /* handle multi-constraint partitioning */
  if(graph->nvwgts > 1) {
    fprintf(fout, " %"SPLATT_PF_IDX, graph->nvwgts);
  }
  fprintf(fout, "\n");

  /* now write adj list */
  for(vtx_t v=0; v < graph->nvtxs; ++v) {
    /* vertex weights */
    if(graph->vwgts != NULL) {
      for(idx_t x=0; x < graph->nvwgts; ++x) {
        fprintf(fout, "%"SPLATT_PF_IDX" ", graph->vwgts[x+(v*graph->nvwgts)]);
      }
    }

    for(adj_t e=graph->eptr[v]; e < graph->eptr[v+1]; ++e) {
      fprintf(fout, "%"SPLATT_PF_IDX" ", graph->eind[e] + 1);
      /* edge weight */
      if(graph->ewgts != NULL) {
        fprintf(fout, "%"SPLATT_PF_IDX" ", graph->ewgts[e]);
      }
    }
    fprintf(fout, "\n");
  }
  timer_stop(&timers[TIMER_IO]);
}

void spmat_write(
  spmatrix_t const *
const mat, char const * const fname) { FILE * fout; if(fname == NULL || strcmp(fname, "-") == 0) { fout = stdout; } else { if((fout = fopen(fname,"w")) == NULL) { fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", fname); return; } } spmat_write_file(mat, fout); if(fout != stdout) { fclose(fout); } } void spmat_write_file( spmatrix_t const * const mat, FILE * fout) { timer_start(&timers[TIMER_IO]); /* write CSR matrix */ for(idx_t i=0; i < mat->I; ++i) { for(idx_t j=mat->rowptr[i]; j < mat->rowptr[i+1]; ++j) { fprintf(fout, "%"SPLATT_PF_IDX" %"SPLATT_PF_VAL" ", mat->colind[j], mat->vals[j]); } fprintf(fout, "\n"); } timer_stop(&timers[TIMER_IO]); } void mat_write( matrix_t const * const mat, char const * const fname) { FILE * fout; if(fname == NULL) { fout = stdout; } else { if((fout = fopen(fname,"w")) == NULL) { fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", fname); return; } } mat_write_file(mat, fout); if(fout != stdout) { fclose(fout); } } void mat_write_file( matrix_t const * const mat, FILE * fout) { timer_start(&timers[TIMER_IO]); idx_t const I = mat->I; idx_t const J = mat->J; val_t const * const vals = mat->vals; if(mat->rowmajor) { for(idx_t i=0; i < mat->I; ++i) { for(idx_t j=0; j < J; ++j) { fprintf(fout, "%+0.8le ", vals[j + (i*J)]); } fprintf(fout, "\n"); } } else { for(idx_t i=0; i < mat->I; ++i) { for(idx_t j=0; j < J; ++j) { fprintf(fout, "%+0.8le ", vals[i + (j*I)]); } fprintf(fout, "\n"); } } timer_stop(&timers[TIMER_IO]); } void mat_read( idx_t I, idx_t J, matrix_t *mat, char const * const fname) { FILE * fin; if(fname == NULL) { fin = stdin; } else { if((fin = fopen(fname,"r")) == NULL) { fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", fname); return; } } mat_read_file(I, J, mat, fin); if(fin != stdin) { fclose(fin); } } void mat_read_file( idx_t I, idx_t J, matrix_t *mat, FILE * fin) { timer_start(&timers[TIMER_IO]); if(mat->rowmajor) { for(idx_t i=0; i < I; ++i) { for(idx_t j=0; j < J; ++j) { fscanf(fin, "%lf", &(mat->vals[j + (i*J)])); } } } else { for(idx_t i=0; i < mat->I; ++i) { for(idx_t j=0; j < J; ++j) { fscanf(fin, "%lf", &(mat->vals[i + (j*I)])); } } } timer_stop(&timers[TIMER_IO]); } void vec_write( val_t const * const vec, idx_t const len, char const * const fname) { FILE * fout; if(fname == NULL) { fout = stdout; } else { if((fout = fopen(fname,"w")) == NULL) { fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", fname); return; } } vec_write_file(vec, len, fout); if(fout != stdout) { fclose(fout); } } void vec_write_file( val_t const * const vec, idx_t const len, FILE * fout) { timer_start(&timers[TIMER_IO]); for(idx_t i=0; i < len; ++i) { fprintf(fout, "%le\n", vec[i]); } timer_stop(&timers[TIMER_IO]); } idx_t * part_read( char const * const ifname, idx_t const nvtxs, idx_t * nparts) { FILE * pfile; if((pfile = fopen(ifname, "r")) == NULL) { fprintf(stderr, "SPLATT ERROR: unable to open '%s'\n", ifname); return NULL; } *nparts = 0; idx_t ret; idx_t * arr = (idx_t *) splatt_malloc(nvtxs * sizeof(idx_t)); for(idx_t i=0; i < nvtxs; ++i) { if((ret = fscanf(pfile, "%"SPLATT_PF_IDX, &(arr[i]))) == 0) { fprintf(stderr, "SPLATT ERROR: not enough elements in '%s'\n", ifname); free(arr); return NULL; } if(arr[i] > *nparts) { *nparts = arr[i]; } } fclose(pfile); /* increment to adjust for 0-indexing of partition ids */ *nparts += 1; return arr; } /****************************************************************************** * PERMUTATION FUNCTIONS *****************************************************************************/ void 
perm_write( idx_t * perm, idx_t const dim, char const * const fname) { FILE * fout; if(fname == NULL) { fout = stdout; } else { if((fout = fopen(fname,"w")) == NULL) { fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", fname); return; } } perm_write_file(perm, dim, fout); if(fname != NULL) { fclose(fout); } } void perm_write_file( idx_t * perm, idx_t const dim, FILE * fout) { for(idx_t i=0; i < dim; ++i) { fprintf(fout, "%"SPLATT_PF_IDX"\n", perm[i]); } }
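/* Illustrative sketch (an assumption, not part of SPLATT): a caller driving
 * splatt_load() above. splatt_load() transfers ownership of dims/inds/vals
 * to the caller and frees only the sptensor_t shell, so the caller releases
 * each buffer individually, mirroring this file's own use of plain free(). */
#ifdef SPLATT_IO_EXAMPLE
int main(int argc, char ** argv)
{
  splatt_idx_t nmodes, nnz;
  splatt_idx_t * dims = NULL;
  splatt_idx_t ** inds = NULL;
  splatt_val_t * vals = NULL;

  if(argc < 2 || splatt_load(argv[1], &nmodes, &dims, &nnz, &inds, &vals)
      != SPLATT_SUCCESS) {
    return 1;
  }
  printf("loaded %"SPLATT_PF_IDX" nonzeros over %"SPLATT_PF_IDX" modes\n",
      nnz, nmodes);

  /* the index arrays, dims, and vals now belong to the caller */
  for(splatt_idx_t m=0; m < nmodes; ++m) {
    free(inds[m]);
  }
  free(inds);
  free(dims);
  free(vals);
  return 0;
}
#endif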
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #if defined(_OPENMP) #include <omp.h> /* needed for omp_get_max_threads() below */ #endif #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? (a) : (b)) #define MIN(a,b) ((a) < (b) ? (a) : (b)) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx=258, Ny=258, Nz=258, Nt=100; /* defaults when not set via the command line */ if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 7 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); 
ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); free(A); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } free(coef); free(tile_size); return 0; }
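/* Hedged sketch (an assumption, not part of the benchmark): the time loop
 * above carries a dependence, but each step's spatial triple loop is data
 * parallel, so a single pragma on the outer spatial loop is enough to thread
 * one step. Array shapes and the A/coef layout match main(); the function
 * itself is illustrative. */
#ifdef EXAMPLE_OMP_STEP
void step_omp(double ****A, double ****coef, int t, int Nx, int Ny, int Nz)
{
  int i, j, k;
#pragma omp parallel for private(j,k) schedule(static)
  for (i = 1; i < Nz-1; i++) {
    for (j = 1; j < Ny-1; j++) {
      for (k = 1; k < Nx-1; k++) {
        A[(t+1)%2][i][j][k] =
            coef[0][i][j][k] * A[t%2][i  ][j  ][k  ]
          + coef[1][i][j][k] * A[t%2][i-1][j  ][k  ]
          + coef[2][i][j][k] * A[t%2][i  ][j-1][k  ]
          + coef[3][i][j][k] * A[t%2][i  ][j  ][k-1]
          + coef[4][i][j][k] * A[t%2][i+1][j  ][k  ]
          + coef[5][i][j][k] * A[t%2][i  ][j+1][k  ]
          + coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
      }
    }
  }
}
#endif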
matriz-operacoes-omp.c
#include "matriz-operacoes-omp.h" int multiplicarOmp (int **mat_a, int **mat_b, int **mat_c, int N, int L, int M) { if(mat_a == NULL || mat_b == NULL || mat_c == NULL ){ printf("\nMatriz nao alocada!!!\n"); exit(1); } #pragma omp parallel for schedule(guided) private(i,j,k) shared(mat_a,mat_b,mat_c) for(int i = 0; i < N; i++){ for(int j = 0; j < M; j++){ mat_c[i][j] = 0; for(int k = 0; k < L; k++){ mat_c[i][j] += mat_a[i][k] * mat_b[k][j]; } } } return 0; } void multiplicaBlocoOMP(int size, int n, int **A, int **B, int **C, int TILE){ int i, j, k, x, y, z; #pragma omp parallel for schedule(guided) private(i,j,k,x,y,z) shared(A,B,C) for(i = 0; i < size; i += TILE){ for(j = 0; j < size; j += TILE){ for(k = 0; k < size; k += TILE){ for(x = i; x < i + TILE; x++){ for(y = j; y < j + TILE; y++){ for(z = k; z < k + TILE; z++){ C[x][y] += A[x][z] * B[z][y]; } } } } } } }
GB_unaryop__lnot_uint16_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint16_fp32 // op(A') function: GB_tran__lnot_uint16_fp32 // C type: uint16_t // A type: float // cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16) // unaryop: cij = !(aij != 0) #define GB_ATYPE \ float #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint16_t z ; GB_CAST_UNSIGNED(z,x,16) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint16_fp32 ( uint16_t *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint16_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
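// For reference (illustrative, mirroring the macros above): each
// GB_CAST_OP (p, p) in the apply loop expands to roughly
//
//      float aij = Ax [p] ;                            // GB_GETA
//      uint16_t x ; GB_CAST_UNSIGNED (x, aij, 16) ;    // GB_CASTING
//      Cx [p] = !(x != 0) ;                            // GB_OP
//
// so the float is cast to uint16_t first and the logical NOT is applied to
// the cast value, not to the original float.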
pr25874.c
/* { dg-options "-O -fopenmp" } */ void foo(); inline void bar() { int i; for ( i=0; i<1; ++i ) #pragma omp parallel foo(); } void baz() { #pragma omp parallel bar(); }
GB_unop__identity_uint32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint32_fp32) // op(A') function: GB (_unop_tran__identity_uint32_fp32) // C type: uint32_t // A type: float // cast: uint32_t cij = GB_cast_to_uint32_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint32_fp32) ( uint32_t *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint32_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__isge_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_int64) // A.*B function (eWiseMult): GB (_AemultB_08__isge_int64) // A.*B function (eWiseMult): GB (_AemultB_02__isge_int64) // A.*B function (eWiseMult): GB (_AemultB_04__isge_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_int64) // A*D function (colscale): GB (_AxD__isge_int64) // D*A function (rowscale): GB (_DxB__isge_int64) // C+=B function (dense accum): GB (_Cdense_accumB__isge_int64) // C+=b function (dense accum): GB (_Cdense_accumb__isge_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_int64) // C=scalar+B GB (_bind1st__isge_int64) // C=scalar+B' GB (_bind1st_tran__isge_int64) // C=A+scalar GB (_bind2nd__isge_int64) // C=A'+scalar GB (_bind2nd_tran__isge_int64) // C type: int64_t // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_INT64 || GxB_NO_ISGE_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isge_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isge_int64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isge_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; beta_scalar 
= (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isge_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isge_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isge_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isge_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isge_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isge_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__isge_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__isge_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
core_dgelqt.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zgelqt.c, normal z -> d, Fri Sep 28 17:38:20 2018 * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "plasma_internal.h" #include "core_lapack.h" #include <omp.h> /***************************************************************************//** * * @ingroup core_gelqt * * Computes the LQ factorization of an m-by-n tile A: * The factorization has the form * \f[ * A = L \times Q * \f] * The tile Q is represented as a product of elementary reflectors * \f[ * Q = H(k)^T . . . H(2)^T H(1)^T, * \f] * where \f$ k = min(m,n) \f$. * * Each \f$ H(i) \f$ has the form * \f[ * H(i) = I - \tau \times v \times v^T * \f] * where \f$ \tau \f$ is a scalar, and \f$ v \f$ is a vector with * v(1:i-1) = 0 and v(i) = 1; v(i+1:n)^T is stored on exit in A(i,i+1:n), * and \f$ \tau \f$ in tau(i). * ******************************************************************************* * * @param[in] m * The number of rows of the tile A. m >= 0. * * @param[in] n * The number of columns of the tile A. n >= 0. * * @param[in] ib * The inner-blocking size. ib >= 0. * * @param[in,out] A * On entry, the m-by-n tile A. * On exit, the elements on and below the diagonal of the array * contain the m-by-min(m,n) lower trapezoidal tile L (L is * lower triangular if m <= n); the elements above the diagonal, * with the array tau, represent the orthogonal tile Q as a * product of elementary reflectors (see Further Details). * * @param[in] lda * The leading dimension of the array A. lda >= max(1,m). * * @param[out] T * The ib-by-m triangular factor T of the block reflector. * T is upper triangular by block (economic storage); * The rest of the array is not referenced. * * @param[in] ldt * The leading dimension of the array T. ldt >= ib. * * @param tau * Auxiliary workspace array of length m. * * @param work * Auxiliary workspace array of length ib*m. * ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * ******************************************************************************/ __attribute__((weak)) int plasma_core_dgelqt(int m, int n, int ib, double *A, int lda, double *T, int ldt, double *tau, double *work) { // Check input arguments. 
if (m < 0) { plasma_coreblas_error("illegal value of m"); return -1; } if (n < 0) { plasma_coreblas_error("illegal value of n"); return -2; } if ((ib < 0) || ( (ib == 0) && ((m > 0) && (n > 0)) )) { plasma_coreblas_error("illegal value of ib"); return -3; } if (A == NULL) { plasma_coreblas_error("NULL A"); return -4; } if (lda < imax(1, m) && m > 0) { plasma_coreblas_error("illegal value of lda"); return -5; } if (T == NULL) { plasma_coreblas_error("NULL T"); return -6; } if (ldt < imax(1,ib) && ib > 0) { plasma_coreblas_error("illegal value of ldt"); return -7; } if (tau == NULL) { plasma_coreblas_error("NULL tau"); return -8; } if (work == NULL) { plasma_coreblas_error("NULL work"); return -9; } // quick return if (m == 0 || n == 0 || ib == 0) return PlasmaSuccess; int k = imin(m, n); for (int i = 0; i < k; i += ib) { int sb = imin(ib, k-i); LAPACKE_dgelq2_work(LAPACK_COL_MAJOR, sb, n-i, &A[lda*i+i], lda, &tau[i], work); LAPACKE_dlarft_work(LAPACK_COL_MAJOR, lapack_const(PlasmaForward), lapack_const(PlasmaRowwise), n-i, sb, &A[lda*i+i], lda, &tau[i], &T[ldt*i], ldt); if (m > i+sb) { LAPACKE_dlarfb_work(LAPACK_COL_MAJOR, lapack_const(PlasmaRight), lapack_const(PlasmaNoTrans), lapack_const(PlasmaForward), lapack_const(PlasmaRowwise), m-i-sb, n-i, sb, &A[lda*i+i], lda, &T[ldt*i], ldt, &A[lda*i+(i+sb)], lda, work, m-i-sb); } } return PlasmaSuccess; } /******************************************************************************/ void plasma_core_omp_dgelqt(int m, int n, int ib, double *A, int lda, double *T, int ldt, plasma_workspace_t work, plasma_sequence_t *sequence, plasma_request_t *request) { #pragma omp task depend(inout:A[0:lda*n]) \ depend(out:T[0:ib*m]) // T should be mxib, but is stored // as ibxm { if (sequence->status == PlasmaSuccess) { // Prepare workspaces. int tid = omp_get_thread_num(); double *tau = (double*)work.spaces[tid]; // Call the kernel. int info = plasma_core_dgelqt(m, n, ib, A, lda, T, ldt, tau, tau+m); if (info != PlasmaSuccess) { plasma_error("core_dgelqt() failed"); plasma_request_fail(sequence, request, PlasmaErrorInternal); } } } }
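/******************************************************************************/
// Illustrative sketch (an assumption, not part of PLASMA): calling the
// sequential kernel directly. Buffer lengths follow the documentation above:
// tau has length m, work has length ib*m, and T is ldt-by-m with ldt >= ib;
// the concrete sizes here are arbitrary.
#ifdef CORE_DGELQT_EXAMPLE
#include <stdlib.h>
static int example_dgelqt(void)
{
    int m = 100, n = 200, ib = 32;
    int lda = m, ldt = ib;
    double *A    = (double*)calloc((size_t)lda*n, sizeof(double));
    double *T    = (double*)calloc((size_t)ldt*m, sizeof(double));
    double *tau  = (double*)malloc((size_t)m    * sizeof(double));
    double *work = (double*)malloc((size_t)ib*m * sizeof(double));

    // ... fill A (column-major) with the matrix to factor ...
    int info = plasma_core_dgelqt(m, n, ib, A, lda, T, ldt, tau, work);

    free(work); free(tau); free(T); free(A);
    return info; // PlasmaSuccess on success
}
#endif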
atomic_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // Denis Demidov // #if !defined(KRATOS_ATOMIC_UTILITIES_H_INCLUDED ) #define KRATOS_ATOMIC_UTILITIES_H_INCLUDED // System includes // External includes #ifdef KRATOS_SMP_OPENMP #include <omp.h> #endif // Project includes #include "includes/define.h" #include "containers/array_1d.h" namespace Kratos { ///@addtogroup KratosCore /** * collection of utilities for atomic updates of simple types. (essentially mimics the omp atomic) */ /** * @param target variable being atomically updated by doing target += value * @param value value being added */ template<class TDataType> inline void AtomicAdd(TDataType& target, const TDataType& value) { #pragma omp atomic target += value; } /** * @param target variable being atomically updated by doing target += value * @param value value being added * Specialization for array_1d<double,3> * Note that the update is not really atomic, but rather is done component by component */ inline void AtomicAdd(array_1d<double,3>& target, const array_1d<double,3>& value) { AtomicAdd(target[0], value[0]); AtomicAdd(target[1], value[1]); AtomicAdd(target[2], value[2]); } /** * @param target vector variable being atomically updated by doing target += value * @param value vector value being added * Note that the update is not really atomic, but rather is done component by component */ template<class TVectorType1, class TVectorType2> inline void AtomicAddVector(TVectorType1& target, const TVectorType2& value) { KRATOS_DEBUG_ERROR_IF(target.size() != value.size()) << "vector size mismatch in vector AtomicAddVector- Sizes are: " << target.size() << " for target and " << value.size() << " for value " << std::endl; for(std::size_t i=0; i<target.size(); ++i) { AtomicAdd(target[i], value[i]); } } /** * @param target matrix variable being atomically updated by doing target += value * @param value matrix value being added * Note that the update is not really atomic, but rather is done component by component */ template<class TMatrixType1, class TMatrixType2> inline void AtomicAddMatrix(TMatrixType1& target, const TMatrixType2& value) { KRATOS_DEBUG_ERROR_IF(target.size1() != value.size1() || target.size2() != value.size2()) << "matrix size mismatch in matrix AtomicAddMatrix- Sizes are: " << target.size1() << "x" << target.size2() << " for target and " << value.size1() << "x" << value.size2() << " for value " << std::endl; for(std::size_t i=0; i<target.size1(); ++i) { for(std::size_t j=0; j<target.size2(); ++j) { AtomicAdd(target(i,j), value(i,j)); } } } /** * @param target variable being atomically updated by doing target -= value * @param value value being subtracted */ template<class TDataType> inline void AtomicSub(TDataType& target, const TDataType& value) { #pragma omp atomic target -= value; } /** * @param target variable being atomically updated by doing target -= value * @param value value being subtracted * Specialization for array_1d<double,3> * Note that the update is not really atomic, but rather is done component by component */ inline void AtomicSub(array_1d<double,3>& target, const array_1d<double,3>& value) { AtomicSub(target[0], value[0]); AtomicSub(target[1], value[1]); AtomicSub(target[2], value[2]); } /** * @param target vector variable being atomically updated by doing target -= value * @param value vector value being subtracted 
* Note that the update is not really atomic, but rather is done component by component */ template<class TVectorType1, class TVectorType2> inline void AtomicSubVector(TVectorType1& target, const TVectorType2& value) { KRATOS_DEBUG_ERROR_IF(target.size() != value.size()) << "vector size mismatch in vector AtomicSubVector- Sizes are: " << target.size() << " for target and " << value.size() << " for value " << std::endl; for(std::size_t i=0; i<target.size(); ++i) { AtomicSub(target[i], value[i]); } } /** * @param target matrix variable being atomically updated by doing target -= value * @param value matrix value being subtracted * Note that the update is not really atomic, but rather is done component by component */ template<class TMatrixType1, class TMatrixType2> inline void AtomicSubMatrix(TMatrixType1& target, const TMatrixType2& value) { KRATOS_DEBUG_ERROR_IF(target.size1() != value.size1() || target.size2() != value.size2()) << "matrix size mismatch in matrix AtomicSubMatrix- Sizes are: " << target.size1() << "x" << target.size2() << " for target and " << value.size1() << "x" << value.size2() << " for value " << std::endl; for(std::size_t i=0; i<target.size1(); ++i) { for(std::size_t j=0; j<target.size2(); ++j) { AtomicSub(target(i,j), value(i,j)); } } } /** @param target variable being atomically updated by doing target *= value * @param value value being multiplied */ template<class TDataType> inline void AtomicMult(TDataType& target, const TDataType& value) { #pragma omp atomic target *= value; } /** @param target variable being atomically updated by doing target *= value * @param value value being multiplied * Specialization for array_1d<double,3> * Note that the update is not really atomic, but rather is done component by component */ inline void AtomicMult(array_1d<double,3>& target, const array_1d<double,3>& value) { AtomicMult(target[0], value[0]); AtomicMult(target[1], value[1]); AtomicMult(target[2], value[2]); } /** @param target vector variable being atomically updated by doing target *= value * @param value vector value being multiplied * Note that the update is not really atomic, but rather is done component by component */ template<class TVectorType1, class TVectorType2> inline void AtomicMultVector(TVectorType1& target, const TVectorType2& value) { KRATOS_DEBUG_ERROR_IF(target.size() != value.size()) << "vector size mismatch in vector AtomicMultVector- Sizes are: " << target.size() << " for target and " << value.size() << " for value " << std::endl; for(std::size_t i=0; i<target.size(); ++i) { AtomicMult(target[i], value[i]); } } /** * @param target matrix variable being atomically updated by doing target *= value * @param value matrix value being multiplied * Note that the update is not really atomic, but rather is done component by component */ template<class TMatrixType1, class TMatrixType2> inline void AtomicMultMatrix(TMatrixType1& target, const TMatrixType2& value) { KRATOS_DEBUG_ERROR_IF(target.size1() != value.size1() || target.size2() != value.size2()) << "matrix size mismatch in matrix AtomicMultMatrix- Sizes are: " << target.size1() << "x" << target.size2() << " for target and " << value.size1() << "x" << value.size2() << " for value " << std::endl; for(std::size_t i=0; i<target.size1(); ++i) { for(std::size_t j=0; j<target.size2(); ++j) { AtomicMult(target(i,j), value(i,j)); } } } /** @param target variable being atomically updated by doing target *= 1.0/value * @param value value being divided */ template<class TDataType> inline 
void AtomicDiv(TDataType& target, const TDataType& value) { AtomicMult(target, 1.0/value); } /** @param target variable being atomically updated by doing target *= 1.0/value * @param value value being divided * Specialization for array_1d<double,3> * Note that the update is not really atomic, but rather is done component by component */ inline void AtomicDiv(array_1d<double,3>& target, const array_1d<double,3>& value) { AtomicDiv(target[0], value[0]); AtomicDiv(target[1], value[1]); AtomicDiv(target[2], value[2]); } /** @param target vector variable being atomically updated by doing target *= 1.0/value * @param value vector value being divided * Note that the update is not really atomic, but rather is done component by component */ template<class TVectorType1, class TVectorType2> inline void AtomicDivVector(TVectorType1& target, const TVectorType2& value) { KRATOS_DEBUG_ERROR_IF(target.size() != value.size()) << "vector size mismatch in vector AtomicDivVector- Sizes are: " << target.size() << " for target and " << value.size() << " for value " << std::endl; for(std::size_t i=0; i<target.size(); ++i) { AtomicDiv(target[i], value[i]); } } /** * @param target matrix variable being atomically updated by doing target *= 1.0/value * @param value matrix value being divided * Note that the update is not really atomic, but rather is done component by component */ template<class TMatrixType1, class TMatrixType2> inline void AtomicDivMatrix(TMatrixType1& target, const TMatrixType2& value) { KRATOS_DEBUG_ERROR_IF(target.size1() != value.size1() || target.size2() != value.size2()) << "matrix size mismatch in matrix AtomicDivMatrix- Sizes are: " << target.size1() << "x" << target.size2() << " for target and " << value.size1() << "x" << value.size2() << " for value " << std::endl; for(std::size_t i=0; i<target.size1(); ++i) { for(std::size_t j=0; j<target.size2(); ++j) { AtomicDiv(target(i,j), value(i,j)); } } } } // namespace Kratos. #endif // KRATOS_ATOMIC_UTILITIES_H_INCLUDED defined
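// Illustrative usage sketch (an assumption, not part of the header): the
// utilities above let many OpenMP threads accumulate into shared targets
// without an explicit critical section. Only the surrounding loop is
// hypothetical; AtomicAdd is used exactly as defined above.
#ifdef KRATOS_ATOMIC_UTILITIES_EXAMPLE
#include <vector>
namespace Kratos {
inline double ExampleAtomicAccumulate(const std::vector<double>& rValues)
{
    double total = 0.0;
    array_1d<double,3> force;
    force[0] = force[1] = force[2] = 0.0;
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(rValues.size()); ++i) {
        AtomicAdd(total, rValues[i]);      // scalar: a real omp atomic
        array_1d<double,3> local;
        local[0] = rValues[i]; local[1] = 0.0; local[2] = -rValues[i];
        AtomicAdd(force, local);           // array_1d: component-wise atomics
    }
    return total + force[0];
}
} // namespace Kratos
#endif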
Q2_Solution_Batch.h
#pragma once #include <queue> #include <algorithm> #include <cassert> #include <numeric> #include <memory> #include <set> #include "utils.h" #include "load.h" #include "Q2_Solution.h" class Q2_Solution_Batch : public Q2_Solution { protected: static std::vector<uint64_t> convert_score_type_to_comment_id(const std::vector<score_type> &top_scores, const Q2_Input &input) { std::vector<uint64_t> top_scores_vector; top_scores_vector.reserve(top_count); // convert row indices to original comment IDs std::transform(top_scores.rbegin(), top_scores.rend(), std::back_inserter(top_scores_vector), [&input](const auto &score_tuple) { return input.comments[std::get<2>(score_tuple)].comment_id; }); return top_scores_vector; } static inline void compute_score_for_comment(const Q2_Input &input, GrB_Index comment_col, const GrB_Index *likes_comment_array_begin, const GrB_Index *likes_comment_array_end, const GrB_Index *likes_user_array_begin, std::vector<score_type> &top_scores) __attribute__ ((always_inline)) { // find tuple sequences of each comment in row-major array // users liking a comment are stored consecutively auto[likes_comment_begin, likes_comment_end] = std::equal_range(likes_comment_array_begin, likes_comment_array_end, comment_col); if (likes_comment_begin != likes_comment_end) { GrB_Index likes_count = std::distance(likes_comment_begin, likes_comment_end); // get position of first user liking that comment const GrB_Index *likes_user_begin = likes_user_array_begin + std::distance(likes_comment_array_begin, likes_comment_begin); // extract friendships submatrix of users liking the comment GBxx_Object<GrB_Matrix> friends_overlay_graph = GB(GrB_Matrix_new, GrB_BOOL, likes_count, likes_count); ok(GrB_Matrix_extract(friends_overlay_graph.get(), GrB_NULL, GrB_NULL, input.friends_matrix.get(), likes_user_begin, likes_count, likes_user_begin, likes_count, GrB_NULL)); // assuming that all component_ids will be in [0, n) GBxx_Object<GrB_Vector> components_vector = GB(LAGraph_ConnectedComponents, to_LAGraph(std::move(friends_overlay_graph), LAGRAPH_ADJACENCY_UNDIRECTED).get(), nullptr); GrB_Index nvals; #ifndef NDEBUG nvals = input.likes_num; ok(GrB_Vector_nvals(&nvals, components_vector.get())); assert(nvals == likes_count); GrB_Index n; ok(GrB_Vector_size(&n, components_vector.get())); assert(n == likes_count); #endif std::vector<uint64_t> components(likes_count), component_sizes(likes_count); // nullptr: SuiteSparse extension nvals = likes_count; ok(GrB_Vector_extractTuples_UINT64(nullptr, components.data(), &nvals, components_vector.get())); assert(nvals == likes_count); // count size of each component for (auto component_id:components) ++component_sizes[component_id]; std::transform(component_sizes.begin(), component_sizes.end(), component_sizes.begin(), [](uint64_t n) { return n * n; }); uint64_t score = std::accumulate(component_sizes.begin(), component_sizes.end(), uint64_t()); add_score_to_toplist(top_scores, std::make_tuple(score, input.comments[comment_col].timestamp, comment_col)); } } public: using Q2_Solution::Q2_Solution; virtual void compute_score_for_all_comments(const GrB_Index *likes_comment_array_begin, const GrB_Index *likes_comment_array_end, const GrB_Index *likes_user_array_begin, std::vector<score_type> &top_scores) const { int nthreads; ok(LAGraph_GetNumThreads(&nthreads, nullptr)); #pragma omp parallel num_threads(nthreads) { std::vector<score_type> top_scores_local; #pragma omp for schedule(dynamic) for (GrB_Index comment_col = 0; comment_col < input.comments_size(); 
++comment_col) { compute_score_for_comment(input, comment_col, likes_comment_array_begin, likes_comment_array_end, likes_user_array_begin, top_scores_local); } #pragma omp critical(Q2_add_score_to_toplist) for (auto score : top_scores_local) { add_score_to_toplist(top_scores, score); } } } std::vector<score_type> calculate_score() { std::vector<score_type> top_scores; std::unique_ptr<GrB_Index[]> likes_trg_comment_columns{new GrB_Index[input.likes_num]}, likes_src_user_columns{new GrB_Index[input.likes_num]}; GrB_Index *likes_comment_array_begin = likes_trg_comment_columns.get(), *likes_comment_array_end = likes_trg_comment_columns.get() + input.likes_num, *likes_user_array_begin = likes_src_user_columns.get(); // nullptr to avoid extracting matrix values (SuiteSparse extension) GrB_Index nvals = input.likes_num; // extract likes edges row-wise, users liking a comment are stored consecutively ok(GrB_Matrix_extractTuples_BOOL(likes_trg_comment_columns.get(), likes_src_user_columns.get(), nullptr, &nvals, input.likes_matrix_tran.get())); assert(nvals == input.likes_num); compute_score_for_all_comments(likes_comment_array_begin, likes_comment_array_end, likes_user_array_begin, top_scores); // if comments with likes are not enough collect comments without like if (top_scores.size() < top_count) { for (GrB_Index comment_col = 0; comment_col < input.comments_size(); ++comment_col) { if (std::none_of(top_scores.begin(), top_scores.end(), [comment_col](auto const &tuple) { return std::get<2>(tuple) == comment_col; })) { // try to add this comment if not present add_score_to_toplist(top_scores, std::make_tuple(0, input.comments[comment_col].timestamp, comment_col)); } } } sort_top_scores(top_scores); return top_scores; } std::vector<uint64_t> initial_calculation() override { return convert_score_type_to_comment_id(calculate_score(), input); } std::vector<uint64_t> update_calculation(int iteration, const Q2_Input::Update_Type &current_updates) override { return convert_score_type_to_comment_id(calculate_score(), input); } };
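// Minimal standalone sketch (an assumption, for illustration only) of the
// reduction pattern compute_score_for_all_comments() uses above: each thread
// fills a private top-list, then merges it into the shared list inside a
// named critical section so the merge never races.
#ifdef Q2_TOPLIST_PATTERN_SKETCH
#include <cstddef>
#include <vector>

template <class Score, class Producer>
void parallel_toplist(std::size_t n, const Producer &produce,
                      std::vector<Score> &top) {
    #pragma omp parallel
    {
        std::vector<Score> local;
        #pragma omp for schedule(dynamic)
        for (std::size_t i = 0; i < n; ++i) {
            produce(i, local);              // thread-private, no locking
        }
        #pragma omp critical(toplist_merge) // one merge per thread
        for (const Score &s : local) {
            top.push_back(s);               // stand-in for add_score_to_toplist
        }
    }
}
#endif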
bench.c
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> //------------------------------------------------------------------------------------------------------------------------------ #include <omp.h> #ifdef __MPI #include <mpi.h> #endif //------------------------------------------------------------------------------------------------------------------------------ #include "defines.h" #include "box.h" #include "mg.h" #include "operators.h" //------------------------------------------------------------------------------------------------------------------------------ int main(int argc, char **argv){ int MPI_Rank=0; int MPI_Tasks=1; int OMP_Threads = 1; #pragma omp parallel { #pragma omp master { OMP_Threads = omp_get_num_threads(); } } #ifdef __MPI #warning Compiling for MPI... int MPI_threadingModel = -1; //int MPI_threadingModelRequested = MPI_THREAD_SINGLE; //int MPI_threadingModelRequested = MPI_THREAD_SERIALIZED; int MPI_threadingModelRequested = MPI_THREAD_FUNNELED; //int MPI_threadingModelRequested = MPI_THREAD_MULTIPLE; #ifdef __MPI_THREAD_MULTIPLE MPI_threadingModelRequested = MPI_THREAD_MULTIPLE; #endif MPI_Init_thread(&argc, &argv, MPI_threadingModelRequested, &MPI_threadingModel); MPI_Comm_size(MPI_COMM_WORLD, &MPI_Tasks); MPI_Comm_rank(MPI_COMM_WORLD, &MPI_Rank); if(MPI_threadingModel>MPI_threadingModelRequested)MPI_threadingModel=MPI_threadingModelRequested; if(MPI_Rank==0){ if(MPI_threadingModelRequested == MPI_THREAD_MULTIPLE )printf("Requested MPI_THREAD_MULTIPLE, "); else if(MPI_threadingModelRequested == MPI_THREAD_SINGLE )printf("Requested MPI_THREAD_SINGLE, "); else if(MPI_threadingModelRequested == MPI_THREAD_FUNNELED )printf("Requested MPI_THREAD_FUNNELED, "); else if(MPI_threadingModelRequested == MPI_THREAD_SERIALIZED)printf("Requested MPI_THREAD_SERIALIZED, "); else if(MPI_threadingModelRequested == MPI_THREAD_MULTIPLE )printf("Requested MPI_THREAD_MULTIPLE, "); else printf("Requested Unknown MPI Threading Model (%d), ",MPI_threadingModelRequested); if(MPI_threadingModel == MPI_THREAD_MULTIPLE )printf("got MPI_THREAD_MULTIPLE\n"); else if(MPI_threadingModel == MPI_THREAD_SINGLE )printf("got MPI_THREAD_SINGLE\n"); else if(MPI_threadingModel == MPI_THREAD_FUNNELED )printf("got MPI_THREAD_FUNNELED\n"); else if(MPI_threadingModel == MPI_THREAD_SERIALIZED)printf("got MPI_THREAD_SERIALIZED\n"); else if(MPI_threadingModel == MPI_THREAD_MULTIPLE )printf("got MPI_THREAD_MULTIPLE\n"); else printf("got Unknown MPI Threading Model (%d)\n",MPI_threadingModel); fflush(stdout); } #ifdef __MPI_THREAD_MULTIPLE if( (MPI_threadingModelRequested == MPI_THREAD_MULTIPLE) && (MPI_threadingModel != MPI_THREAD_MULTIPLE) ){MPI_Finalize();exit(0);} #endif #endif int log2_subdomain_dim = 6; int subdomains_per_rank_in_i=256 / (1<<log2_subdomain_dim); int subdomains_per_rank_in_j=256 / (1<<log2_subdomain_dim); int subdomains_per_rank_in_k=256 / (1<<log2_subdomain_dim); int ranks_in_i=1; int ranks_in_j=1; int ranks_in_k=1; if(argc==2){ log2_subdomain_dim=atoi(argv[1]); subdomains_per_rank_in_i=256 / (1<<log2_subdomain_dim); subdomains_per_rank_in_j=256 / (1<<log2_subdomain_dim); subdomains_per_rank_in_k=256 / 
(1<<log2_subdomain_dim); }else if(argc==5){ log2_subdomain_dim=atoi(argv[1]); subdomains_per_rank_in_i=atoi(argv[2]); subdomains_per_rank_in_j=atoi(argv[3]); subdomains_per_rank_in_k=atoi(argv[4]); }else if(argc==8){ log2_subdomain_dim=atoi(argv[1]); subdomains_per_rank_in_i=atoi(argv[2]); subdomains_per_rank_in_j=atoi(argv[3]); subdomains_per_rank_in_k=atoi(argv[4]); ranks_in_i=atoi(argv[5]); ranks_in_j=atoi(argv[6]); ranks_in_k=atoi(argv[7]); }else if(argc!=1){ if(MPI_Rank==0){printf("usage: ./a.out [log2_subdomain_dim] [subdomains per rank in i,j,k] [ranks in i,j,k]\n");} #ifdef __MPI MPI_Finalize(); #endif exit(0); } /* if(log2_subdomain_dim>7){ if(MPI_Rank==0){printf("error, log2_subdomain_dim(%d)>7\n",log2_subdomain_dim);} #ifdef __MPI MPI_Finalize(); #endif exit(0); } */ if(ranks_in_i*ranks_in_j*ranks_in_k != MPI_Tasks){ if(MPI_Rank==0){printf("error, ranks_in_i*ranks_in_j*ranks_in_k(%d*%d*%d=%d) != MPI_Tasks(%d)\n",ranks_in_i,ranks_in_j,ranks_in_k,ranks_in_i*ranks_in_j*ranks_in_k,MPI_Tasks);} #ifdef __MPI MPI_Finalize(); #endif exit(0); } if(MPI_Rank==0)printf("%d MPI Tasks of %d threads\n",MPI_Tasks,OMP_Threads); int subdomain_dim_i=1<<log2_subdomain_dim; int subdomain_dim_j=1<<log2_subdomain_dim; int subdomain_dim_k=1<<log2_subdomain_dim; // fine dim = 128 64 32 16 8 4 // levels = 6 5 4 3 2 1 int log2_coarse_dim = 2; // i.e. coarsen to 4^3 //int log2_coarse_dim = 1; // i.e. coarsen to 2^3 int levels_in_vcycle=1+log2_subdomain_dim-log2_coarse_dim; // ie 1+log2(fine grid size)-log2(bottom grid size) if(MPI_Rank==0){printf("truncating the v-cycle at %d^3 subdomains\n",1<<log2_coarse_dim);fflush(stdout);} //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - int box; domain_type domain_1 ; domain_type domain_CA; int boundary_conditions[3] = {__BOUNDARY_PERIODIC,__BOUNDARY_PERIODIC,__BOUNDARY_PERIODIC}; // i-, j-, and k-directions //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - create_domain(&domain_1 , subdomain_dim_i,subdomain_dim_j,subdomain_dim_k, subdomains_per_rank_in_i,subdomains_per_rank_in_j,subdomains_per_rank_in_k, ranks_in_i,ranks_in_j,ranks_in_k, MPI_Rank, boundary_conditions, __NumGrids,1,levels_in_vcycle); create_domain(&domain_CA, subdomain_dim_i,subdomain_dim_j,subdomain_dim_k, subdomains_per_rank_in_i,subdomains_per_rank_in_j,subdomains_per_rank_in_k, ranks_in_i,ranks_in_j,ranks_in_k, MPI_Rank, boundary_conditions, __NumGrids,4,levels_in_vcycle); double h0=1.0/((double)(domain_1.dim.i)); //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if(MPI_Rank==0){printf("initializing alpha, beta, RHS ...");fflush(stdout);} double a=0.9; double b=0.9; initialize_problem(&domain_1 ,0,h0,a,b); initialize_problem(&domain_CA,0,h0,a,b); if(MPI_Rank==0){printf("done\n");fflush(stdout);} //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - MGBuild(&domain_1 ,a,b,h0); // restrictions, dominant eigenvalue, etc... MGBuild(&domain_CA,a,b,h0); // restrictions, dominant eigenvalue, etc... //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - int s,sMax=2; #ifdef __MPI sMax=4; #endif //Make initial an guess for u(=0)... Solve Lu=f to precision of 1e-10...print the benchmarking timing results... 
MGResetTimers(&domain_1 );for(s=0;s<sMax;s++){zero_grid(&domain_1 ,0,__u); MGSolve(&domain_1 ,__u,__f,a,b,1e-15);}print_timing(&domain_1 ); MGResetTimers(&domain_CA);for(s=0;s<sMax;s++){zero_grid(&domain_CA,0,__u); MGSolve(&domain_CA,__u,__f,a,b,1e-15);}print_timing(&domain_CA); //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - destroy_domain(&domain_1 ); destroy_domain(&domain_CA); //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #ifdef __MPI MPI_Finalize(); #endif //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - return(0); }
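/* A minimal standalone sketch (not part of the benchmark above) illustrating
 * the level-count rule it uses: levels_in_vcycle = 1 + log2(fine dim) -
 * log2(coarse dim), matching the "fine dim / levels" table in the source.
 * All names below are hypothetical. */
#include <stdio.h>

int main(void){
  int log2_coarse_dim = 2;                      /* truncate the v-cycle at 4^3 */
  for(int log2_fine = 2; log2_fine <= 7; log2_fine++){
    int levels = 1 + log2_fine - log2_coarse_dim;
    printf("fine dim = %3d  ->  levels = %d\n", 1 << log2_fine, levels);
  }
  return 0;
}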
common.h
#pragma once
/// MIT License
/// Copyright (c) 2021 Thanh Long Le (longlt00502@gmail.com)

#if (HAVE_OPENMP)

#include <functional>
#include <vector>
#include <atomic>
#include <thread>
#include <chrono>
#include <fstream>
#include <cassert>
#include <condition_variable>
#include <mutex>
#include <cstring>
#include <string>
#include <omp.h>

#include <tll.h>

extern bool gPlotting;

#define NOP_LOOP() for(int i__=0; i__<0x100; i__++) __asm__("nop")

namespace tll::test {

template <int num_of_threads>
std::pair<size_t, size_t> fifing(
        const std::function<size_t(int, size_t, size_t, size_t, size_t)> &do_push, /// called by producer threads
        const std::function<size_t(int, size_t, size_t, size_t, size_t)> &do_pop,  /// called by consumer threads
        const std::function<bool(int)> &is_prod,                                   /// true == producer
        const std::function<void()> &do_wait,                                      /// e.g. std::this_thread::yield()
        const std::function<void()> &do_dump,
        size_t max_size,
        // std::vector<double> &speed_lst,
        // std::vector<double> &throughputs,
        std::vector<double> &time_lst,
        std::vector<size_t> &tt_count_lst,
        bool scale=true)
{
    static_assert(num_of_threads > 0);
    std::atomic<size_t> running_prod{0}, tt_push_count{0}, tt_pop_count{0}, tt_push_size{0}, tt_pop_size{0};
    tll::util::Counter<1, std::chrono::duration<double, std::ratio<1, 1>>> counter; /// second
    std::atomic<bool> is_done{false};
    std::condition_variable cv;
    std::mutex cv_m;
    std::thread dumpt = std::thread{[&](){
        std::unique_lock<std::mutex> lk(cv_m);
        while(! cv.wait_for(lk, std::chrono::seconds(10), [&]{return is_done.load();}) )
        {
            do_dump();
        }
    }};

    counter.start();
    #pragma omp parallel num_threads(num_of_threads)
    {
        const int kTid = omp_get_thread_num();
        size_t loop_num = 0;
        size_t lc_tt_size = 0;
        if(is_prod(kTid)) /// Producer
        {
            LOGVB("Producer: %s", tll::util::str_tidcpu().data());
            running_prod.fetch_add(1);
            for(;tt_push_size.load(std::memory_order_relaxed) < (max_size);)
            {
                size_t ret = do_push(kTid, loop_num, lc_tt_size, tt_push_count.load(std::memory_order_relaxed), tt_push_size.load(std::memory_order_relaxed));
                if(ret)
                {
                    tt_push_count.fetch_add(1, std::memory_order_relaxed);
                    tt_push_size.fetch_add(ret, std::memory_order_relaxed);
                    lc_tt_size+=ret;
                    // NOP_LOOP();
                    do_wait();
                }
                loop_num++;
            }
            running_prod.fetch_add(-1);
            // LOGD("Prod Done");
        }
        else /// Consumer
        {
            LOGVB("Consumer: %s", tll::util::str_tidcpu().data());
            for(; running_prod.load(std::memory_order_relaxed) > 0
                  || (tt_pop_count.load(std::memory_order_relaxed) < tt_push_count.load(std::memory_order_relaxed))
                  || (tt_pop_size.load(std::memory_order_relaxed) < max_size) ;)
            {
                size_t ret = do_pop(kTid, loop_num, lc_tt_size, tt_pop_count.load(std::memory_order_relaxed), tt_pop_size.load(std::memory_order_relaxed));
                if(ret)
                {
                    tt_pop_count.fetch_add(1, std::memory_order_relaxed);
                    tt_pop_size.fetch_add(ret, std::memory_order_relaxed);
                    lc_tt_size+=ret;
                    // NOP_LOOP();
                    do_wait();
                }
                loop_num++;
            }
            // LOGD("Cons Done");
        }
        LOGVB(" - Done: %s", tll::util::str_tidcpu().data());
    }
    double tt_time = counter.elapsed().count();

    if(do_push)
    {
        tt_count_lst.push_back(tt_push_count.load());
        // speed_lst.push_back(tt_push_size.load() / tt_time);
        // throughputs.push_back(tt_push_count.load() / tt_time);
    }
    if(do_pop)
    {
        tt_count_lst.push_back(tt_pop_count.load());
        // speed_lst.push_back(tt_pop_size.load() / tt_time);
        // throughputs.push_back(tt_push_count.load() / tt_time);
    }

    if(scale) tt_time /= num_of_threads;
    time_lst.push_back(tt_time);
    // LOGD("%f", tt_time);
    is_done.store(true);
    cv.notify_all();
    dumpt.join();
    return {tt_push_size.load(),
tt_pop_size.load()}; } template <typename T> void plot_data(const std::string &file_name, const std::string &title, const std::string &y_title, std::vector<std::string> &column_lst, const std::vector<int> &x_axes, const std::vector<T> &data) { if(!gPlotting) return; std::ofstream ofs{file_name, std::ios::out | std::ios::binary}; assert(ofs.is_open()); // assert(x_axes.size() == column_lst.size()); // assert(x_axes.size() * column_lst.size() == data.size()); size_t col_size = data.size() / x_axes.size(); if(column_lst.size() < col_size) { for(size_t i=column_lst.size(); i<col_size; i++) { column_lst.push_back(std::to_string(i)); } } // ofs << tll::util::stringFormat("#%d\n", column_lst.size()); // ofs << tll::util::stringFormat("#%d\n", NUM_CPU); // auto col_size = column_lst.size(); // auto x_size = data.size() / col_size; ofs << "#" << col_size << "\n"; ofs << "#" << NUM_CPU << "\n"; ofs << "#" << title << "\n"; ofs << "#" << y_title << "\n"; ofs << "#Number of threads\n"; ofs << "\"\" "; for(int i=0; i<col_size; i++) { ofs << "\"" << column_lst[i] << "\" "; } ofs << "\n"; // LOGD("%ld %ld", col_size, x_axes.size()); for(int i=0; i<x_axes.size(); i++) { ofs << x_axes[i] << " "; for(int j=0; j<col_size; j++) ofs << data[i*col_size + j] << " "; ofs << "\n"; } ofs.flush(); // return; int cmd = 0; cmd = system(tll::util::stringFormat("./plothist.sh %s", file_name.data()).data()); cmd = system(tll::util::stringFormat("./plot.sh %s", file_name.data()).data()); } } /// tll::test #endif
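// Standalone sketch of the skeleton that tll::test::fifing wraps above: one
// OpenMP parallel region in which each thread picks a producer or consumer
// role from its id, with relaxed atomics carrying the running totals. This is
// a hypothetical simplification (one producer, one consumer, a bare counter
// instead of a real FIFO) and does not depend on tll.h.
#include <atomic>
#include <cstdio>
#include <omp.h>

int main()
{
    std::atomic<size_t> pushed{0}, popped{0};
    std::atomic<int> running_prod{1};       // one producer, counted up front
    const size_t kMax = size_t(1) << 20;

    #pragma omp parallel num_threads(2)
    {
        if(omp_get_thread_num() == 0)       // producer
        {
            while(pushed.load(std::memory_order_relaxed) < kMax)
                pushed.fetch_add(1, std::memory_order_relaxed);
            running_prod.fetch_sub(1);
        }
        else                                // consumer: drain until producer is done
        {
            while(running_prod.load(std::memory_order_relaxed) > 0 ||
                  popped.load(std::memory_order_relaxed) <
                  pushed.load(std::memory_order_relaxed))
            {
                if(popped.load(std::memory_order_relaxed) <
                   pushed.load(std::memory_order_relaxed))
                    popped.fetch_add(1, std::memory_order_relaxed);
            }
        }
    }
    printf("pushed=%zu popped=%zu\n", pushed.load(), popped.load());
    return 0;
}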
task_reduction1.c
// RUN: %libomp-compile-and-run // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8 #include <stdio.h> #include <stdlib.h> int a = 0, b = 1; int main(int argc, char **argv) { #pragma omp parallel #pragma omp single { #pragma omp taskgroup task_reduction(+: a) task_reduction(*: b) { int i; for (i = 1; i <= 5; ++i) { #pragma omp task in_reduction(+: a) in_reduction(*: b) { a += i; b *= i; #pragma omp task in_reduction(+: a) { a += i; } } } } } if (a != 30) { fprintf(stderr, "error: a != 30. Instead a = %d\n", a); exit(EXIT_FAILURE); } if (b != 120) { fprintf(stderr, "error: b != 120. Instead b = %d\n", b); exit(EXIT_FAILURE); } return EXIT_SUCCESS; }
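// Arithmetic behind the checks above: each of the five outer tasks adds i to
// "a" twice (once directly, once in its nested in_reduction child), so
// a = 2 * (1+2+3+4+5) = 30; "b" starts at 1 and is multiplied by each i
// exactly once, so b = 5! = 120.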
task-create.c
/* Copyright (c) 2015-2019, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Simone Atzeni (simone@cs.utah.edu), Joachim Protze (joachim.protze@tu-dresden.de), Jonas Hahnfeld (hahnfeld@itc.rwth-aachen.de), Ganesh Gopalakrishnan, Zvonimir Rakamaric, Dong H. Ahn, Gregory L. Lee, Ignacio Laguna, and Martin Schulz. LLNL-CODE-773957 All rights reserved. This file is part of Archer. For details, see https://pruners.github.io/archer. Please also read https://github.com/PRUNERS/archer/blob/master/LICENSE. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // RUN: %libarcher-compile-and-run | FileCheck %s #include <omp.h> #include <stdio.h> #include <unistd.h> int main(int argc, char* argv[]) { int var = 0; #pragma omp parallel num_threads(2) shared(var) #pragma omp master { var++; #pragma omp task shared(var) { var++; } // Give other thread time to steal the task. sleep(1); } fprintf(stderr, "DONE\n"); int error = (var != 2); return error; } // CHECK: DONE
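// Note (interpretation, not stated in the source): the master's increment of
// var happens before the child task is created, so the two increments are
// ordered by task-creation synchronization even when another thread steals
// the task. A race checker should therefore print DONE without reporting a
// race, and var == 2 must hold.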
fill_r_3c.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <stdio.h> #include <complex.h> #include "config.h" #include "cint.h" int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter); int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env); /* * out[naoi,naoj,naok,comp] in F-order */ void GTOr3c_fill_s1(int (*intor)(), double complex *out, double complex *buf, int comp, int ish, int jsh, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t nij = naoi * naoj; const int dims[] = {naoi, naoj, naok}; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; out += jp * naoi + ip; int ksh, k0; int shls[3]; shls[0] = ish; shls[1] = jsh; for (ksh = ksh0; ksh < ksh1; ksh++) { shls[2] = ksh; k0 = ao_loc[ksh ] - ao_loc[ksh0]; (*intor)(out+k0*nij, dims, shls, atm, natm, bas, nbas, env, cintopt, buf); } } static void zcopy_s2_igtj(double complex *out, double complex *in, int comp, int ip, int nij, int nijk, int di, int dj, int dk) { const size_t dij = di * dj; const size_t ip1 = ip + 1; int i, j, k, ic; double complex *pout, *pin; for (ic = 0; ic < comp; ic++) { for (k = 0; k < dk; k++) { pout = out + k * nij; pin = in + k * dij; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pout[j] = pin[j*di+i]; } pout += ip1 + i; } } out += nijk; in += dij * dk; } } static void zcopy_s2_ieqj(double complex *out, double complex *in, int comp, int ip, int nij, int nijk, int di, int dj, int dk) { const size_t dij = di * dj; const size_t ip1 = ip + 1; int i, j, k, ic; double complex *pout, *pin; for (ic = 0; ic < comp; ic++) { for (k = 0; k < dk; k++) { pout = out + k * nij; pin = in + k * dij; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { pout[j] = pin[j*di+i]; } pout += ip1 + i; } } out += nijk; in += dij * dk; } } /* * out[comp,naok,nij] in C-order * nij = i1*(i1+1)/2 - i0*(i0+1)/2 * [ \ ] * [**** ] * [***** ] * [*****. ] <= . 
may not be filled, if jsh-upper-bound < ish-upper-bound * [ \] */ void GTOr3c_fill_s2ij(int (*intor)(), double complex *out, double complex *buf, int comp, int ish, int jsh, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const int i0 = ao_loc[ish0]; const int i1 = ao_loc[ish1]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t off = i0 * (i0 + 1) / 2; const size_t nij = i1 * (i1 + 1) / 2 - off; const size_t nijk = nij * naok; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; out += ip * (ip + 1) / 2 - off + jp; int ksh, dk, k0; int shls[3]; dk = GTOmax_shell_dim(ao_loc, shls_slice, 3); double *cache = (double *)(buf + di * dj * dk * comp); shls[0] = ish; shls[1] = jsh; for (ksh = ksh0; ksh < ksh1; ksh++) { shls[2] = ksh; dk = ao_loc[ksh+1] - ao_loc[ksh]; k0 = ao_loc[ksh ] - ao_loc[ksh0]; (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache); if (ip != jp) { zcopy_s2_igtj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk); } else { zcopy_s2_ieqj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk); } } } void GTOr3c_fill_s2jk(int (*intor)(), double complex *out, double complex *buf, int comp, int ish, int jsh, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { fprintf(stderr, "GTOr3c_fill_s2jk not implemented\n"); exit(1); } void GTOr3c_drv(int (*intor)(), void (*fill)(), double complex *eri, int comp, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const int di = GTOmax_shell_dim(ao_loc, shls_slice, 3); const int cache_size = GTOmax_cache_size(intor, shls_slice, 3, atm, natm, bas, nbas, env); #pragma omp parallel { int ish, jsh, ij; double complex *buf = malloc(sizeof(double complex) * (di*di*di*comp + cache_size/2)); #pragma omp for schedule(dynamic) for (ij = 0; ij < nish*njsh; ij++) { ish = ij / njsh; jsh = ij % njsh; (*fill)(intor, eri, buf, comp, ish, jsh, shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env); } free(buf); } }
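/* Standalone sketch of the s2ij triangular packing used by GTOr3c_fill_s2ij
 * above: for the lower triangle (i >= j), element (i,j) is stored at offset
 * i*(i+1)/2 + j, which is what "ip*(ip+1)/2 - off + jp" computes relative to
 * the slice origin. Hypothetical demo, independent of libcint. */
#include <stdio.h>

int main(void)
{
        int n = 4;
        for (int i = 0; i < n; i++) {
                for (int j = 0; j <= i; j++) {
                        printf("(%d,%d) -> %d\n", i, j, i*(i+1)/2 + j);
                }
        }
        return 0;
}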
openMP.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int someMath(int num, int i){
  num = num * num * num * i;
  return num;
}

int main(int argc, char** argv){
  int i;
  if(argc < 2){
    fprintf(stderr, "usage: %s <num>\n", argv[0]);
    return 1;
  }
  int num = atoi(argv[1]);
  int data[10000];

  #pragma omp parallel
  {
    #pragma omp for
    for(i=0; i<10000; i++){
      data[i] = someMath(num,i);
    }
  }

  FILE *fp = fopen("data.csv","w+");
  if(fp == NULL){
    fprintf(stderr, "could not open data.csv\n");
    return 1;
  }
  for(i=0; i<10000; i++){
    fprintf(fp, "%d,",data[i]);
  }
  fprintf(fp, "\n");
  for(i=0; i<10000; i++){
    fprintf(fp, "%d,",data[i]);
  }
  fprintf(fp, "\n");
  fclose(fp);
  return 0;
}
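/* Hypothetical build & run for the demo above (flag spelling varies by
 * compiler; OMP_NUM_THREADS controls the team size):
 *   gcc -fopenmp openMP.c -o openmp_demo
 *   OMP_NUM_THREADS=4 ./openmp_demo 3
 */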
DRB012-minusminus-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* The -- operation is not protected, causing race condition. Data race pair: numNodes2@75 vs. numNodes2@75 */ #include <stdlib.h> int main(int argc, char* argv[]) { int i; int len=100; if (argc>1) len = atoi(argv[1]); int numNodes=len, numNodes2=0; int x[len]; for (i=0; i< len; i++) { if (i%2==0) x[i]=5; else x[i]= -5; } #pragma omp parallel for schedule(dynamic) for (i=numNodes-1 ; i>-1 ; --i) { if (x[i]<=0) { numNodes2-- ; } } return 0; }
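/* For reference, a data-race-free variant of the kernel above (the benchmark
 * file itself is intentionally racy): giving numNodes2 a reduction makes each
 * thread decrement a private copy that is combined at the end. Hypothetical
 * sketch, not part of DataRaceBench. */
#include <stdlib.h>

int main(int argc, char* argv[])
{
  int i;
  int len = 100;
  if (argc > 1) len = atoi(argv[1]);
  int numNodes = len, numNodes2 = 0;
  int x[len];
  for (i = 0; i < len; i++) {
    if (i % 2 == 0) x[i] = 5;
    else x[i] = -5;
  }
  /* private copies start at 0; decrements are combined by + at the end */
  #pragma omp parallel for schedule(dynamic) reduction(+: numNodes2)
  for (i = numNodes - 1; i > -1; --i) {
    if (x[i] <= 0) numNodes2--;
  }
  return 0;
}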
kdtree_eigen.h
/* kdtree_eigen.h
 *
 * Author: Fabian Meyer
 * Created On: 08 Nov 2018
 * License: MIT
 *
 * Implementation is based on cKDtree of the scipy project and the splitting
 * midpoint rule described in "Analysis of Approximate Nearest Neighbor
 * Searching with Clustered Point Sets" by Songrit Maneewongvatana and David M.
 * Mount.
 */

#ifndef KDT_KDTREE_EIGEN_H_
#define KDT_KDTREE_EIGEN_H_

#include <Eigen/Geometry>
#include <vector>
#include <algorithm>
#include <cmath>
#include <cassert>
#include <stdexcept>
#ifdef _OPENMP
#include <omp.h>
#endif

namespace kdt
{
    /** Functor for Manhattan distance. This is the same as the L1 Minkowski
      * distance but more efficient. */
    template <typename Scalar>
    struct ManhattenDistance
    {
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
        typedef typename Vector::Index Index;

        Scalar unrooted(const Scalar *vecA, const Scalar *vecB, const Index len) const
        {
            Scalar result = 0.0;
            for(Index i = 0; i < len; ++i)
            {
                Scalar diff = vecA[i] - vecB[i];
                result += power(diff);
            }
            return result;
        }

        Scalar rooted(const Scalar *vecA, const Scalar *vecB, const Index len) const
        {
            return unrooted(vecA, vecB, len);
        }

        Scalar root(const Scalar val) const
        {
            return val;
        }

        Scalar power(const Scalar val) const
        {
            return std::abs(val);
        }
    };

    /** Functor for Euclidean distance. This is the same as the L2 Minkowski
      * distance but more efficient. */
    template <typename Scalar>
    struct EuclideanDistance
    {
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
        typedef typename Vector::Index Index;

        Scalar unrooted(const Scalar *vecA, const Scalar *vecB, const Index len) const
        {
            Scalar result = 0.0;
            for(Index i = 0; i < len; ++i)
            {
                Scalar diff = vecA[i] - vecB[i];
                result += power(diff);
            }
            return result;
        }

        Scalar rooted(const Scalar *vecA, const Scalar *vecB, const Index len) const
        {
            return root(unrooted(vecA, vecB, len));
        }

        Scalar root(const Scalar val) const
        {
            return std::sqrt(val);
        }

        Scalar power(const Scalar val) const
        {
            return val * val;
        }
    };

    /** Functor for general Minkowski distance. The infinite version is only
      * available through the ChebyshevDistance functor. */
    template <typename Scalar, int P>
    struct MinkowskiDistance
    {
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
        typedef typename Vector::Index Index;

        Scalar unrooted(const Scalar *vecA, const Scalar *vecB, const Index len) const
        {
            Scalar result = 0.0;
            for(Index i = 0; i < len; ++i)
            {
                Scalar diff = vecA[i] - vecB[i];
                result += power(diff);
            }
            return result;
        }

        Scalar rooted(const Scalar *vecA, const Scalar *vecB, const Index len) const
        {
            return root(unrooted(vecA, vecB, len));
        }

        Scalar root(const Scalar val) const
        {
            return std::pow(val, 1.0 / static_cast<Scalar>(P));
        }

        Scalar power(const Scalar val) const
        {
            // |val|^P by repeated multiplication
            Scalar base = std::abs(val);
            Scalar result = base;
            for(int i = 1; i < P; ++i)
                result *= base;
            return result;
        }
    };

    /** Chebyshev distance functor. This is the same as the infinity
      * Minkowski distance. */
    template<typename Scalar>
    struct ChebyshevDistance
    {
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
        typedef typename Vector::Index Index;

        Scalar unrooted(const Scalar *vecA, const Scalar *vecB, const Index len) const
        {
            Scalar result = 0.0;
            for(Index i = 0; i < len; ++i)
            {
                Scalar diff = vecA[i] - vecB[i];
                result = std::max(result, power(diff));
            }
            return result;
        }

        Scalar rooted(const Scalar *vecA, const Scalar *vecB, const Index len) const
        {
            return root(unrooted(vecA, vecB, len));
        }

        Scalar root(const Scalar val) const
        {
            return val;
        }

        Scalar power(const Scalar val) const
        {
            return std::abs(val);
        }
    };

    /** Class for performing k nearest neighbour searches. */
    template<typename Scalar, typename Distance=EuclideanDistance<Scalar>>
    class KDTree
    {
    public:
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
        typedef typename Matrix::Index Index;
        typedef Eigen::Matrix<Index, Eigen::Dynamic, Eigen::Dynamic> MatrixI;
        typedef Eigen::Matrix<Index, Eigen::Dynamic, 1> VectorI;

        /** Struct representing a node in the KDTree.
          * It can be either an inner node or a leaf node. */
        struct Node
        {
            /** Indices of data points in this leaf node. */
            Index startIdx;
            Index length;
            /** Left child of this inner node. */
            Index left;
            /** Right child of this inner node. */
            Index right;
            /** Axis of the axis aligned splitting hyper plane. */
            Index splitaxis;
            /** Translation of the axis aligned splitting hyper plane. */
            Scalar splitpoint;

            Node()
                : startIdx(0), length(0), left(-1), right(-1),
                splitaxis(-1), splitpoint(0)
            { }

            /** Constructor for leaf nodes */
            Node(const Index startIdx, const Index length)
                : startIdx(startIdx), length(length), left(-1), right(-1),
                splitaxis(-1), splitpoint(0)
            { }

            /** Constructor for inner nodes */
            Node(const Index splitaxis, const Scalar splitpoint,
                const Index left, const Index right)
                : startIdx(0), length(0), left(left), right(right),
                splitaxis(splitaxis), splitpoint(splitpoint)
            { }

            bool isLeaf() const { return !hasLeft() && !hasRight(); }
            bool isInner() const { return hasLeft() && hasRight(); }
            bool hasLeft() const { return left >= 0; }
            bool hasRight() const { return right >= 0; }
        };

    private:
        class QueryHeap
        {
        private:
            Index *indices_;
            Scalar *distances_;
            size_t maxSize_;
            size_t size_;

        public:
            QueryHeap(Index *indices, Scalar *distances, const size_t maxSize)
                : indices_(indices), distances_(distances),
                maxSize_(maxSize), size_(0)
            { }

            void push(const Index idx, const Scalar dist)
            {
                if(full())
                    throw std::runtime_error("heap is full");

                // add new value at the end
                indices_[size_] = idx;
                distances_[size_] = dist;
                ++size_;

                // upheap
                size_t k = size_ - 1;
                while(k > 0 && distances_[(k - 1) / 2] < dist)
                {
                    size_t tmp = (k - 1) / 2;
                    distances_[k] = distances_[tmp];
                    indices_[k] = indices_[tmp];
                    k = tmp;
                }
                distances_[k] = dist;
                indices_[k] = idx;
            }

            void pop()
            {
                if(empty())
                    throw std::runtime_error("heap is empty");

                // replace first element with last
                distances_[0] = distances_[size_-1];
                indices_[0] = indices_[size_-1];
                --size_;

                // downheap
                size_t k = 0;
                Scalar dist = distances_[0];
                Index idx = indices_[0];
                while(2 * k + 1 < size_)
                {
                    size_t j = 2 * k + 1;
                    if(j + 1 < size_ && distances_[j+1] > distances_[j])
                        ++j;
                    // j now references the greatest child
                    if(dist >= distances_[j])
                        break;
                    distances_[k] = distances_[j];
                    indices_[k] = indices_[j];
                    k = j;
                }
                distances_[k] = dist;
                indices_[k] = idx;
            }

            Scalar front() const
            {
                if(empty())
                    throw std::runtime_error("heap is empty");
                return distances_[0];
            }

            bool full() const { return size_ >= maxSize_; }
            bool empty() const { return size_ == 0; }
            size_t size() const { return size_; }
            void clear() { size_ = 0; }

            void sort()
            {
                size_t cnt = size_;
                for(size_t i = 0; i < cnt; ++i)
                {
                    Index idx = indices_[0];
                    Scalar dist = distances_[0];
                    pop();
                    indices_[cnt - i - 1] = idx;
                    distances_[cnt - i - 1] = dist;
                }
            }
        };

        Matrix dataCopy_;
        const Matrix *data_;
        std::vector<Index> indices_;
        std::vector<Node> nodes_;

        Index bucketSize_;
        bool sorted_;
        bool compact_;
        bool balanced_;
        bool takeRoot_;
        int threads_;
        Scalar maxDist_;
        Scalar maxDistP_;

        Distance distance_;

        /** Finds the minimum and maximum values of each dimension (row) in
          * the data matrix. Only respects the columns specified by the index
          * vector. */
        void findDataMinMax(const Index startIdx,
            const Index length,
            Vector &mins,
            Vector &maxes) const
        {
            assert(length > 0);
            assert(startIdx >= 0);
            assert(static_cast<size_t>(startIdx + length) <= indices_.size());

            const Matrix &data = *data_;

            // initialize mins and maxes with the first indexed column
            mins = data.col(indices_[startIdx]);
            maxes = mins;

            // search for min / max values in data
            for(Index i = 0; i < length; ++i)
            {
                // retrieve data index
                Index col = indices_[startIdx + i];
                assert(col >= 0 && col < data.cols());

                // check min and max for each dimension individually
                for(Index j = 0; j < data.rows(); ++j)
                {
                    Scalar val = data(j, col);
                    mins(j) = val < mins(j) ? val : mins(j);
                    maxes(j) = val > maxes(j) ? val : maxes(j);
                }
            }
        }

        Index buildInnerNode(const Index startIdx,
            const Index length,
            const Vector &mins,
            const Vector &maxes)
        {
            assert(startIdx >= 0);
            assert(length > 0);
            assert(static_cast<size_t>(startIdx + length) <= indices_.size());
            assert(maxes.size() == mins.size());

            const Matrix &data = *data_;

            // create node
            Index nodeIdx = nodes_.size();
            nodes_.push_back(Node());

            // search for the axis with the largest extent
            Index splitaxis = 0;
            Scalar splitsize = 0;
            for(Index i = 0; i < maxes.size(); ++i)
            {
                Scalar diff = maxes(i) - mins(i);
                if(diff > splitsize)
                {
                    splitaxis = i;
                    splitsize = diff;
                }
            }

            // retrieve the corresponding values
            Scalar minval = mins(splitaxis);
            Scalar maxval = maxes(splitaxis);
            // check if min and max are the same
            // this basically means that all data points are the same
            if(minval == maxval)
            {
                nodes_[nodeIdx] = Node(startIdx, length);
                return nodeIdx;
            }

            // determine split point
            Scalar splitpoint;
            // check if tree should be balanced
            if(balanced_)
            {
                // use median as splitpoint
                auto compPred =
                    [&data, splitaxis](const Index lhs, const Index rhs)
                    { return data(splitaxis, lhs) < data(splitaxis, rhs); };
                std::sort(indices_.begin() + startIdx,
                    indices_.begin() + startIdx + length,
                    compPred);

                Index idx = indices_[startIdx + length / 2];
                splitpoint = data(splitaxis, idx);
            }
            else
            {
                // use sliding midpoint rule
                splitpoint = (minval + maxval) / 2;
            }

            Index leftIdx = startIdx;
            Index rightIdx = startIdx + length - 1;
            while(leftIdx <= rightIdx)
            {
                Scalar leftVal = data(splitaxis, indices_[leftIdx]);
                Scalar rightVal = data(splitaxis, indices_[rightIdx]);

                if(leftVal < splitpoint)
                {
                    // left value is less than the split point
                    // keep it on the left side
                    ++leftIdx;
                }
                else if(rightVal >= splitpoint)
                {
                    // right value is greater than or equal to the split point
                    // keep it on the right side
                    --rightIdx;
                }
                else
                {
                    // left value is greater than or equal to the split point
                    // and right value is less than the split point
                    // simply swap sides
                    Index tmpIdx = indices_[leftIdx];
                    indices_[leftIdx] = indices_[rightIdx];
                    indices_[rightIdx] = tmpIdx;
                    ++leftIdx;
                    --rightIdx;
                }
            }

            if(leftIdx == startIdx)
            {
                // no values on left side, resolve trivial split
                // find value with minimum distance to splitpoint
                Index minIdx = startIdx;
                splitpoint = data(splitaxis, indices_[minIdx]);
                for(Index i = 0; i < length; ++i)
                {
                    Index idx = startIdx + i;
                    Scalar val = data(splitaxis, indices_[idx]);
                    if(val < splitpoint)
                    {
                        minIdx = idx;
                        splitpoint = val;
                    }
                }

                // put value with minimum distance on the left
                // this way there is exactly one value on the left
                Index tmpIdx = indices_[startIdx];
                indices_[startIdx] = indices_[minIdx];
                indices_[minIdx] = tmpIdx;
                leftIdx = startIdx + 1;
                rightIdx = startIdx;
            }
            else if(leftIdx == startIdx + length)
            {
                // no values on right side, resolve trivial split
                // find value with maximum distance to splitpoint
                Index maxIdx = startIdx + length - 1;
                splitpoint = data(splitaxis, indices_[maxIdx]);
                for(Index i = 0; i < length; ++i)
                {
                    Index idx = startIdx + i;
                    Scalar val = data(splitaxis, indices_[idx]);
                    if(val > splitpoint)
                    {
                        maxIdx = idx;
                        splitpoint = val;
                    }
                }

                // put value with maximum distance on the right
                // this way there is exactly one value on the right
                Index tmpIdx = indices_[startIdx + length - 1];
                indices_[startIdx + length - 1] = indices_[maxIdx];
                indices_[maxIdx] = tmpIdx;
                leftIdx = startIdx + length - 1;
                rightIdx = startIdx + length - 2;
            }

            Index leftNode = -1;
            Index rightNode = -1;

            Index leftStart = startIdx;
            Index rightStart = leftIdx;
            Index leftLength = leftIdx - startIdx;
            Index rightLength = length - leftLength;

            if(compact_)
            {
                // recompute mins and maxes to make the tree more compact
                Vector minsN, maxesN;
                findDataMinMax(leftStart, leftLength, minsN, maxesN);
                leftNode = buildR(leftStart, leftLength, minsN, maxesN);

                findDataMinMax(rightStart, rightLength, minsN, maxesN);
                rightNode = buildR(rightStart, rightLength, minsN, maxesN);
            }
            else
            {
                // just re-use mins and maxes, but set the split axis to the
                // value of the splitpoint
                Vector mids(maxes.size());
                for(Index i = 0; i < maxes.size(); ++i)
                    mids(i) = maxes(i);
                mids(splitaxis) = splitpoint;
                leftNode = buildR(leftStart, leftLength, mins, mids);

                for(Index i = 0; i < mins.size(); ++i)
                    mids(i) = mins(i);
                mids(splitaxis) = splitpoint;
                rightNode = buildR(rightStart, rightLength, mids, maxes);
            }

            nodes_[nodeIdx] = Node(splitaxis, splitpoint, leftNode, rightNode);
            return nodeIdx;
        }

        Index buildR(const Index startIdx,
            const Index length,
            const Vector &mins,
            const Vector &maxes)
        {
            // check for base case
            if(length <= bucketSize_)
            {
                nodes_.push_back(Node(startIdx, length));
                return nodes_.size() - 1;
            }
            else
                return buildInnerNode(startIdx, length, mins, maxes);
        }

        void queryLeafNode(const Node &node,
            const Scalar *queryPoint,
            const Index len,
            QueryHeap &dataHeap) const
        {
            assert(node.isLeaf());

            const Matrix &data = *data_;
            const Scalar *dataRaw = data.data();

            // go through all points in this leaf node and do brute force search
            for(Index i = 0; i < node.length; ++i)
            {
                size_t idx = node.startIdx + i;
                assert(idx < indices_.size());

                // retrieve index of the current data point
                Index dataIdx = indices_[idx];
                const Scalar *dataPoint = &dataRaw[dataIdx * len];
                Scalar dist = distance_.unrooted(queryPoint, dataPoint, len);

                // check if point is in range if max distance was set
                bool isInRange = maxDist_ <= 0 || dist < maxDistP_;
                // check if this node was an improvement if heap is already full
                bool isImprovement = !dataHeap.full() || dist < dataHeap.front();

                if(isInRange && isImprovement)
                {
                    if(dataHeap.full())
                        dataHeap.pop();
                    dataHeap.push(dataIdx, dist);
                }
            }
        }

        void queryInnerNode(const Node &node,
            const Scalar *queryPoint,
            const Index len,
            QueryHeap &dataHeap) const
        {
            assert(node.isInner());

            Scalar splitval = queryPoint[node.splitaxis];

            // check if right or left child should be visited
            bool visitRight = splitval >= node.splitpoint;
            if(visitRight)
                queryR(nodes_[node.right], queryPoint, len, dataHeap);
            else
                queryR(nodes_[node.left], queryPoint, len, dataHeap);

            // get distance to split point
            Scalar splitdist = distance_.power(splitval - node.splitpoint);

            // check if node is in range if max distance was set
            bool isInRange = maxDist_ <= 0 || splitdist < maxDistP_;
            // check if this node was an improvement if heap is already full
            bool isImprovement = !dataHeap.full() || splitdist < dataHeap.front();

            if(isInRange && isImprovement)
            {
                if(visitRight)
                    queryR(nodes_[node.left], queryPoint, len, dataHeap);
                else
                    queryR(nodes_[node.right], queryPoint, len, dataHeap);
            }
        }

        void queryR(const Node &node,
            const Scalar *queryPoint,
            const Index len,
            QueryHeap &dataHeap) const
        {
            if(node.isLeaf())
                queryLeafNode(node, queryPoint, len, dataHeap);
            else
                queryInnerNode(node, queryPoint, len, dataHeap);
        }

        Index depthR(const Node &node) const
        {
            if(node.isLeaf())
                return 1;
            else
            {
                Index left = depthR(nodes_[node.left]);
                Index right = depthR(nodes_[node.right]);
                return std::max(left, right) + 1;
            }
        }

    public:
        /** Constructs an empty KDTree. */
        KDTree()
            : dataCopy_(), data_(nullptr), indices_(), nodes_(),
            bucketSize_(16), sorted_(true), compact_(true), balanced_(false),
            takeRoot_(true), threads_(0), maxDist_(0), maxDistP_(0),
            distance_()
        { }

        /** Constructs KDTree with the given data. This does not build the
          * index of the tree.
          * @param data NxM matrix, M points of dimension N
          * @param copy if true copies the data, otherwise assumes static data */
        KDTree(const Matrix &data, const bool copy=false)
            : KDTree()
        {
            setData(data, copy);
        }

        ~KDTree()
        { }

        /** Set the maximum amount of data points per leaf in the tree (aka
          * bucket size).
          * @param bucketSize amount of points per leaf. */
        void setBucketSize(const Index bucketSize)
        {
            bucketSize_ = bucketSize;
        }

        /** Set if the points returned by the queries should be sorted
          * according to their distance to the query points.
          * @param sorted sort query results */
        void setSorted(const bool sorted)
        {
            sorted_ = sorted;
        }

        /** Set if the tree should be built as balanced as possible.
          * This increases build time, but decreases search time.
          * @param balanced set true to build a balanced tree */
        void setBalanced(const bool balanced)
        {
            balanced_ = balanced;
        }

        /** Set if the distances after the query should be rooted or not.
          * Taking the root of the distances increases query time, but the
          * function will return true distances instead of their powered
          * versions.
          * @param takeRoot set true if root should be taken else false */
        void setTakeRoot(const bool takeRoot)
        {
            takeRoot_ = takeRoot;
        }

        /** Set if the tree should be built with compact leaf nodes.
          * This increases build time, but makes leaf nodes denser (more
          * points per leaf), so fewer node visits are necessary.
          * @param compact set true to build a tree with compact leaves */
        void setCompact(const bool compact)
        {
            compact_ = compact;
        }

        /** Set the amount of threads that should be used for building and
          * querying the tree.
          * OpenMP has to be enabled for this to work.
          * @param threads amount of threads, 0 for optimal choice */
        void setThreads(const unsigned int threads)
        {
            threads_ = threads;
        }

        /** Set the maximum distance for querying the tree.
          * The search will be pruned if the maximum distance is set to any
          * positive number.
          * @param maxDist maximum distance, <= 0 for no limit */
        void setMaxDistance(const Scalar maxDist)
        {
            maxDist_ = maxDist;
            maxDistP_ = distance_.power(maxDist);
        }

        /** Set the data points used for this tree.
          * This does not build the tree.
          * @param data NxM matrix, M points of dimension N
          * @param copy if true data is copied, assumes static data otherwise */
        void setData(const Matrix &data, const bool copy = false)
        {
            clear();
            if(copy)
            {
                dataCopy_ = data;
                data_ = &dataCopy_;
            }
            else
            {
                data_ = &data;
            }
        }

        /** Builds the search index of the tree.
          * Data has to be set and must be non-empty. */
        void build()
        {
            if(data_ == nullptr)
                throw std::runtime_error("cannot build KDTree; data not set");
            if(data_->size() == 0)
                throw std::runtime_error("cannot build KDTree; data is empty");

            clear();
            indices_.resize(data_->cols());
            for(size_t i = 0; i < indices_.size(); ++i)
                indices_[i] = i;

            Vector mins, maxes;
            Index startIdx = 0;
            Index length = indices_.size();

            findDataMinMax(startIdx, length, mins, maxes);
            buildR(startIdx, length, mins, maxes);
        }

        /** Queries the tree for the nearest neighbours of the given query
          * points.
          *
          * The tree has to be built before it can be queried.
          *
          * The query points have to have the same dimension as the data
          * points of the tree.
          *
          * The result matrices will be resized appropriately.
          * Indices and distances will be set to -1 if less than knn
          * neighbours were found.
          *
          * @param queryPoints NxM matrix, M points of dimension N
          * @param knn amount of neighbours to be found
          * @param indices KNNxM matrix, indices of neighbours in the data set
          * @param distances KNNxM matrix, distance between query points and
          *        neighbours */
        void query(const Matrix &queryPoints,
            const size_t knn,
            MatrixI &indices,
            Matrix &distances) const
        {
            if(nodes_.size() == 0)
                throw std::runtime_error("cannot query KDTree; not built yet");
            if(queryPoints.rows() != dimension())
                throw std::runtime_error("cannot query KDTree; data and query points do not have same dimension");

            Index queryNum = queryPoints.cols();
            Index dim = queryPoints.rows();

            distances.setConstant(knn, queryNum, -1);
            indices.setConstant(knn, queryNum, -1);

            const Scalar *queryPointsRaw = queryPoints.data();
            Index *indicesRaw = indices.data();
            Scalar *distsRaw = distances.data();

            // num_threads requires a positive argument; fall back to the
            // runtime default when threads_ == 0 ("optimal choice")
            #ifdef _OPENMP
            const int nThreads = threads_ > 0 ? threads_ : omp_get_max_threads();
            #else
            const int nThreads = 1;
            #endif

            #pragma omp parallel for num_threads(nThreads)
            for(Index i = 0; i < queryNum; ++i)
            {
                const Scalar *queryPoint = &queryPointsRaw[i * dim];
                Scalar *distPoint = &distsRaw[i * knn];
                Index *idxPoint = &indicesRaw[i * knn];

                // create heap to find nearest neighbours
                QueryHeap dataHeap(idxPoint, distPoint, knn);

                queryR(nodes_[0], queryPoint, dim, dataHeap);

                if(sorted_)
                    dataHeap.sort();

                if(takeRoot_)
                {
                    for(size_t j = 0; j < knn; ++j)
                    {
                        if(distPoint[j] < 0)
                            break;
                        distPoint[j] = distance_.root(distPoint[j]);
                    }
                }
            }
        }

        /** Clears the tree. */
        void clear()
        {
            nodes_.clear();
        }

        /** Returns the amount of data points stored in the search index.
          * @return number of data points */
        Index size() const
        {
            return data_ == nullptr ? 0 : data_->cols();
        }

        /** Returns the dimension of the data points in the search index.
          * @return dimension of data points */
        Index dimension() const
        {
            return data_ == nullptr ? 0 : data_->rows();
        }

        /** Returns the maximum depth of the tree.
          * @return maximum depth of the tree */
        Index depth() const
        {
            return nodes_.size() == 0 ? 0 : depthR(nodes_.front());
        }

        const std::vector<Node>& nodes()
        {
            return nodes_;
        }
    };

    typedef KDTree<float> KDTreef;
    typedef KDTree<double> KDTreed;
}

#endif
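// Hypothetical usage sketch for the KDTree above: index five 2-D points and
// query the three nearest neighbours of a single query point. Assumes this
// header is on the include path as "kdtree_eigen.h".
#include <iostream>
#include "kdtree_eigen.h"

int main()
{
    kdt::KDTreed::Matrix points(2, 5);
    points << 0, 1, 2, 3, 4,
              0, 1, 0, 1, 0;            // five points, one per column

    kdt::KDTreed tree(points);          // no copy; points must outlive tree
    tree.setThreads(1);                 // single-threaded query
    tree.build();

    kdt::KDTreed::Matrix query(2, 1);
    query << 2.1, 0.2;

    kdt::KDTreed::MatrixI idx;
    kdt::KDTreed::Matrix dist;
    tree.query(query, 3, idx, dist);    // idx/dist are 3x1 after the call

    std::cout << "indices:   " << idx.transpose() << "\n"
              << "distances: " << dist.transpose() << "\n";
    return 0;
}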
GB_binop__iseq_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__iseq_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__iseq_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__iseq_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__iseq_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_fp32) // A*D function (colscale): GB (_AxD__iseq_fp32) // D*A function (rowscale): GB (_DxB__iseq_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__iseq_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__iseq_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_fp32) // C=scalar+B GB (_bind1st__iseq_fp32) // C=scalar+B' GB (_bind1st_tran__iseq_fp32) // C=A+scalar GB (_bind2nd__iseq_fp32) // C=A'+scalar GB (_bind2nd_tran__iseq_fp32) // C type: float // A type: float // A pattern? 0 // B type: float // B pattern? 0 // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_FP32 || GxB_NO_ISEQ_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__iseq_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__iseq_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__iseq_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__iseq_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__iseq_fp32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__iseq_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = (*((float *) 
beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__iseq_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__iseq_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__iseq_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__iseq_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__iseq_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__iseq_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__iseq_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses 
GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__iseq_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
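/* Standalone sketch of the bind1st kernel pattern generated above: bind the
 * scalar x as the first operand of z = (x == y) and apply it across a
 * bitmap-masked array in a parallel loop. Hypothetical demo, independent of
 * GraphBLAS. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    float x = 2.0f ;
    float Bx [6] = {1, 2, 2, 3, 2, 4} ;
    int8_t Bb [6] = {1, 1, 0, 1, 1, 1} ;    /* bitmap: is entry present? */
    float Cx [6] = {0} ;
    int64_t p ;
    #pragma omp parallel for schedule(static)
    for (p = 0 ; p < 6 ; p++)
    {
        if (!Bb [p]) continue ;             /* skip entries not present */
        Cx [p] = (x == Bx [p]) ;
    }
    for (p = 0 ; p < 6 ; p++) printf ("%g ", Cx [p]) ;
    printf ("\n") ;
    return (0) ;
}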
GB_unop__bnot_int8_int8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__bnot_int8_int8) // op(A') function: GB (_unop_tran__bnot_int8_int8) // C type: int8_t // A type: int8_t // cast: int8_t cij = aij // unaryop: cij = ~(aij) #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = ~(x) ; // casting #define GB_CAST(z, aij) \ int8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = aij ; \ Cx [pC] = ~(z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BNOT || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__bnot_int8_int8) ( int8_t *Cx, // Cx and Ax may be aliased const int8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = ~(z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = ~(z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__bnot_int8_int8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
point_point_search.h
// // Project Name: Kratos // Last Modified by: $Author: clabra $ // Date: $Date: 2007-03-29 19:37:47 $ // Revision: $Revision: 1.2 $ // // #if !defined(KRATOS_POINT_POINT_SEARCH_H_INCLUDED) #define KRATOS_POINT_POINT_SEARCH_H_INCLUDED // System includes #include <string> #include <iostream> // include kratos definitions #include "includes/define.h" // Project includes #include "utilities/openmp_utils.h" // Configures #include "spatial_containers/spatial_search.h" #include "point_configure.h" // Search #include "spatial_containers/bins_dynamic_objects.h" #include "spatial_containers/bins_dynamic.h" // External includes /* Timer defines */ #include "utilities/timer.h" #ifdef CUSTOMTIMER #define KRATOS_TIMER_START(t) Timer::Start(t); #define KRATOS_TIMER_STOP(t) Timer::Stop(t); #else #define KRATOS_TIMER_START(t) #define KRATOS_TIMER_STOP(t) #endif namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** Detail class definition. */ class PointPointSearch: public SpatialSearch { public: ///@name Type Definitions ///@{ /// Pointer definition of PointPointSearch KRATOS_CLASS_POINTER_DEFINITION(PointPointSearch); typedef PointType* PointPointerType; typedef std::vector<PointPointerType>* PointVector; typedef std::vector<PointPointerType>::iterator PointIterator; typedef double* DistanceVector; typedef double* DistanceIterator; //Configure Types typedef PointConfigure<3> PointConfigureType; //Bin Types typedef BinsObjectDynamic<PointConfigureType> PointBinsType; typedef PointerVectorSet<Point, IndexedObject> PointSetType; ///@} ///@name Life Cycle ///@{ /// Default constructor. PointPointSearch(){} /// Destructor. 
~PointPointSearch(){} void SearchPointsImplementation( NodesContainerType const& r_nodes, NodesContainerType const& r_nodes_to_find, RadiusArrayType const& radius, VectorResultNodesContainerType& r_results, VectorDistanceType& r_results_distances) { KRATOS_TRY int max_n_of_neigh_nodes = r_nodes_to_find.size(); NodesContainerType::ContainerType& nodes = const_cast <NodesContainerType::ContainerType&> (r_nodes.GetContainer()); NodesContainerType::ContainerType& nodes_to_find = const_cast <NodesContainerType::ContainerType&> (r_nodes_to_find.GetContainer()); PointSetType::ContainerType nodes_temp; PointSetType::ContainerType nodes_to_find_temp; nodes_temp.reserve(nodes.size()); nodes_to_find_temp.reserve(nodes_to_find.size()); for (NodesContainerType::ContainerType::iterator it = nodes.begin(); it != nodes.end(); ++it){ nodes_temp.push_back(*it); } for (NodesContainerType::ContainerType::iterator it = nodes_to_find.begin(); it != nodes_to_find.end(); ++it){ nodes_to_find_temp.push_back(*it); } PointBinsType bins(nodes_to_find_temp.begin(), nodes_to_find_temp.end()); #pragma omp parallel { PointSetType::ContainerType local_results(max_n_of_neigh_nodes); DistanceType local_results_distances(max_n_of_neigh_nodes); std::size_t n_of_results = 0; #pragma omp for for (int i = 0; i < static_cast<int>(nodes.size()); ++i){ PointSetType::ContainerType::iterator i_results_begin = local_results.begin(); DistanceType::iterator i_distances_results_begin = local_results_distances.begin(); n_of_results = bins.SearchObjectsInRadiusExclusive(nodes_temp[i], radius[i], i_results_begin, i_distances_results_begin, max_n_of_neigh_nodes); r_results[i].reserve(n_of_results); for (PointSetType::ContainerType::iterator it = local_results.begin(); it != local_results.begin() + n_of_results; ++it){ Node<3>::Pointer p_node = Kratos::dynamic_pointer_cast<Node<3> >(*it); r_results[i].push_back(p_node); } r_results_distances[i].insert(r_results_distances[i].begin(), local_results_distances.begin(), local_results_distances.begin() + n_of_results); } } KRATOS_CATCH("") } /// Turn back information as a string. virtual std::string Info() const override { std::stringstream buffer; buffer << "PointPointSearch" ; return buffer.str(); } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const override {rOStream << "PointPointSearch";} /// Print object's data. virtual void PrintData(std::ostream& rOStream) const override {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. PointPointSearch& operator=(PointPointSearch const& rOther) { return *this; } /// Copy constructor. PointPointSearch(PointPointSearch const& rOther) { *this = rOther; } }; // Class PointPointSearch } // namespace Kratos. #endif // KRATOS_POINT_POINT_SEARCH_H_INCLUDED defined
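// Standalone sketch of the pattern SearchPointsImplementation uses above:
// each thread owns scratch buffers allocated once inside the parallel region,
// and the omp for loop writes only to per-index result slots, so no locking
// is needed. Hypothetical simplification, independent of Kratos.
#include <vector>
#include <cstdio>

int main()
{
    const int n = 8;
    std::vector<std::vector<int>> results(n);   // preallocated outer vector

    #pragma omp parallel
    {
        std::vector<int> scratch(16);           // thread-private scratch
        #pragma omp for
        for(int i = 0; i < n; ++i){
            int found = i % 4;                  // stand-in for a bins search
            for(int k = 0; k < found; ++k) scratch[k] = i * 10 + k;
            // each i is handled by exactly one thread, so this write is safe
            results[i].assign(scratch.begin(), scratch.begin() + found);
        }
    }

    for(int i = 0; i < n; ++i) printf("i=%d found=%zu\n", i, results[i].size());
    return 0;
}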
io.c
/****************************************************************************** * * * IO.C * * * * HDF5 OUTPUT AND RESTART * * * ******************************************************************************/ #include "decs.h" #include <hdf5.h> #define MAX_GRID_DIM (5) void write_array(void *data, const char *name, hsize_t rank, hsize_t *fdims, hsize_t *fstart, hsize_t *fcount, hsize_t *mdims, hsize_t *mstart, hsize_t type); void read_array(void *data, const char *name, hsize_t rank, hsize_t *fdims, hsize_t *fstart, hsize_t *fcount, hsize_t *mdims, hsize_t *mstart, hsize_t type); hsize_t product_hsize_t(hsize_t a[], int size); // Some macro tricks to drastically reduce number of lines of code #define TYPE_FLOAT H5T_NATIVE_FLOAT #define TYPE_DBL H5T_NATIVE_DOUBLE #define TYPE_INT H5T_NATIVE_INT #define TYPE_STR H5T_NATIVE_CHAR #define WRITE_HDR(x, type) \ write_array((void *)&x, #x, 1, fdims_hdr, fstart_hdr, fcount_hdr, mdims_hdr, \ mstart_hdr, type) #define READ_HDR(x, type) \ read_array((void *)&x, #x, 1, fdims_hdr, fstart_hdr, fcount_hdr, mdims_hdr, \ mstart_hdr, type) #define WRITE_ARRAY(x, rank, fdims, fstart, fcount, mdims, mstart, type) \ write_array((void *)x, #x, rank, fdims, fstart, fcount, mdims, mstart, type) #define WRITE_GRID(x, type) \ write_array((void *)x, #x, 3, fdims_grid, fstart_grid, fcount_grid, \ mdims_grid, mstart_grid, type) #define WRITE_GRID_NO_GHOSTS(x, type) \ write_array((void *)x, #x, 3, fdims_grid, fstart_grid, fcount_grid, \ mdims_grid_noghost, mstart_grid_noghost, type) #define WRITE_PRIM(x, type) \ write_array((void *)x, #x, 4, fdims_prim, fstart_prim, fcount_prim, \ mdims_prim, mstart_prim, type) #define WRITE_VEC(x, type) \ write_array((void *)x, #x, 4, fdims_vec, fstart_vec, fcount_vec, mdims_vec, \ mstart_vec, type) #define WRITE_RADG(x, type) \ write_array((void *)x, #x, 4, fdims_radg, fstart_vec, fcount_radg, \ mdims_radg, mstart_vec, type) #define WRITE_RADTYPEVEC(x, type) \ write_array((void *)x, #x, 4, fdims_radtypevec, fstart_vec, \ fcount_radtypevec, mdims_radtypevec, mstart_vec, type) #define WRITE_VEC_NO_GHOSTS(x, type) \ write_array((void *)x, #x, 4, fdims_vec, fstart_vec, fcount_vec, \ mdims_vec_noghost, mstart_vec_noghost, type) #define WRITE_TENSOR(x, type) \ write_array((void *)x, #x, 5, fdims_tens, fstart_tens, fcount_tens, \ mdims_tens, mstart_tens, type) #define WRITE_TENSOR_NO_GHOSTS(x, type) \ write_array((void *)x, #x, 5, fdims_tens, fstart_tens, fcount_tens, \ mdims_tens_noghost, mstart_tens_noghost, type) #define READ_ARRAY(x, rank, fdims, fstart, fcount, mdims, mstart, type) \ read_array((void *)x, #x, rank, fdims, fstart, fcount, mdims, mstart, type) #define READ_GRID(x, type) \ read_array((void *)x, #x, 3, fdims_grid, fstart_grid, fcount_grid, \ mdims_grid, mstart_grid, type) #define READ_PRIM(x, type) \ read_array((void *)x, #x, 4, fdims_prim, fstart_prim, fcount_prim, \ mdims_prim, mstart_prim, type) #define READ_VEC(x, type) \ read_array((void *)x, #x, 4, fdims_vec, fstart_vec, fcount_vec, mdims_vec, \ mstart_vec, type) #define READ_RADTYPEVEC(x, type) \ read_array((void *)x, #x, 4, fdims_radtypevec, fstart_vec, \ fcount_radtypevec, mdims_radtypevec, mstart_vec, type) hid_t file_id, plist_id; hid_t filespace, memspace; hid_t filespace_hdr, memspace_hdr; hsize_t hdr_dims[1] = {1}, zero = 0, one = 1; hsize_t mem_start[MAX_GRID_DIM], file_grid_start[MAX_GRID_DIM]; hsize_t file_grid_count[MAX_GRID_DIM], file_grid_dims[MAX_GRID_DIM]; hsize_t file_hdr_start[1] = {0}, file_hdr_count[1], file_hdr_dims[1] = {1}; hsize_t hdr_rank = 1, 
grid_rank; hsize_t fdims_grid[3] = {N1TOT, N2TOT, N3TOT}; hsize_t fstart_grid[3]; hsize_t fcount_grid[3] = {N1, N2, N3}; hsize_t mdims_grid[3] = {N1 + 2 * NG, N2 + 2 * NG, N3 + 2 * NG}; hsize_t mstart_grid[3] = {NG, NG, NG}; hsize_t mdims_grid_noghost[3] = {N1, N2, N3}; hsize_t mstart_grid_noghost[3] = {0, 0, 0}; hsize_t fdims_prim[4] = {N1TOT, N2TOT, N3TOT, NVAR}; hsize_t fstart_prim[4]; hsize_t fcount_prim[4] = {N1, N2, N3, NVAR}; hsize_t mdims_prim[4] = {N1 + 2 * NG, N2 + 2 * NG, N3 + 2 * NG, NVAR}; hsize_t mstart_prim[4] = {NG, NG, NG, 0}; hsize_t fdims_vec[4] = {N1TOT, N2TOT, N3TOT, NDIM}; hsize_t fstart_vec[4]; hsize_t fcount_vec[4] = {N1, N2, N3, NDIM}; hsize_t mdims_vec[4] = {N1 + 2 * NG, N2 + 2 * NG, N3 + 2 * NG, NDIM}; hsize_t mstart_vec[4] = {NG, NG, NG, 0}; hsize_t fdims_radtypevec[4] = {N1TOT, N2TOT, N3TOT, RAD_NUM_TYPES}; hsize_t fcount_radtypevec[4] = {N1, N2, N3, RAD_NUM_TYPES}; hsize_t mdims_radtypevec[4] = { N1 + 2 * NG, N2 + 2 * NG, N3 + 2 * NG, RAD_NUM_TYPES}; hsize_t fdims_radg[4] = {N1TOT, N2TOT, N3TOT, NDIM + NRADCOMP}; hsize_t fcount_radg[4] = {N1, N2, N3, NDIM + NRADCOMP}; hsize_t mdims_radg[4] = { N1 + 2 * NG, N2 + 2 * NG, N3 + 2 * NG, NDIM + NRADCOMP}; hsize_t mdims_vec_noghost[4] = {N1, N2, N3, NDIM}; hsize_t mstart_vec_noghost[4] = {0, 0, 0, 0}; hsize_t fdims_tens[5] = {N1TOT, N2TOT, N3TOT, NDIM, NDIM}; hsize_t fstart_tens[5]; hsize_t fcount_tens[5] = {N1, N2, N3, NDIM, NDIM}; hsize_t mdims_tens[5] = {N1 + 2 * NG, N2 + 2 * NG, N3 + 2 * NG, NDIM, NDIM}; hsize_t mstart_tens[5] = {NG, NG, NG, 0, 0}; hsize_t mdims_tens_noghost[5] = {N1, N2, N3, NDIM, NDIM}; hsize_t mstart_tens_noghost[5] = {0, 0, 0, 0, 0}; hsize_t fdims_hdr[1] = {1}; hsize_t fstart_hdr[1] = {0}; hsize_t fcount_hdr[1] = {1}; hsize_t mdims_hdr[1] = {1}; hsize_t mstart_hdr[1] = {0}; hid_t phfiletype, phmemtype, trackphfiletype, trackphmemtype; const char *vnams_base[] = {"RHO", "UU", "U1", "U2", "U3", "B1", "B2", "B3"}; const char *vnams[NVAR]; static char version[STRLEN]; static int dump_id = 0, restart_id = 0, restart_perm_id = 0, fdump_id = 0; #if RADIATION && TRACERS static int dumptrace_id = 0; #endif static grid_double_type divb; #if OUTPUT_EOSVARS static grid_double_type PRESS, ENT, TEMP, CS2; #endif #if RADIATION static grid_double_type tau_cool; static grid_double_type dtau_scatt, dtau_tot; static grid_double_type denominator_scatt, denominator_tot; #endif void init_io() { strcpy(dumpdir, "dumps/"); strcpy(restartdir, "restarts/"); strcpy(xmfdir, "dumps/xmf/"); #if RADIATION && TRACERS strcpy(tracerdir, "dumps/tracers/"); #endif strcpy(version, VERSION); int len = strlen(outputdir); memmove(dumpdir + len, dumpdir, strlen(dumpdir) + 1); memmove(restartdir + len, restartdir, strlen(restartdir) + 1); memmove(xmfdir + len, xmfdir, strlen(xmfdir) + 1); #if RADIATION && TRACERS memmove(tracerdir + len, tracerdir, strlen(tracerdir) + 1); #endif for (int n = 0; n < len; ++n) { dumpdir[n] = outputdir[n]; restartdir[n] = outputdir[n]; xmfdir[n] = outputdir[n]; #if RADIATION && TRACERS tracerdir[n] = outputdir[n]; #endif } if (mpi_io_proc()) { char mkdircall[STRLEN]; strcpy(mkdircall, "mkdir -p "); strcat(mkdircall, dumpdir); strcat(mkdircall, " "); strcat(mkdircall, restartdir); strcat(mkdircall, " "); strcat(mkdircall, xmfdir); #if RADIATION && TRACERS strcat(mkdircall, " "); strcat(mkdircall, tracerdir); #endif safe_system(mkdircall); } fstart_grid[0] = global_start[1]; fstart_grid[1] = global_start[2]; fstart_grid[2] = global_start[3]; fstart_prim[0] = global_start[1]; fstart_prim[1] = 
global_start[2]; fstart_prim[2] = global_start[3]; fstart_prim[3] = 0; fstart_vec[0] = global_start[1]; fstart_vec[1] = global_start[2]; fstart_vec[2] = global_start[3]; fstart_vec[3] = 0; fstart_tens[0] = global_start[1]; fstart_tens[1] = global_start[2]; fstart_tens[2] = global_start[3]; fstart_tens[3] = 0; fstart_tens[4] = 0; if (!mpi_io_proc()) fcount_hdr[0] = 0; filespace_hdr = H5Screate_simple(1, file_hdr_dims, NULL); file_hdr_count[0] = (mpi_io_proc() ? one : zero); H5Sselect_hyperslab(filespace_hdr, H5S_SELECT_SET, file_hdr_start, NULL, file_hdr_count, NULL); memspace_hdr = H5Screate_simple(hdr_rank, file_hdr_count, NULL); #if RADIATION // Create custom datatypes for arrays inside struct hsize_t array_dim[2] = {NSUP, NDIM}; hid_t array_tid = H5Tarray_create(H5T_NATIVE_DOUBLE, 2, array_dim); hsize_t int_vec_dim[1] = {NDIM}; hid_t int_vec_tid = H5Tarray_create(H5T_NATIVE_INT, 1, int_vec_dim); // Memory contiguous in file phfiletype = H5Tcreate(H5T_COMPOUND, sizeof(struct of_photon)); int offset = 0; // Function that accepts phfiletype, name, offset, size H5Tinsert(phfiletype, "X", offset, array_tid); offset += NSUP * NDIM * sizeof(double); H5Tinsert(phfiletype, "Kcov", offset, array_tid); offset += NSUP * NDIM * sizeof(double); H5Tinsert(phfiletype, "Kcon", offset, array_tid); offset += NSUP * NDIM * sizeof(double); H5Tinsert(phfiletype, "w", offset, H5T_NATIVE_DOUBLE); offset += sizeof(double); H5Tinsert(phfiletype, "KdotKprev", offset, H5T_NATIVE_DOUBLE); offset += sizeof(double); H5Tinsert(phfiletype, "type", offset, H5T_NATIVE_INT); offset += sizeof(int); H5Tinsert(phfiletype, "nscatt", offset, H5T_NATIVE_INT); offset += sizeof(int); H5Tinsert(phfiletype, "origin", offset, int_vec_tid); offset += NDIM * sizeof(int); H5Tinsert(phfiletype, "t0", offset, H5T_NATIVE_DOUBLE); offset += sizeof(double); H5Tinsert(phfiletype, "is_tracked", offset, H5T_NATIVE_INT); // Use HOFFSET to account for struct padding in memory phmemtype = H5Tcreate(H5T_COMPOUND, sizeof(struct of_photon)); H5Tinsert(phmemtype, "X", HOFFSET(struct of_photon, X), array_tid); H5Tinsert(phmemtype, "Kcov", HOFFSET(struct of_photon, Kcov), array_tid); H5Tinsert(phmemtype, "Kcon", HOFFSET(struct of_photon, Kcon), array_tid); H5Tinsert(phmemtype, "w", HOFFSET(struct of_photon, w), H5T_NATIVE_DOUBLE); H5Tinsert(phmemtype, "KdotKprev", HOFFSET(struct of_photon, KdotKprev), H5T_NATIVE_DOUBLE); H5Tinsert(phmemtype, "type", HOFFSET(struct of_photon, type), H5T_NATIVE_INT); H5Tinsert( phmemtype, "nscatt", HOFFSET(struct of_photon, nscatt), H5T_NATIVE_INT); H5Tinsert( phmemtype, "origin", HOFFSET(struct of_photon, origin), int_vec_tid); H5Tinsert(phmemtype, "t0", HOFFSET(struct of_photon, t0), H5T_NATIVE_DOUBLE); H5Tinsert(phmemtype, "is_tracked", HOFFSET(struct of_photon, is_tracked), H5T_NATIVE_INT); trackphfiletype = H5Tcreate(H5T_COMPOUND, sizeof(struct of_photon)); offset = 0; H5Tinsert(trackphfiletype, "X1", offset, H5T_NATIVE_DOUBLE); offset += sizeof(double); H5Tinsert(trackphfiletype, "X2", offset, H5T_NATIVE_DOUBLE); offset += sizeof(double); H5Tinsert(trackphfiletype, "X3", offset, H5T_NATIVE_DOUBLE); offset += sizeof(double); H5Tinsert(trackphfiletype, "nscatt", offset, H5T_NATIVE_INT); trackphmemtype = H5Tcreate(H5T_COMPOUND, sizeof(struct of_track_photon)); H5Tinsert(trackphmemtype, "X1", HOFFSET(struct of_track_photon, X1), H5T_NATIVE_DOUBLE); H5Tinsert(trackphmemtype, "X2", HOFFSET(struct of_track_photon, X2), H5T_NATIVE_DOUBLE); H5Tinsert(trackphmemtype, "X3", HOFFSET(struct of_track_photon, X3), 
H5T_NATIVE_DOUBLE); H5Tinsert(trackphmemtype, "nscatt", HOFFSET(struct of_track_photon, nscatt), H5T_NATIVE_INT); #endif // RADIATION BASELOOP { vnams[ip] = vnams_base[ip]; } #if NVAR_PASSIVE > 0 PASSLOOP { vnams[ipass] = PASSNAME(ipass); } #endif #if ELECTRONS vnams[KEL] = "KEL"; vnams[KTOT] = "KTOT"; #endif // ELECTRONS } #if RADIATION void track_ph() { #if TRACK_PH static int track_id = 0; // Count number of tracked photons on this processor int num_tracked = 0; #pragma omp parallel { struct of_photon *ph = photon_lists[omp_get_thread_num()]; while (ph != NULL) { if (ph->is_tracked == 1) { #pragma omp atomic num_tracked++; } ph = ph->next; } } // Create buffer for output struct of_track_photon *io_track = safe_malloc((size_t)num_tracked * sizeof(struct of_track_photon)); int nio = 0; double X[NDIM], Kcov[NDIM], Kcon[NDIM]; for (int n = 0; n < nthreads; n++) { struct of_photon *ph = photon_lists[n]; while (ph != NULL) { if (ph->is_tracked == 1) { get_X_K_interp(ph, t, P, X, Kcov, Kcon); io_track[nio].X1 = X[1]; io_track[nio].X2 = X[2]; io_track[nio].X3 = X[3]; io_track[nio].nscatt = ph->nscatt; nio++; } ph = ph->next; } } if (Nph_to_track > 0.) { char name[STRLEN]; char fname[STRLEN]; sprintf(fname, "trackph_%08d.h5", track_id); strcpy(name, dumpdir); strcat(name, fname); hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL); hid_t file_id = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id); H5Pclose(plist_id); // One dataset per processor; each processor must create all datasets hid_t *ph_dsets = safe_malloc((size_t)mpi_nprocs() * sizeof(hid_t)); for (int n = 0; n < mpi_nprocs(); n++) { char dsetnam[STRLEN]; sprintf(dsetnam, "trackph_%08d", n); int num_tracked_buf = num_tracked; mpi_int_broadcast_proc(&num_tracked_buf, n); hsize_t dims[1] = {num_tracked_buf}; hid_t space = H5Screate_simple(1, dims, NULL); ph_dsets[n] = H5Dcreate(file_id, dsetnam, trackphfiletype, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); H5Sclose(space); } if (num_tracked > 0) H5Dwrite(ph_dsets[mpi_myrank()], trackphmemtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, io_track); // Close and release resources free(io_track); for (int n = 0; n < mpi_nprocs(); n++) { H5Dclose(ph_dsets[n]); } free(ph_dsets); H5Fflush(file_id, H5F_SCOPE_GLOBAL); H5Fclose(file_id); track_id++; } #endif // TRACK_PH } #endif // RADIATION void dump_grid() { char name[STRLEN], fname[STRLEN]; sprintf(fname, "grid.h5"); strcpy(name, dumpdir); strcat(name, fname); if (mpi_io_proc()) { fprintf(stdout, "WRITING GEOMETRY TO %s\n", name); } hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL); file_id = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id); if (file_id < 0) { fprintf(stderr, "Could not create grid file! Exiting...\n"); exit(-1); } H5Pclose(plist_id); { double *Xharm = safe_malloc(N1 * N2 * N3 * (size_t)NDIM * sizeof(double)); int n = 0; ZLOOP { coord(i, j, k, CENT, &Xharm[n]); n += 4; } WRITE_VEC_NO_GHOSTS(Xharm, TYPE_DBL); free(Xharm); } { // If metric == Minkowski, then Xcart == Xharm double *Xcart = safe_malloc(N1 * N2 * N3 * (size_t)NDIM * sizeof(double)); double X[NDIM], Xcart_loc[NDIM]; int n = 0; ZLOOP { coord(i, j, k, CENT, X); cart_coord(X, Xcart_loc); /* #if METRIC == MKS { // poles should be at theta = 0 and pi // TODO: is this right? Maybe causes a weird offset. 
   if (global_start[2] == 0 && j == NG) Xcart_loc[2] = 0;
   if (global_stop[2] == N2TOT && j == N2 + NG) Xcart_loc[2] = M_PI;
  }
#endif // METRIC
*/
      for (int l = 0; l < NDIM; l++)
        Xcart[n + l] = Xcart_loc[l];
      n += 4;
    }
    WRITE_VEC_NO_GHOSTS(Xcart, TYPE_DBL);
    free(Xcart);
  }
  { // Face locations, in HARM and Cartesian coordinates
#define RANK (4)
    // metadata
    hsize_t fdims[RANK]  = {N1TOT + 1, N2TOT + 1, N3TOT + 1, NDIM};
    hsize_t fstart[RANK] = {
        global_start[1], global_start[2], global_start[3], 0};
    hsize_t fcount[RANK] = {N1, N2, N3, NDIM};
    hsize_t mdims[RANK]  = {N1, N2, N3, NDIM};
    for (int d = 0; d < 3; d++) {
      if (global_stop[d + 1] == fdims[d] - 1) {
        fcount[d]++;
        mdims[d]++;
      }
    }
    hsize_t mstart[RANK] = {0, 0, 0, 0};
    hsize_t memsize_tot  = product_hsize_t(mdims, RANK);
    // Values
    double *XFharm = safe_malloc(memsize_tot * sizeof(double));
    double *XFcart = safe_malloc(memsize_tot * sizeof(double));
    double  X[NDIM], Xcart_loc[NDIM];
    int     n = 0;
    ZSLOOP(0, mdims[0] - 1, 0, mdims[1] - 1, 0, mdims[2] - 1) {
      coord(i, j, k, CORN, X);
      cart_coord(X, Xcart_loc);
      for (int l = 0; l < NDIM; l++) {
        XFharm[n + l] = X[l];
        XFcart[n + l] = Xcart_loc[l];
      }
      n += 4;
    }
    WRITE_ARRAY(XFharm, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_DBL);
    WRITE_ARRAY(XFcart, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_DBL);
    free(XFharm);
    free(XFcart);
#undef RANK
  }
#if METRIC == MKS
  {
    double *Xbl = safe_malloc((size_t)N1 * N2 * N3 * NDIM * sizeof(double));
    int     n   = 0;
    ZLOOP {
      double X[NDIM], r, th;
      coord(i, j, k, CENT, X);
      bl_coord(X, &r, &th);
      Xbl[n + 1] = r;
      Xbl[n + 2] = th;
      Xbl[n + 3] = X[3];
      n += 4;
    }
    WRITE_VEC_NO_GHOSTS(Xbl, TYPE_DBL);
    free(Xbl);
  }
#endif
  {
    double *gcov =
        safe_malloc((size_t)N1 * N2 * N3 * NDIM * NDIM * sizeof(double));
    int n = 0;
    ZLOOP {
      DLOOP2 {
        gcov[n] = ggeom[i][j][CENT].gcov[mu][nu];
        n++;
      }
    }
    WRITE_TENSOR_NO_GHOSTS(gcov, TYPE_DBL);
    free(gcov);
  }
  {
    double *gcon =
        safe_malloc((size_t)N1 * N2 * N3 * NDIM * NDIM * sizeof(double));
    int n = 0;
    ZLOOP {
      DLOOP2 {
        gcon[n] = ggeom[i][j][CENT].gcon[mu][nu];
        n++;
      }
    }
    WRITE_TENSOR_NO_GHOSTS(gcon, TYPE_DBL);
    free(gcon);
  }
  {
    double *gdet = safe_malloc((size_t)N1 * N2 * N3 * sizeof(double));
    int     n    = 0;
    ZLOOP {
      gdet[n] = ggeom[i][j][CENT].g;
      n++;
    }
    WRITE_GRID_NO_GHOSTS(gdet, TYPE_DBL);
    free(gdet);
  }
  {
    double *alpha = safe_malloc((size_t)N1 * N2 * N3 * sizeof(double));
    int     n     = 0;
    ZLOOP {
      alpha[n] = ggeom[i][j][CENT].alpha;
      n++;
    }
    WRITE_GRID_NO_GHOSTS(alpha, TYPE_DBL);
    free(alpha);
  }
#if METRIC == MKS
  { // connection coefficients
#define RANK (6)
    hsize_t fdims[RANK]  = {N1TOT, N2TOT, N3TOT, NDIM, NDIM, NDIM};
    hsize_t fstart[RANK] = {
        global_start[1], global_start[2], global_start[3], 0, 0, 0};
    hsize_t fcount[RANK] = {N1, N2, N3, NDIM, NDIM, NDIM};
    hsize_t mdims[RANK]  = {N1, N2, N3, NDIM, NDIM, NDIM};
    hsize_t mstart[RANK] = {0, 0, 0, 0, 0, 0};
    double *Gamma =
        safe_malloc((size_t)N1 * N2 * N3 * NDIM * NDIM * NDIM * sizeof(double));
    int n = 0;
    ZLOOP {
      DLOOP3 {
        Gamma[n] = conn[i][j][mu][nu][sigma];
        n++;
      }
    }
    WRITE_ARRAY(Gamma, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_DBL);
    free(Gamma);
#undef RANK
  }
#endif // METRIC == MKS
#if METRIC == MKS
  {
    double  X[NDIM];
    double  Lambda_con_local[NDIM][NDIM];
    double  Lambda_cov_local[NDIM][NDIM];
    double *Lambda_h2bl_con =
        safe_malloc((size_t)N1 * N2 * N3 * NDIM * NDIM * sizeof(double));
    double *Lambda_h2bl_cov =
        safe_malloc((size_t)N1 * N2 * N3 * NDIM * NDIM * sizeof(double));
    int n = 0;
    ZLOOP {
      coord(i, j, k, CENT, X);
      jac_harm_to_bl(X, Lambda_cov_local, Lambda_con_local);
      DLOOP2 {
        Lambda_h2bl_con[n] = Lambda_con_local[mu][nu];
        Lambda_h2bl_cov[n] =
Lambda_cov_local[mu][nu]; n++; } } WRITE_TENSOR_NO_GHOSTS(Lambda_h2bl_con, TYPE_DBL); WRITE_TENSOR_NO_GHOSTS(Lambda_h2bl_cov, TYPE_DBL); free(Lambda_h2bl_con); free(Lambda_h2bl_cov); } { double X[NDIM]; double Lambda_con_local[NDIM][NDIM]; double Lambda_cov_local[NDIM][NDIM]; double *Lambda_bl2cart_con = safe_malloc((size_t)N1 * N2 * N3 * NDIM * NDIM * sizeof(double)); double *Lambda_bl2cart_cov = safe_malloc((size_t)N1 * N2 * N3 * NDIM * NDIM * sizeof(double)); int n = 0; ZLOOP { coord(i, j, k, CENT, X); jac_bl_to_cart(X, Lambda_cov_local, Lambda_con_local); DLOOP2 { Lambda_bl2cart_con[n] = Lambda_con_local[mu][nu]; Lambda_bl2cart_cov[n] = Lambda_cov_local[mu][nu]; n++; } } WRITE_TENSOR_NO_GHOSTS(Lambda_bl2cart_con, TYPE_DBL); WRITE_TENSOR_NO_GHOSTS(Lambda_bl2cart_cov, TYPE_DBL); free(Lambda_bl2cart_con); free(Lambda_bl2cart_cov); } #endif // METRIC == MKS { double X[NDIM]; double Lambda_con_local[NDIM][NDIM]; double Lambda_cov_local[NDIM][NDIM]; double *Lambda_h2cart_con = safe_malloc((size_t)N1 * N2 * N3 * NDIM * NDIM * sizeof(double)); double *Lambda_h2cart_cov = safe_malloc((size_t)N1 * N2 * N3 * NDIM * NDIM * sizeof(double)); int n = 0; ZLOOP { coord(i, j, k, CENT, X); jac_harm_to_cart(X, Lambda_cov_local, Lambda_con_local); DLOOP2 { Lambda_h2cart_con[n] = Lambda_con_local[mu][nu]; Lambda_h2cart_cov[n] = Lambda_cov_local[mu][nu]; n++; } } WRITE_TENSOR_NO_GHOSTS(Lambda_h2cart_con, TYPE_DBL); WRITE_TENSOR_NO_GHOSTS(Lambda_h2cart_cov, TYPE_DBL); free(Lambda_h2cart_con); free(Lambda_h2cart_cov); } H5Fflush(file_id, H5F_SCOPE_GLOBAL); H5Fclose(file_id); } void dump() { timer_start(TIMER_OUT); char name[STRLEN], fname[STRLEN]; sprintf(fname, "dump_%08d.h5", dump_id); strcpy(name, dumpdir); strcat(name, fname); if (mpi_io_proc()) { fprintf(stdout, "DUMP %s\n", name); } hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL); file_id = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id); if (file_id < 0) { fprintf(stderr, "Could not create dump file! 
Exiting...\n"); exit(-1); } H5Pclose(plist_id); WRITE_HDR(version, TYPE_STR); WRITE_HDR(metric, TYPE_STR); WRITE_HDR(tracers, TYPE_INT); WRITE_HDR(eos, TYPE_STR); int gamma_fallback = GAMMA_FALLBACK; WRITE_HDR(gamma_fallback, TYPE_INT); WRITE_HDR(nulnutype, TYPE_STR); #if METRIC == MKS int derefine_poles = DEREFINE_POLES; WRITE_HDR(derefine_poles, TYPE_INT); #endif int full_dump = (dump_id % DTf == 0); WRITE_HDR(full_dump, TYPE_INT); int electrons = ELECTRONS; WRITE_HDR(electrons, TYPE_INT); int radiation = RADIATION; WRITE_HDR(radiation, TYPE_INT); int nvar = NVAR; WRITE_HDR(nvar, TYPE_INT); int nvar_passive = NVAR_PASSIVE; WRITE_HDR(nvar_passive, TYPE_INT); int output_eosvars = OUTPUT_EOSVARS; WRITE_HDR(output_eosvars, TYPE_INT); #if DIAGNOSTICS_USE_RADTYPES int diagnostics_use_radtypes = DIAGNOSTICS_USE_RADTYPES; #else int diagnostics_use_radtypes = 0; #endif WRITE_HDR(diagnostics_use_radtypes, TYPE_INT); WRITE_HDR(t, TYPE_DBL); WRITE_HDR(tf, TYPE_DBL); WRITE_HDR(nstep, TYPE_INT); int N1tot = N1TOT; WRITE_HDR(N1tot, TYPE_INT); int N2tot = N2TOT; WRITE_HDR(N2tot, TYPE_INT); int N3tot = N3TOT; WRITE_HDR(N3tot, TYPE_INT); WRITE_HDR(startx[1], TYPE_DBL); WRITE_HDR(startx[2], TYPE_DBL); WRITE_HDR(startx[3], TYPE_DBL); WRITE_HDR(dx[1], TYPE_DBL); WRITE_HDR(dx[2], TYPE_DBL); WRITE_HDR(dx[3], TYPE_DBL); #if (METRIC == MKS) WRITE_HDR(Rin, TYPE_DBL); WRITE_HDR(Rout, TYPE_DBL); WRITE_HDR(Rout_vis, TYPE_DBL); WRITE_HDR(Reh, TYPE_DBL); WRITE_HDR(Risco, TYPE_DBL); WRITE_HDR(hslope, TYPE_DBL); WRITE_HDR(a, TYPE_DBL); WRITE_HDR(poly_xt, TYPE_DBL); WRITE_HDR(poly_alpha, TYPE_DBL); WRITE_HDR(mks_smooth, TYPE_DBL); #if NEED_UNITS WRITE_HDR(Mbh, TYPE_DBL); WRITE_HDR(mbh, TYPE_DBL); #endif // NEED_UNITS #if RADIATION WRITE_HDR(Rout_rad, TYPE_DBL); #endif // RADIATION #endif // METRIC #if EOS == EOS_TYPE_GAMMA || GAMMA_FALLBACK WRITE_HDR(gam, TYPE_DBL); #endif // GAMMA #if EOS == EOS_TYPE_POLYTROPE WRITE_HDR(poly_K, TYPE_DBL); WRITE_HDR(poly_gam, TYPE_DBL); #endif // POLYTROPE #if EOS == EOS_TYPE_TABLE WRITE_HDR(eospath, TYPE_STR); #endif // TABLE #if ELECTRONS WRITE_HDR(game, TYPE_DBL); WRITE_HDR(gamp, TYPE_DBL); #endif #if NEED_UNITS WRITE_HDR(L_unit, TYPE_DBL); WRITE_HDR(M_unit, TYPE_DBL); WRITE_HDR(RHO_unit, TYPE_DBL); WRITE_HDR(T_unit, TYPE_DBL); WRITE_HDR(U_unit, TYPE_DBL); WRITE_HDR(B_unit, TYPE_DBL); #if EOS == EOS_TYPE_TABLE WRITE_HDR(TEMP_unit, TYPE_DBL); #endif // EOS_TYPE_TABLE #if RADIATION WRITE_HDR(tp_over_te, TYPE_DBL); WRITE_HDR(Ne_unit, TYPE_DBL); WRITE_HDR(Thetae_unit, TYPE_DBL); #endif // RADIATION #endif WRITE_HDR(cour, TYPE_DBL); WRITE_HDR(DTd, TYPE_DBL); WRITE_HDR(DTl, TYPE_DBL); WRITE_HDR(DTr, TYPE_DBL); WRITE_HDR(DNr, TYPE_INT); WRITE_HDR(DTp, TYPE_INT); WRITE_HDR(DTf, TYPE_INT); WRITE_HDR(dump_cnt, TYPE_INT); WRITE_HDR(dump_id, TYPE_INT); WRITE_HDR(dt, TYPE_DBL); WRITE_HDR(failed, TYPE_INT); #if RADIATION int maxnscatt = MAXNSCATT; WRITE_HDR(maxnscatt, TYPE_INT); int nubins = NU_BINS; WRITE_HDR(nubins, TYPE_INT); int nth = NTH; WRITE_HDR(nth, TYPE_INT); int nphi = NPHI; WRITE_HDR(nphi, TYPE_INT); int nubins_spec = NU_BINS_SPEC; WRITE_HDR(nubins_spec, TYPE_INT); WRITE_HDR(numin, TYPE_DBL); WRITE_HDR(numax, TYPE_DBL); #endif #if NVAR_PASSIVE > 0 { // passive_type #define RANK (1) hsize_t fdims[RANK] = {NVAR_PASSIVE}; hsize_t fstart[RANK] = {0}; hsize_t fcount[RANK] = {NVAR_PASSIVE}; if (!mpi_io_proc()) { fcount[0] = 0; } hsize_t mdims[RANK] = {NVAR_PASSIVE}; hsize_t mstart[RANK] = {0}; WRITE_ARRAY( passive_type, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_INT); #undef RANK } 
#endif WRITE_PRIM(P, TYPE_FLOAT); { hid_t prim_plist_id = H5Pcreate(H5P_DATASET_ACCESS); hid_t prim_dset = H5Dopen(file_id, "P", prim_plist_id); hid_t strtype = H5Tcopy(H5T_C_S1); H5Tset_size(strtype, H5T_VARIABLE); hsize_t str_dims[1] = {NVAR}; hid_t prim_space = H5Screate_simple(1, str_dims, NULL); hid_t str_attr = H5Acreate( prim_dset, "vnams", strtype, prim_space, H5P_DEFAULT, H5P_DEFAULT); H5Awrite(str_attr, strtype, vnams); H5Aclose(str_attr); H5Sclose(prim_space); H5Dclose(prim_dset); H5Pclose(prim_plist_id); } if (full_dump) { #pragma omp parallel for collapse(3) ZLOOP divb[i][j][k] = flux_ct_divb(i, j, k); WRITE_GRID(divb, TYPE_FLOAT); #if OUTPUT_EOSVARS { #pragma omp parallel for collapse(3) ZLOOP { #if EOS == EOS_TYPE_TABLE EOS_SC_fill(P[i][j][k], extra[i][j][k]); #endif // EOS_TYPE_TABLE PRESS[i][j][k] = EOS_pressure_rho0_u( P[i][j][k][RHO], P[i][j][k][UU], extra[i][j][k]); ENT[i][j][k] = EOS_entropy_rho0_u(P[i][j][k][RHO], P[i][j][k][UU], extra[i][j][k]); TEMP[i][j][k] = EOS_temperature(P[i][j][k][RHO], P[i][j][k][UU], extra[i][j][k]); CS2[i][j][k] = EOS_sound_speed_rho0_u( P[i][j][k][RHO], P[i][j][k][UU], extra[i][j][k]); } WRITE_GRID(PRESS, TYPE_FLOAT); WRITE_GRID(ENT, TYPE_FLOAT); WRITE_GRID(TEMP, TYPE_FLOAT); WRITE_GRID(CS2, TYPE_FLOAT); } #endif #if ELECTRONS WRITE_GRID(Qvisc, TYPE_FLOAT); #if RADIATION WRITE_GRID(Qcoul, TYPE_FLOAT); #endif #endif WRITE_GRID(fail_save, TYPE_INT); current_calc(); WRITE_VEC(jcon, TYPE_FLOAT); #if RADIATION WRITE_GRID(Nsph, TYPE_INT); WRITE_GRID(nph, TYPE_FLOAT); { #define RANK (4) hsize_t fdims[RANK] = {MAXNSCATT + 2, N1TOT, N2TOT, N3TOT}; hsize_t fstart[RANK] = { 0, global_start[1], global_start[2], global_start[3]}; hsize_t fcount[RANK] = {MAXNSCATT + 2, N1, N2, N3}; hsize_t mdims[RANK] = { MAXNSCATT + 2, N1 + 2 * NG, N2 + 2 * NG, N3 + 2 * NG}; hsize_t mstart[RANK] = {0, NG, NG, NG}; WRITE_ARRAY(Jrad, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_FLOAT); #undef RANK } { ZLOOP { dtau_scatt[i][j][k] = 0.0; dtau_tot[i][j][k] = dtau_avg[0][i][j][k]; denominator_scatt[i][j][k] = SMALL; denominator_tot[i][j][k] = en_int_avg[0][i][j][k] + SMALL; SCATTLOOP { dtau_scatt[i][j][k] += dtau_avg[iscatt + 1][i][j][k]; dtau_tot[i][j][k] += dtau_avg[iscatt + 1][i][j][k]; denominator_scatt[i][j][k] += en_int_avg[iscatt + 1][i][j][k]; denominator_tot[i][j][k] += en_int_avg[iscatt + 1][i][j][k]; } denominator_scatt[i][j][k] *= DTd; denominator_tot[i][j][k] *= DTd; dtau_scatt[i][j][k] /= denominator_scatt[i][j][k]; dtau_tot[i][j][k] /= denominator_tot[i][j][k]; } WRITE_GRID(dtau_scatt, TYPE_DBL); WRITE_GRID(dtau_tot, TYPE_DBL); } { for (int iscatt = 0; iscatt < RAD_SCATT_TYPES + 1; iscatt++) { ZLOOP { dtau_avg[iscatt][i][j][k] /= ((SMALL + en_int_avg[iscatt][i][j][k]) * DTd); } } #define RANK (4) hsize_t fdims[RANK] = {RAD_SCATT_TYPES + 1, N1TOT, N2TOT, N3TOT}; hsize_t fstart[RANK] = { 0, global_start[1], global_start[2], global_start[3]}; hsize_t fcount[RANK] = {RAD_SCATT_TYPES + 1, N1, N2, N3}; hsize_t mdims[RANK] = { RAD_SCATT_TYPES + 1, N1 + 2 * NG, N2 + 2 * NG, N3 + 2 * NG}; hsize_t mstart[RANK] = {0, NG, NG, NG}; WRITE_ARRAY( dtau_avg, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_DBL); #undef RANK } WRITE_GRID(Nem, TYPE_INT); WRITE_GRID(Nabs, TYPE_INT); WRITE_GRID(Nsc, TYPE_INT); set_cooling_time(tau_cool, P, extra); WRITE_GRID(tau_cool, TYPE_FLOAT); set_Rmunu(); WRITE_TENSOR(Rmunu, TYPE_FLOAT); ZLOOP { TYPELOOP { Nem_phys[i][j][k][itp] /= DTd; Nabs_phys[i][j][k][itp] /= DTd; } } WRITE_RADTYPEVEC(Nem_phys, TYPE_DBL); WRITE_RADTYPEVEC(Nabs_phys, 
TYPE_DBL); ZLOOP { DLOOP1 { radG_int[i][j][k][mu] /= DTd; } #if RADIATION == RADTYPE_NEUTRINOS radG_int[i][j][k][RADG_YE] /= DTd; radG_int[i][j][k][RADG_YE_EM] /= DTd; #endif } WRITE_RADG(radG_int, TYPE_FLOAT); // nuLnu reduced over MPI, now just io proc outputs it { #if METRIC == MINKOWSKI bin_all_superphotons(); #endif mpi_reduce_nuLnu(); #define RANK (4) hsize_t fdims[RANK] = {NULNU_IDX0, NTH, NPHI, NU_BINS_SPEC}; hsize_t fstart[RANK] = {0, 0, 0, 0}; hsize_t fcount[RANK] = {NULNU_IDX0, NTH, NPHI, NU_BINS_SPEC}; hsize_t mdims[RANK] = {NULNU_IDX0, NTH, NPHI, NU_BINS_SPEC}; hsize_t mstart[RANK] = {0, 0, 0, 0}; if (!mpi_io_proc()) { fcount[0] = 0; fcount[1] = 0; fcount[2] = 0; fcount[3] = 0; } WRITE_ARRAY(nuLnu, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_DBL); #undef RANK } #endif // RADIATION } H5Fflush(file_id, H5F_SCOPE_GLOBAL); H5Fclose(file_id); if (mpi_io_proc()) { write_xml_file(dump_id, t, vnams); } dump_id++; fdump_id++; timer_stop(TIMER_OUT); } void restart_write(int restart_type) { timer_start(TIMER_OUT); char name[STRLEN], fname[STRLEN]; if (restart_type == RESTART_TEMP) { sprintf(fname, "restart_%08d.h5", restart_id); } else if (restart_type == RESTART_PERM) { sprintf(fname, "restart_perm_%08d.h5", restart_perm_id); restart_perm_id++; } strcpy(name, restartdir); strcat(name, fname); if (restart_type == RESTART_TEMP) { char lastname[STRLEN]; sprintf(fname, "restart.last"); strcpy(lastname, restartdir); strcat(lastname, fname); restart_id++; FILE *fp = fopen(lastname, "w"); fprintf(fp, "%s\n", name); fclose(fp); } if (mpi_io_proc()) { fprintf(stdout, "RESTART %s\n", name); } hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL); file_id = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id); if (file_id < 0) { printf("Could not create restart file! 
Exiting...\n");
    exit(-1);
  }
  H5Pclose(plist_id);

  WRITE_HDR(version, TYPE_STR);
  WRITE_HDR(metric, TYPE_STR);
  WRITE_HDR(nulnutype, TYPE_STR);
  int electrons = ELECTRONS;
  WRITE_HDR(electrons, TYPE_INT);
  int radiation = RADIATION;
  WRITE_HDR(radiation, TYPE_INT);
  int nvar = NVAR;
  WRITE_HDR(nvar, TYPE_INT);
  int nvar_passive = NVAR_PASSIVE;
  WRITE_HDR(nvar_passive, TYPE_INT);
  int output_eosvars = OUTPUT_EOSVARS;
  WRITE_HDR(output_eosvars, TYPE_INT);
#if DIAGNOSTICS_USE_RADTYPES
  int diagnostics_use_radtypes = DIAGNOSTICS_USE_RADTYPES;
#else
  int diagnostics_use_radtypes = 0;
#endif
  WRITE_HDR(diagnostics_use_radtypes, TYPE_INT);
  WRITE_HDR(t, TYPE_DBL);
  WRITE_HDR(tf, TYPE_DBL);
  WRITE_HDR(nstep, TYPE_INT);
  WRITE_HDR(dump_id, TYPE_INT);
  WRITE_HDR(fdump_id, TYPE_INT);
#if RADIATION && TRACERS
  WRITE_HDR(dumptrace_id, TYPE_INT);
#endif
  WRITE_HDR(restart_id, TYPE_INT);
  WRITE_HDR(restart_perm_id, TYPE_INT);
  WRITE_HDR(dt, TYPE_DBL);
  WRITE_HDR(lim, TYPE_INT);
  WRITE_HDR(failed, TYPE_INT);
  WRITE_HDR(DTd, TYPE_DBL);
  WRITE_HDR(DTl, TYPE_DBL);
  WRITE_HDR(DTr, TYPE_DBL);
  WRITE_HDR(DNr, TYPE_INT);
  WRITE_HDR(DTp, TYPE_INT);
  WRITE_HDR(DTf, TYPE_INT);
  WRITE_HDR(tdump, TYPE_DBL);
  WRITE_HDR(trestart, TYPE_DBL);
  WRITE_HDR(tlog, TYPE_DBL);
#if METRIC == MINKOWSKI
  WRITE_HDR(x1Min, TYPE_DBL);
  WRITE_HDR(x1Max, TYPE_DBL);
  WRITE_HDR(x2Min, TYPE_DBL);
  WRITE_HDR(x2Max, TYPE_DBL);
  WRITE_HDR(x3Min, TYPE_DBL);
  WRITE_HDR(x3Max, TYPE_DBL);
#elif METRIC == MKS
  WRITE_HDR(a, TYPE_DBL);
  WRITE_HDR(Rin, TYPE_DBL);
  WRITE_HDR(Rout, TYPE_DBL);
  WRITE_HDR(Rout_vis, TYPE_DBL);
  WRITE_HDR(hslope, TYPE_DBL);
  WRITE_HDR(Reh, TYPE_DBL);
  WRITE_HDR(Risco, TYPE_DBL);
  WRITE_HDR(poly_xt, TYPE_DBL);
  WRITE_HDR(poly_alpha, TYPE_DBL);
  WRITE_HDR(mks_smooth, TYPE_DBL);
#if RADIATION
  WRITE_HDR(mbh, TYPE_DBL);
  WRITE_HDR(Mbh, TYPE_DBL);
#endif // RADIATION
#endif // METRIC
  // EOS
#if EOS == EOS_TYPE_GAMMA || GAMMA_FALLBACK
  WRITE_HDR(gam, TYPE_DBL);
#endif // GAMMA EOS
#if EOS == EOS_TYPE_POLYTROPE
  WRITE_HDR(poly_K, TYPE_DBL);
  WRITE_HDR(poly_gam, TYPE_DBL);
#elif EOS == EOS_TYPE_TABLE
  WRITE_HDR(eospath, TYPE_STR);
#endif // EOS
  // Units
#if NEED_UNITS
  WRITE_HDR(L_unit, TYPE_DBL);
  WRITE_HDR(M_unit, TYPE_DBL);
  WRITE_HDR(RHO_unit, TYPE_DBL);
  WRITE_HDR(T_unit, TYPE_DBL);
  WRITE_HDR(U_unit, TYPE_DBL);
  WRITE_HDR(B_unit, TYPE_DBL);
#if EOS == EOS_TYPE_TABLE
  WRITE_HDR(TEMP_unit, TYPE_DBL);
#endif // EOS_TYPE_TABLE
#if RADIATION
  WRITE_HDR(tp_over_te, TYPE_DBL);
  WRITE_HDR(Ne_unit, TYPE_DBL);
  WRITE_HDR(Thetae_unit, TYPE_DBL);
#endif // RADIATION
#endif
  // Fluid
  WRITE_HDR(cour, TYPE_DBL);
#if ELECTRONS
  WRITE_HDR(game, TYPE_DBL);
  WRITE_HDR(gamp, TYPE_DBL);
  WRITE_HDR(fel0, TYPE_DBL);
  WRITE_HDR(tptemin, TYPE_DBL);
  WRITE_HDR(tptemax, TYPE_DBL);
#endif // ELECTRONS
#if RADIATION
  WRITE_HDR(numin, TYPE_DBL);
  WRITE_HDR(numax, TYPE_DBL);
  WRITE_HDR(tune_emiss, TYPE_DBL);
  WRITE_HDR(tune_scatt, TYPE_DBL);
  WRITE_HDR(t_tune_emiss, TYPE_DBL);
  WRITE_HDR(t_tune_scatt, TYPE_DBL);
  WRITE_HDR(dt_tune_emiss, TYPE_DBL);
  WRITE_HDR(dt_tune_scatt, TYPE_DBL);
  WRITE_HDR(made_tune_proc, TYPE_INT);
  WRITE_HDR(abs_tune_proc, TYPE_INT);
  WRITE_HDR(scatt_tune_proc, TYPE_INT);
  WRITE_HDR(thetae_max, TYPE_DBL);
  WRITE_HDR(sigma_max, TYPE_DBL);
  WRITE_HDR(kdotk_tol, TYPE_DBL);
  WRITE_HDR(Nph_to_track, TYPE_DBL);
#if METRIC == MKS
  WRITE_HDR(Rout_rad, TYPE_DBL);
#endif
#if RADIATION == RADTYPE_NEUTRINOS
  WRITE_HDR(lepton_tot, TYPE_DBL);
  WRITE_HDR(lepton_last, TYPE_DBL);
  WRITE_HDR(lepton_lost, TYPE_DBL);
#endif // RADIATION
#endif
#if NVAR_PASSIVE > 0
  // WRITE_ARRAY(passive_type, TYPE_INT);
  {
#define RANK (1)
    hsize_t fdims[RANK]
= {NVAR_PASSIVE}; hsize_t fstart[RANK] = {0}; hsize_t fcount[RANK] = {NVAR_PASSIVE}; if (!mpi_io_proc()) { fcount[0] = 0; } hsize_t mdims[RANK] = {NVAR_PASSIVE}; hsize_t mstart[RANK] = {0}; WRITE_ARRAY( passive_type, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_INT); #undef RANK } #endif WRITE_PRIM(P, TYPE_DBL); #if RADIATION WRITE_GRID(Nsph, TYPE_INT); WRITE_GRID(nph, TYPE_DBL); WRITE_GRID(Nem, TYPE_INT); WRITE_GRID(Nabs, TYPE_INT); WRITE_GRID(Nsc, TYPE_INT); WRITE_RADTYPEVEC(Nem_phys, TYPE_DBL); WRITE_RADTYPEVEC(Nabs_phys, TYPE_DBL); { #define RANK (4) hsize_t fdims[RANK] = {MAXNSCATT + 2, N1TOT, N2TOT, N3TOT}; hsize_t fstart[RANK] = { 0, global_start[1], global_start[2], global_start[3]}; hsize_t fcount[RANK] = {MAXNSCATT + 2, N1, N2, N3}; hsize_t mdims[RANK] = { MAXNSCATT + 2, N1 + 2 * NG, N2 + 2 * NG, N3 + 2 * NG}; hsize_t mstart[RANK] = {0, NG, NG, NG}; WRITE_ARRAY(Jrad, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_DBL); #undef RANK } { #define RANK (4) hsize_t fdims[RANK] = {RAD_SCATT_TYPES + 1, N1TOT, N2TOT, N3TOT}; hsize_t fstart[RANK] = { 0, global_start[1], global_start[2], global_start[3]}; hsize_t fcount[RANK] = {RAD_SCATT_TYPES + 1, N1, N2, N3}; hsize_t mdims[RANK] = { RAD_SCATT_TYPES + 1, N1 + 2 * NG, N2 + 2 * NG, N3 + 2 * NG}; hsize_t mstart[RANK] = {0, NG, NG, NG}; WRITE_ARRAY(dtau_avg, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_DBL); WRITE_ARRAY( en_int_avg, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_DBL); #undef RANK } { mpi_reduce_nuLnu(); #define RANK (4) hsize_t fdims[RANK] = {NULNU_IDX0, NTH, NPHI, NU_BINS_SPEC}; hsize_t fstart[RANK] = {0, 0, 0, 0}; hsize_t fcount[RANK] = {NULNU_IDX0, NTH, NPHI, NU_BINS_SPEC}; hsize_t mdims[RANK] = {NULNU_IDX0, NTH, NPHI, NU_BINS_SPEC}; hsize_t mstart[RANK] = {0, 0, 0, 0}; if (!mpi_io_proc()) { fcount[0] = 0; fcount[1] = 0; fcount[2] = 0; fcount[3] = 0; } WRITE_ARRAY(nuLnu, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_FLOAT); #undef RANK } // Superphoton data hid_t space; char dsetnam[STRLEN]; struct of_photon *wdata; wdata = safe_malloc((size_t)(step_tot) * sizeof(struct of_photon)); // Copy superphotons into buffer int nph = 0; for (int n = 0; n < nthreads; n++) { struct of_photon *ph = photon_lists[n]; while (ph != NULL) { copy_photon(ph, &(wdata[nph])); ph = ph->next; nph++; } } // Each processor must create all datasets hid_t *ph_dsets = safe_malloc((size_t)mpi_nprocs() * sizeof(hid_t)); for (int n = 0; n < mpi_nprocs(); n++) { sprintf(dsetnam, "photons_%08d", n); // int step_tot_buf = step_tot; int step_tot_buf = nph; mpi_int_broadcast_proc(&step_tot_buf, n); hsize_t dims[1] = {step_tot_buf}; space = H5Screate_simple(1, dims, NULL); ph_dsets[n] = H5Dcreate(file_id, dsetnam, phfiletype, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); H5Sclose(space); } if (step_tot > 0) { H5Dwrite(ph_dsets[mpi_myrank()], phmemtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); } // Close and release resources free(wdata); for (int n = 0; n < mpi_nprocs(); n++) { H5Dclose(ph_dsets[n]); } free(ph_dsets); #endif // RADIATION H5Fflush(file_id, H5F_SCOPE_GLOBAL); H5Fclose(file_id); // Keep only two most recent restart files if (mpi_io_proc() && restart_id >= 3 && restart_type == RESTART_TEMP) { char nametodel[STRLEN]; sprintf(fname, "restart_%08d.h5", restart_id - 3); strcpy(nametodel, restartdir); strcat(nametodel, fname); remove(nametodel); } timer_stop(TIMER_OUT); } void restart_read(char *fname) { timer_start(TIMER_OUT); if (mpi_io_proc()) { fprintf(stderr, "Restarting from %s\n\n", fname); } hid_t plist_id = 
H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL);
  file_id = H5Fopen(fname, H5F_ACC_RDONLY, plist_id);
  if (file_id < 0) {
    fprintf(stderr, "ERROR %s is not a valid restart file\n", fname);
    exit(-1);
  }
  H5Pclose(plist_id);

  READ_HDR(t, TYPE_DBL);
  READ_HDR(tf, TYPE_DBL);
  READ_HDR(nstep, TYPE_INT);
  READ_HDR(dump_id, TYPE_INT);
  READ_HDR(fdump_id, TYPE_INT);
#if RADIATION && TRACERS
  READ_HDR(dumptrace_id, TYPE_INT);
#endif
  READ_HDR(restart_id, TYPE_INT);
  READ_HDR(restart_perm_id, TYPE_INT);
  READ_HDR(dt, TYPE_DBL);
  READ_HDR(lim, TYPE_INT);
  READ_HDR(failed, TYPE_INT);
  READ_HDR(DTd, TYPE_DBL);
  READ_HDR(DTl, TYPE_DBL);
  READ_HDR(DTr, TYPE_DBL);
  READ_HDR(DNr, TYPE_INT);
  READ_HDR(DTp, TYPE_INT);
  READ_HDR(DTf, TYPE_INT);
  READ_HDR(tdump, TYPE_DBL);
  READ_HDR(trestart, TYPE_DBL);
  READ_HDR(tlog, TYPE_DBL);
#if METRIC == MINKOWSKI
  READ_HDR(x1Min, TYPE_DBL);
  READ_HDR(x1Max, TYPE_DBL);
  READ_HDR(x2Min, TYPE_DBL);
  READ_HDR(x2Max, TYPE_DBL);
  READ_HDR(x3Min, TYPE_DBL);
  READ_HDR(x3Max, TYPE_DBL);
#elif METRIC == MKS
  READ_HDR(a, TYPE_DBL);
  READ_HDR(Rin, TYPE_DBL);
  READ_HDR(Rout, TYPE_DBL);
  READ_HDR(Rout_vis, TYPE_DBL);
  READ_HDR(hslope, TYPE_DBL);
  READ_HDR(Reh, TYPE_DBL);
  READ_HDR(Risco, TYPE_DBL);
  READ_HDR(poly_xt, TYPE_DBL);
  READ_HDR(poly_alpha, TYPE_DBL);
  READ_HDR(mks_smooth, TYPE_DBL);
#if RADIATION
  READ_HDR(mbh, TYPE_DBL);
  READ_HDR(Mbh, TYPE_DBL);
#endif // RADIATION
#endif // METRIC
  // Units
#if NEED_UNITS
  READ_HDR(L_unit, TYPE_DBL);
  READ_HDR(T_unit, TYPE_DBL);
  READ_HDR(M_unit, TYPE_DBL);
  READ_HDR(RHO_unit, TYPE_DBL);
  READ_HDR(U_unit, TYPE_DBL);
  READ_HDR(B_unit, TYPE_DBL);
#if EOS == EOS_TYPE_TABLE
  READ_HDR(TEMP_unit, TYPE_DBL);
#endif // EOS_TYPE_TABLE
#if RADIATION
  READ_HDR(tp_over_te, TYPE_DBL);
  READ_HDR(Ne_unit, TYPE_DBL);
  READ_HDR(Thetae_unit, TYPE_DBL);
#endif // RADIATION
#endif // NEED_UNITS
  READ_HDR(cour, TYPE_DBL);
#if EOS == EOS_TYPE_GAMMA || GAMMA_FALLBACK
  READ_HDR(gam, TYPE_DBL);
#endif // EOS
#if EOS == EOS_TYPE_POLYTROPE
  READ_HDR(poly_K, TYPE_DBL);
  READ_HDR(poly_gam, TYPE_DBL);
#endif // EOS
#if ELECTRONS
  READ_HDR(game, TYPE_DBL);
  READ_HDR(gamp, TYPE_DBL);
  READ_HDR(fel0, TYPE_DBL);
  READ_HDR(tptemin, TYPE_DBL);
  READ_HDR(tptemax, TYPE_DBL);
#endif // ELECTRONS
#if RADIATION
  READ_HDR(numin, TYPE_DBL);
  READ_HDR(numax, TYPE_DBL);
  READ_HDR(tune_emiss, TYPE_DBL);
  READ_HDR(tune_scatt, TYPE_DBL);
  READ_HDR(t_tune_emiss, TYPE_DBL);
  READ_HDR(t_tune_scatt, TYPE_DBL);
  READ_HDR(dt_tune_emiss, TYPE_DBL);
  READ_HDR(dt_tune_scatt, TYPE_DBL);
  READ_HDR(made_tune_proc, TYPE_INT);
  READ_HDR(abs_tune_proc, TYPE_INT);
  READ_HDR(scatt_tune_proc, TYPE_INT);
  READ_HDR(thetae_max, TYPE_DBL);
  READ_HDR(sigma_max, TYPE_DBL);
  READ_HDR(kdotk_tol, TYPE_DBL);
  READ_HDR(Nph_to_track, TYPE_DBL);
#if METRIC == MKS
  READ_HDR(Rout_rad, TYPE_DBL);
#endif
#if RADIATION == RADTYPE_NEUTRINOS
  READ_HDR(lepton_tot, TYPE_DBL);
  READ_HDR(lepton_last, TYPE_DBL);
  READ_HDR(lepton_lost, TYPE_DBL);
#endif // RADIATION
#endif
#if NVAR_PASSIVE > 0
  {
#define RANK (1)
    hsize_t fdims[RANK]  = {NVAR_PASSIVE};
    hsize_t fstart[RANK] = {0};
    hsize_t fcount[RANK] = {NVAR_PASSIVE};
    hsize_t mdims[RANK]  = {NVAR_PASSIVE};
    hsize_t mstart[RANK] = {0};
    READ_ARRAY(
        passive_type, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_INT);
#undef RANK
  }
#endif
  READ_PRIM(P, TYPE_DBL);
#if RADIATION
  READ_GRID(Nsph, TYPE_INT);
  READ_GRID(nph, TYPE_DBL);
  READ_GRID(Nem, TYPE_INT);
  READ_GRID(Nabs, TYPE_INT);
  READ_GRID(Nsc, TYPE_INT);
  READ_RADTYPEVEC(Nem_phys, TYPE_DBL);
  READ_RADTYPEVEC(Nabs_phys, TYPE_DBL);
  {
#define RANK (4)
    hsize_t fdims[RANK] = {MAXNSCATT
+ 2, N1TOT, N2TOT, N3TOT}; hsize_t fstart[RANK] = { 0, global_start[1], global_start[2], global_start[3]}; hsize_t fcount[RANK] = {MAXNSCATT + 2, N1, N2, N3}; hsize_t mdims[RANK] = { MAXNSCATT + 2, N1 + 2 * NG, N2 + 2 * NG, N3 + 2 * NG}; hsize_t mstart[RANK] = {0, NG, NG, NG}; READ_ARRAY(Jrad, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_DBL); #undef RANK } { #define RANK (4) hsize_t fdims[RANK] = {RAD_SCATT_TYPES + 1, N1TOT, N2TOT, N3TOT}; hsize_t fstart[RANK] = { 0, global_start[1], global_start[2], global_start[3]}; hsize_t fcount[RANK] = {RAD_SCATT_TYPES + 1, N1, N2, N3}; hsize_t mdims[RANK] = { RAD_SCATT_TYPES + 1, N1 + 2 * NG, N2 + 2 * NG, N3 + 2 * NG}; hsize_t mstart[RANK] = {0, NG, NG, NG}; READ_ARRAY(dtau_avg, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_DBL); READ_ARRAY( en_int_avg, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_DBL); #undef RANK } { #define RANK (4) hsize_t fdims[RANK] = {NULNU_IDX0, NTH, NPHI, NU_BINS_SPEC}; hsize_t fstart[RANK] = {0, 0, 0, 0}; hsize_t fcount[RANK] = {NULNU_IDX0, NTH, NPHI, NU_BINS_SPEC}; hsize_t mdims[RANK] = {NULNU_IDX0, NTH, NPHI, NU_BINS_SPEC}; hsize_t mstart[RANK] = {0, 0, 0, 0}; if (!mpi_io_proc()) { fcount[0] = 0; fcount[1] = 0; fcount[2] = 0; fcount[3] = 0; } READ_ARRAY(nuLnu, RANK, fdims, fstart, fcount, mdims, mstart, TYPE_DBL); #undef RANK } // Superphoton datasets -- each processor must create all datasets hsize_t dims[1]; size_t nph_in = 0; hid_t * ph_dsets = safe_malloc((size_t)mpi_nprocs() * sizeof(hid_t)); char dsetnam[STRLEN]; for (int n = 0; n < mpi_nprocs(); n++) { sprintf(dsetnam, "photons_%08d", n); ph_dsets[n] = H5Dopen(file_id, dsetnam, H5P_DEFAULT); hid_t space = H5Dget_space(ph_dsets[n]); H5Sget_simple_extent_dims(space, dims, NULL); if (n == mpi_myrank()) nph_in = dims[0]; H5Sclose(space); } printf("[%i] nph_in = %lu\n", mpi_myrank(), (unsigned long int)nph_in); struct of_photon *rdata = safe_malloc(nph_in * sizeof(struct of_photon)); if (nph_in > 0) { H5Dread(ph_dsets[mpi_myrank()], phmemtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); struct of_photon *tmp = NULL, *ph_in = NULL; for (int n = 0; n < nph_in; n++) { tmp = malloc(sizeof(struct of_photon)); copy_photon(&(rdata[n]), tmp); if (tmp->Kcov[2][0] > 0.) { printf("BAD PHOTON BEING READ IN!\n"); printf("Kcov[0] = %e\n", tmp->Kcov[2][0]); } tmp->next = ph_in; ph_in = tmp; } int len_list = 0; struct of_photon *ph_in_tmp = ph_in; while (ph_in_tmp != NULL) { len_list++; ph_in_tmp = ph_in_tmp->next; } // Divide linked list among openmp processes equitably int thread_start = (int)(get_rand() * nthreads); // int ph_per_thread = nph_in/nthreads; for (int n = thread_start; n < thread_start + nph_in; n++) { int index = n % nthreads; /*if (ph_in->X[2][0] < 0.1) {printf("%i BAD\n", mpi_myrank()); for (int i = 0; i < 3; i++) { for (int mu = 0; mu < NDIM; mu++) { printf("X Kcov Kcon = %e %e %e\n", ph_in->X[i][mu], ph_in->Kcov[i][mu], ph_in->Kcon[i][mu]); } printf("w = %e\n", ph_in->w); } }*/ swap_ph(&ph_in, &(photon_lists[index])); } } free(rdata); for (int n = 0; n < mpi_nprocs(); n++) { H5Dclose(ph_dsets[n]); } free(ph_dsets); #endif // RADIATION H5Fflush(file_id, H5F_SCOPE_GLOBAL); H5Fclose(file_id); timer_stop(TIMER_OUT); } // Use fluid restart data as initial conditions, usually for a GRRMHD simulation void init_fluid_restart() { hsize_t file_grid_dims[4], file_start[4], file_count[4]; hsize_t mem_grid_dims[4], mem_start[4]; int fdims[3] = {N1TOT, N2TOT, N3TOT}; int mdims[3] = {N1, N2, N3}; // MAKE SURE N1TOT, N2TOT, N3TOT are right! 
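  // The reads below mirror write_array()'s hyperslab convention: the file
  // holds the full grid p[N1TOT][N2TOT][N3TOT][NVAR]; each rank selects the
  // patch starting at global_start[] in the file space and lands it at ghost
  // offset NG inside its local P buffer via paired H5Sselect_hyperslab calls
  // on the file space and memory space.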
if (mpi_io_proc()) { fprintf(stderr, "Initializing fluid from %s\n\n", init_from_grmhd); } hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL); hid_t file_id = H5Fopen(init_from_grmhd, H5F_ACC_RDONLY, plist_id); H5Pclose(plist_id); // Read some header data? gam? game? gamp? // Read fluid primitive data { char *name = "p"; for (int d = 0; d < 3; d++) file_grid_dims[d] = fdims[d]; file_grid_dims[3] = NVAR; // For vectors hid_t filespace = H5Screate_simple(4, file_grid_dims, NULL); for (int d = 0; d < 3; d++) { file_start[d] = global_start[d + 1]; file_count[d] = mdims[d]; } file_start[3] = 0; file_count[3] = NVAR; H5Sselect_hyperslab( filespace, H5S_SELECT_SET, file_start, NULL, file_count, NULL); for (int d = 0; d < 3; d++) { mem_grid_dims[d] = mdims[d] + 2 * NG; } mem_grid_dims[3] = NVAR; hid_t memspace = H5Screate_simple(4, mem_grid_dims, NULL); for (int d = 0; d < 3; d++) mem_start[d] = NG; mem_start[3] = 0; H5Sselect_hyperslab( memspace, H5S_SELECT_SET, mem_start, NULL, file_count, NULL); plist_id = H5Pcreate(H5P_DATASET_ACCESS); hid_t dset_id = H5Dopen(file_id, name, plist_id); H5Pclose(plist_id); plist_id = H5Pcreate(H5P_DATASET_XFER); H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE); H5Dread(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, plist_id, &P[0][0][0][0]); H5Dclose(dset_id); H5Pclose(plist_id); } H5Fflush(file_id, H5F_SCOPE_GLOBAL); H5Fclose(file_id); MPI_Barrier(MPI_COMM_WORLD); } int restart_init() { char fname[STRLEN], lastname[STRLEN]; sprintf(fname, "restart.last"); strcpy(lastname, restartdir); strcat(lastname, fname); #if RADIATION { photon_lists = safe_malloc((size_t)nthreads * sizeof(struct of_photon *)); photon_mpi_lists = safe_malloc((size_t)nthreads * sizeof(struct of_photon *)); #pragma omp parallel { photon_lists[omp_get_thread_num()] = NULL; photon_mpi_lists[omp_get_thread_num()] = NULL; } } #endif // RADIATION FILE *fp = fopen(lastname, "r"); if (fp == NULL) { if (mpi_io_proc()) { fprintf(stdout, "No restart file\n\n"); } return 0; } if (mpi_io_proc()) fprintf(stdout, "Loading restart file\n\n"); zero_arrays(); safe_fscanf(fp, "%s\n", fname); restart_read(fname); ZSLOOP(-NG, N1 - 1 + NG, -NG, N2 - 1 + NG, -NG, N3 - 1 + NG) { PLOOP { Ph[i][j][k][ip] = P[i][j][k][ip]; } } set_grid(); bound_prim(P); #if NEED_UNITS { set_units(); #if EOS == EOS_TYPE_TABLE && POLYTROPE_FALLBACK { rho_poly_thresh = EOS_SC_get_min_rho(); } #endif #if RADIATION { ZLOOP { sim_vol += ggeom[i][j][CENT].g * dx[1] * dx[2] * dx[3] * L_unit * L_unit * L_unit; } sim_vol = mpi_reduce(sim_vol); init_emissivity(); set_weight(P, extra); #if SCATTERING { init_all_hotcross(); } #endif } #endif // RADIATION } #endif // NEED_UNITS reset_dump_variables(); return 1; } #if RADIATION && TRACERS void dump_tracers() { timer_start(TIMER_OUT); char name[STRLEN], fname[STRLEN]; sprintf(fname, "tracers_%08d.h5part", dumptrace_id); strcpy(name, tracerdir); strcat(name, fname); if (mpi_io_proc()) { fprintf(stdout, "TRACERS %s\n", name); } hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL); file_id = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id); if (file_id < 0) { fprintf(stderr, "Could not create dump file! 
Exiting...\n");
    exit(-1);
  }
  H5Pclose(plist_id);

  hid_t group_id =
      H5Gcreate(file_id, "/Step#0", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  H5Gclose(group_id);

  double Time = t;
  WRITE_HDR(Time, TYPE_DBL);
  WRITE_HDR(nstep, TYPE_INT);
  WRITE_HDR(dumptrace_id, TYPE_INT);
#if NEED_UNITS
  {
    WRITE_HDR(L_unit, TYPE_DBL);
    WRITE_HDR(M_unit, TYPE_DBL);
    WRITE_HDR(RHO_unit, TYPE_DBL);
    WRITE_HDR(T_unit, TYPE_DBL);
    WRITE_HDR(U_unit, TYPE_DBL);
    WRITE_HDR(B_unit, TYPE_DBL);
#if EOS == EOS_TYPE_TABLE
    WRITE_HDR(TEMP_unit, TYPE_DBL);
#endif // EOS_TYPE_TABLE
  }
#endif

  size_t ntracers_local     = count_tracers_local();
  size_t ntracers           = mpi_reduce_int(ntracers_local);
  size_t ntracers_before_me = mpi_accumulate_int(ntracers_local);
  WRITE_HDR(ntracers, TYPE_INT);

  // arrays to fill
  int *   id     = safe_malloc(ntracers_local * sizeof(int));
  int *   it     = safe_malloc(ntracers_local * sizeof(int));
  int *   active = safe_malloc(ntracers_local * sizeof(int));
  double *time   = safe_malloc(ntracers_local * sizeof(double));
  double *mass   = safe_malloc(ntracers_local * sizeof(double));
  double *Xharm  = safe_malloc(3 * ntracers_local * sizeof(double));
  double *Xcart  = safe_malloc(3 * ntracers_local * sizeof(double));
  double *x      = safe_malloc(ntracers_local * sizeof(double));
  double *y      = safe_malloc(ntracers_local * sizeof(double));
  double *z      = safe_malloc(ntracers_local * sizeof(double));
  double *rho    = safe_malloc(ntracers_local * sizeof(double));
  double *uu     = safe_malloc(ntracers_local * sizeof(double));
  double *s      = safe_malloc(ntracers_local * sizeof(double));
  double *Press  = safe_malloc(ntracers_local * sizeof(double));
  double *T      = safe_malloc(ntracers_local * sizeof(double));
#if EOS == EOS_TYPE_TABLE
  double *Ye    = safe_malloc(ntracers_local * sizeof(double));
  double *Ye_em = safe_malloc(ntracers_local * sizeof(double));
#endif
  double *rate_emitted =
      safe_malloc(RAD_NUM_TYPES * ntracers_local * sizeof(double));
  double *rate_absorbed =
      safe_malloc(RAD_NUM_TYPES * ntracers_local * sizeof(double));
  double *ucon = safe_malloc(NDIM * ntracers_local * sizeof(double));
  double *ucov = safe_malloc(NDIM * ntracers_local * sizeof(double));
  double *bcon = safe_malloc(NDIM * ntracers_local * sizeof(double));
  double *bcov = safe_malloc(NDIM * ntracers_local * sizeof(double));

  // horrible and inefficient loop through tracer particles
  size_t idx, n = 0;
  double X_local[NDIM];
  double Xcart_local[NDIM];
  double Kcon[NDIM];
  double Kcov[NDIM];
  double P_interp[NVAR];
  double extra[EOS_NUM_EXTRA];
  double Nem_interp[RAD_NUM_TYPES];
  double Nabs_interp[RAD_NUM_TYPES];
  struct of_geom  g;
  struct of_state q;
  for (int thread = 0; thread < nthreads; thread++) {
    struct of_photon *ph = photon_lists[thread];
    while (ph != NULL) {
      if (ph->type == TYPE_TRACER) {
        // IDs
        id[n]     = tracer_get_id(ph);
        active[n] = 1;
        // time
        it[n]   = nstep;
        time[n] = t;
        // mass
        mass[n] = tracer_get_mass(ph);
        // Interpolate prim to tracer position
        tracer_get_X_u(ph, t, X_local, Kcov, Kcon);
        lagrange_interp_prim_3d(P_interp, X_local, P);
        cart_coord(X_local, Xcart_local);
        lagrange_interp_grid_3d(
            Nem_interp, X_local, &(Nem_phys[0][0][0][0]), RAD_NUM_TYPES);
        lagrange_interp_grid_3d(
            Nabs_interp, X_local, &(Nabs_phys[0][0][0][0]), RAD_NUM_TYPES);
        // Set metric at tracer position
        set_metric(X_local, &g);
        get_state(P_interp, &g, &q);
        // Coordinates
        idx = n * 3;
        for (int m = 0; m < 3; m++) {
          Xharm[idx + m] = X_local[m + 1];
        }
        for (int m = 0; m < 3; m++) {
          Xcart[idx + m] = Xcart_local[m + 1];
        }
        x[n] = Xcart_local[1];
        y[n] = Xcart_local[2];
        z[n] = Xcart_local[3];
        // prims
#if EOS == EOS_TYPE_TABLE
        EOS_SC_fill(P_interp, extra);
#endif
        rho[n]   = P_interp[RHO];
        uu[n]    = P_interp[UU];
        s[n]     = EOS_entropy_rho0_u(P_interp[RHO], P_interp[UU], extra);
        Press[n] = EOS_pressure_rho0_u(P_interp[RHO], P_interp[UU], extra);
        T[n]     = EOS_temperature(P_interp[RHO], P_interp[UU], extra);
#if EOS == EOS_TYPE_TABLE
        Ye[n]    = extra[EOS_YE];
        Ye_em[n] = P_interp[YE_EM];
#endif
        // Rates
        idx = n * RAD_NUM_TYPES;
        TYPELOOP {
          rate_emitted[idx + itp]  = Nem_interp[itp];
          rate_absorbed[idx + itp] = Nabs_interp[itp];
        }
        // vectors
        idx = n * NDIM;
        DLOOP1 { ucon[idx + mu] = q.ucon[mu]; }
        DLOOP1 { ucov[idx + mu] = q.ucov[mu]; }
        DLOOP1 { bcon[idx + mu] = q.bcon[mu]; }
        DLOOP1 { bcov[idx + mu] = q.bcov[mu]; }
        // Iterate!
        n++;
      }
      ph = ph->next;
    }
  }
  if (n != ntracers_local) {
    fprintf(stderr,
        "[dump_tracers]: INDEXING ERROR!\n"
        "\tn = %lu\n"
        "\tntracers_local = %lu\n"
        "\tntracers_before_me = %lu\n"
        "\tntracers_total = %lu\n",
        (unsigned long int)n, (unsigned long int)ntracers_local,
        (unsigned long int)ntracers_before_me, (unsigned long int)ntracers);
    exit(1);
  }
  { // HDF5 output, scalars
#define RANK (1)
    hsize_t fdims[RANK]  = {ntracers};
    hsize_t fstart[RANK] = {ntracers_before_me};
    hsize_t fcount[RANK] = {ntracers_local};
    hsize_t mdims[RANK]  = {ntracers_local};
    hsize_t mstart[RANK] = {0};
    write_array((void *)id, "/Step#0/id", RANK, fdims, fstart, fcount, mdims,
        mstart, TYPE_INT);
    write_array((void *)it, "/Step#0/it", RANK, fdims, fstart, fcount, mdims,
        mstart, TYPE_INT);
    write_array((void *)active, "/Step#0/active", RANK, fdims, fstart, fcount,
        mdims, mstart, TYPE_INT);
    write_array((void *)time, "/Step#0/time", RANK, fdims, fstart, fcount,
        mdims, mstart, TYPE_FLOAT);
    write_array((void *)x, "/Step#0/x", RANK, fdims, fstart, fcount, mdims,
        mstart, TYPE_FLOAT);
    write_array((void *)y, "/Step#0/y", RANK, fdims, fstart, fcount, mdims,
        mstart, TYPE_FLOAT);
    write_array((void *)z, "/Step#0/z", RANK, fdims, fstart, fcount, mdims,
        mstart, TYPE_FLOAT);
    write_array((void *)mass, "/Step#0/mass", RANK, fdims, fstart, fcount,
        mdims, mstart, TYPE_FLOAT);
    write_array((void *)rho, "/Step#0/rho", RANK, fdims, fstart, fcount,
        mdims, mstart, TYPE_FLOAT);
    write_array((void *)uu, "/Step#0/uu", RANK, fdims, fstart, fcount, mdims,
        mstart, TYPE_FLOAT);
    write_array((void *)s, "/Step#0/s", RANK, fdims, fstart, fcount, mdims,
        mstart, TYPE_FLOAT);
    write_array((void *)Press, "/Step#0/Press", RANK, fdims, fstart, fcount,
        mdims, mstart, TYPE_FLOAT);
    write_array((void *)T, "/Step#0/T", RANK, fdims, fstart, fcount, mdims,
        mstart, TYPE_FLOAT);
#if EOS == EOS_TYPE_TABLE
    write_array((void *)Ye, "/Step#0/Ye", RANK, fdims, fstart, fcount, mdims,
        mstart, TYPE_FLOAT);
    write_array((void *)Ye_em, "/Step#0/Ye_em", RANK, fdims, fstart, fcount,
        mdims, mstart, TYPE_FLOAT);
#endif
#undef RANK
  }
  { // HDF5 output 3-vectors
#define RANK (2)
    hsize_t fdims[RANK]  = {ntracers, 3};
    hsize_t fstart[RANK] = {ntracers_before_me, 0};
    hsize_t fcount[RANK] = {ntracers_local, 3};
    hsize_t mdims[RANK]  = {ntracers_local, 3};
    hsize_t mstart[RANK] = {0, 0};
    write_array((void *)Xharm, "/Step#0/Xharm", RANK, fdims, fstart, fcount,
        mdims, mstart, TYPE_FLOAT);
    write_array((void *)Xcart, "/Step#0/Xcart", RANK, fdims, fstart, fcount,
        mdims, mstart, TYPE_FLOAT);
#undef RANK
  }
  { // HDF5 output rates
#define RANK (2)
    hsize_t fdims[RANK]  = {ntracers, RAD_NUM_TYPES};
    hsize_t fstart[RANK] = {ntracers_before_me, 0};
    hsize_t fcount[RANK] = {ntracers_local, RAD_NUM_TYPES};
    hsize_t mdims[RANK]  = {ntracers_local, RAD_NUM_TYPES};
    hsize_t mstart[RANK] = {0, 0};
    write_array((void *)rate_emitted, "/Step#0/rate_emitted", RANK, fdims,
        fstart, fcount, mdims,
mstart, TYPE_DBL); write_array((void *)rate_absorbed, "/Step#0/rate_absorbed", RANK, fdims, fstart, fcount, mdims, mstart, TYPE_DBL); } { // HDF5 output 4-vectors #define RANK (2) hsize_t fdims[RANK] = {ntracers, NDIM}; hsize_t fstart[RANK] = {ntracers_before_me, 0}; hsize_t fcount[RANK] = {ntracers_local, NDIM}; hsize_t mdims[RANK] = {ntracers_local, NDIM}; hsize_t mstart[RANK] = {0, 0}; write_array((void *)ucon, "/Step#0/ucon", RANK, fdims, fstart, fcount, mdims, mstart, TYPE_FLOAT); write_array((void *)ucov, "/Step#0/ucov", RANK, fdims, fstart, fcount, mdims, mstart, TYPE_FLOAT); #if TRACERS_SAVE_BCON write_array((void *)bcon, "/Step#0/bcon", RANK, fdims, fstart, fcount, mdims, mstart, TYPE_FLOAT); write_array((void *)bcov, "/Step#0/bcov", RANK, fdims, fstart, fcount, mdims, mstart, TYPE_FLOAT); #endif #undef RANK } free(id); free(active); free(it); free(time); free(mass); free(Xharm); free(Xcart); free(x); free(y); free(z); free(rho); free(uu); free(T); free(s); free(Press); #if EOS == EOS_TYPE_TABLE free(Ye); #endif free(rate_emitted); free(rate_absorbed); free(ucon); free(ucov); free(bcon); free(bcov); H5Fflush(file_id, H5F_SCOPE_GLOBAL); H5Fclose(file_id); dumptrace_id++; timer_stop(TIMER_OUT); } #endif void write_array(void *data, const char *name, hsize_t rank, hsize_t *fdims, hsize_t *fstart, hsize_t *fcount, hsize_t *mdims, hsize_t *mstart, hsize_t type) { hid_t filespace, memspace, plist_id, dset_id; filespace = H5Screate_simple(rank, fdims, NULL); H5Sselect_hyperslab(filespace, H5S_SELECT_SET, fstart, NULL, fcount, NULL); memspace = H5Screate_simple(rank, mdims, NULL); H5Sselect_hyperslab(memspace, H5S_SELECT_SET, mstart, NULL, fcount, NULL); plist_id = H5Pcreate(H5P_DATASET_CREATE); if (type == TYPE_FLOAT) { size_t ntot = 1; for (size_t n = 0; n < rank; n++) { ntot *= mdims[n]; } float *buf = safe_malloc(ntot * sizeof(float)); for (size_t n = 0; n < ntot; n++) { buf[n] = (float)(((double *)data)[n]); } dset_id = H5Dcreate(file_id, name, H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); H5Pclose(plist_id); plist_id = H5Pcreate(H5P_DATASET_XFER); H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE); H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace, filespace, plist_id, buf); free(buf); } else if (type == TYPE_DBL) { dset_id = H5Dcreate(file_id, name, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); H5Pclose(plist_id); plist_id = H5Pcreate(H5P_DATASET_XFER); H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE); H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, plist_id, data); } else if (type == TYPE_INT) { dset_id = H5Dcreate(file_id, name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); H5Pclose(plist_id); plist_id = H5Pcreate(H5P_DATASET_XFER); H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE); H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data); } else if (type == TYPE_STR) { hid_t string_type = H5Tcopy(H5T_C_S1); H5Tset_size(string_type, strlen(data)); plist_id = H5Pcreate(H5P_DATASET_CREATE); dset_id = H5Dcreate(file_id, name, string_type, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); H5Pclose(plist_id); plist_id = H5Pcreate(H5P_DATASET_XFER); H5Dwrite(dset_id, string_type, memspace, filespace, plist_id, data); } else { fprintf( stderr, "type %llu not supported by write_array()! 
Exiting...\n", type); exit(-1); } H5Dclose(dset_id); H5Pclose(plist_id); H5Sclose(memspace); H5Sclose(filespace); } void read_array(void *data, const char *name, hsize_t rank, hsize_t *fdims, hsize_t *fstart, hsize_t *fcount, hsize_t *mdims, hsize_t *mstart, hsize_t type) { hid_t filespace, memspace, plist_id, dset_id; filespace = H5Screate_simple(rank, fdims, NULL); H5Sselect_hyperslab(filespace, H5S_SELECT_SET, fstart, NULL, fcount, NULL); memspace = H5Screate_simple(rank, mdims, NULL); H5Sselect_hyperslab(memspace, H5S_SELECT_SET, mstart, NULL, fcount, NULL); plist_id = H5Pcreate(H5P_DATASET_ACCESS); dset_id = H5Dopen(file_id, name, plist_id); plist_id = H5Pcreate(H5P_DATASET_XFER); if (type == TYPE_DBL) { H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE); H5Dread(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, plist_id, data); if (rank == 1 && fdims[0] == 1) { mpi_dbl_broadcast(data); } } else if (type == TYPE_INT) { H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE); H5Dread(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data); if (rank == 1 && fdims[0] == 1) { mpi_int_broadcast(data); } } else { fprintf( stderr, "type %llu not supported by read_array()! Exiting...\n", type); exit(-1); } H5Dclose(dset_id); H5Pclose(plist_id); H5Sclose(memspace); H5Sclose(filespace); } hsize_t product_hsize_t(hsize_t a[], int size) { hsize_t out = 1; for (int i = 0; i < size; i++) { out *= a[i]; } return out; }
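/* ----------------------------------------------------------------------------
 * Sketch (not in the original source): the five hsize_t arrays passed to
 * write_array()/read_array() encode one convention used throughout this file:
 *   fdims  - global extent of the dataset in the file
 *   fstart - this rank's offset into the file (from global_start[])
 *   fcount - the patch this rank owns (no ghosts)
 *   mdims  - extent of the in-memory buffer (usually with 2*NG ghost zones)
 *   mstart - offset of the owned patch inside that buffer (usually NG)
 * A hypothetical scalar grid variable Q would therefore be written as:
 *
 *   hsize_t fdims[3]  = {N1TOT, N2TOT, N3TOT};
 *   hsize_t fstart[3] = {global_start[1], global_start[2], global_start[3]};
 *   hsize_t fcount[3] = {N1, N2, N3};
 *   hsize_t mdims[3]  = {N1 + 2 * NG, N2 + 2 * NG, N3 + 2 * NG};
 *   hsize_t mstart[3] = {NG, NG, NG};
 *   write_array((void *)Q, "Q", 3, fdims, fstart, fcount, mdims, mstart,
 *               TYPE_DBL);
 *
 * which is exactly what the WRITE_GRID(Q, TYPE_DBL) macro above expands to.
 * -------------------------------------------------------------------------- */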
GB_bitmap_emult_template.c
//------------------------------------------------------------------------------
// GB_bitmap_emult_template: C = A.*B, C<M>=A.*B, and C<!M>=A.*B, C bitmap
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// C is bitmap. A and B are bitmap or full. M depends on the method

{

    //--------------------------------------------------------------------------
    // get C, A, and B
    //--------------------------------------------------------------------------

    const int8_t *restrict Ab = A->b ;
    const int8_t *restrict Bb = B->b ;
    const int64_t vlen = A->vlen ;

    ASSERT (GB_IS_BITMAP (A) || GB_IS_FULL (A) || GB_as_if_full (A)) ;
    ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B) || GB_as_if_full (B)) ;

    const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
    const GB_BTYPE *restrict Bx = (GB_BTYPE *) B->x ;
    int8_t *restrict Cb = C->b ;
    GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ;
    const int64_t cnz = GB_NNZ_HELD (C) ;

    //--------------------------------------------------------------------------
    // C=A.*B, C<M>=A.*B, or C<!M>=A.*B: C is bitmap
    //--------------------------------------------------------------------------

    // TODO modify this method so it can modify C in-place, and also use the
    // accum operator.

    int64_t cnvals = 0 ;

    if (ewise_method == GB_EMULT_METHOD_05)
    {

        //----------------------------------------------------------------------
        // C is bitmap, M is not present
        //----------------------------------------------------------------------

        // ------------------------------------------
        // C       =           A       .*      B
        // ------------------------------------------
        // bitmap  .           bitmap          bitmap  (method: 05)
        // bitmap  .           bitmap          full    (method: 05)
        // bitmap  .           full            bitmap  (method: 05)

        int tid ;
        #pragma omp parallel for num_threads(C_nthreads) schedule(static) \
            reduction(+:cnvals)
        for (tid = 0 ; tid < C_nthreads ; tid++)
        {
            int64_t pstart, pend, task_cnvals = 0 ;
            GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
            for (int64_t p = pstart ; p < pend ; p++)
            {
                if (GBB (Ab, p) && GBB (Bb, p))
                {
                    // C (i,j) = A (i,j) .* B (i,j)
                    GB_GETA (aij, Ax, p) ;
                    GB_GETB (bij, Bx, p) ;
                    GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
                    Cb [p] = 1 ;
                    task_cnvals++ ;
                }
            }
            cnvals += task_cnvals ;
        }

    }
    else if (ewise_method == GB_EMULT_METHOD_06)
    {

        //----------------------------------------------------------------------
        // C is bitmap, !M is sparse or hyper
        //----------------------------------------------------------------------

        // ------------------------------------------
        // C       <!M>=       A       .*      B
        // ------------------------------------------
        // bitmap  sparse      bitmap          bitmap  (method: 06)
        // bitmap  sparse      bitmap          full    (method: 06)
        // bitmap  sparse      full            bitmap  (method: 06)

        // M is sparse and complemented.  If M is sparse and not
        // complemented, then C is constructed as sparse, not bitmap.
        ASSERT (M != NULL) ;
        ASSERT (Mask_comp) ;
        ASSERT (GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ;

        // C(i,j) = A(i,j) .* B(i,j) can only be computed where M(i,j) is
        // not present in the sparse pattern of M, or where it is present
        // but equal to zero.
        //----------------------------------------------------------------------
        // scatter M into the C bitmap
        //----------------------------------------------------------------------

        GB_bitmap_M_scatter_whole (C, M, Mask_struct,
            GB_BITMAP_M_SCATTER_SET_2, M_ek_slicing, M_ntasks, M_nthreads,
            Context) ;

        // C(i,j) has been marked, in Cb, with the value 2 where M(i,j)=1.
        // These positions will not be computed in C(i,j).  C(i,j) can only
        // be modified where Cb [p] is zero.

        int tid ;
        #pragma omp parallel for num_threads(C_nthreads) schedule(static) \
            reduction(+:cnvals)
        for (tid = 0 ; tid < C_nthreads ; tid++)
        {
            int64_t pstart, pend, task_cnvals = 0 ;
            GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
            for (int64_t p = pstart ; p < pend ; p++)
            {
                if (Cb [p] == 0)
                {
                    // M(i,j) is zero, so C(i,j) can be computed
                    if (GBB (Ab, p) && GBB (Bb, p))
                    {
                        // C (i,j) = A (i,j) .* B (i,j)
                        GB_GETA (aij, Ax, p) ;
                        GB_GETB (bij, Bx, p) ;
                        GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
                        Cb [p] = 1 ;
                        task_cnvals++ ;
                    }
                }
                else
                {
                    // M(i,j) == 1, so C(i,j) is not computed
                    Cb [p] = 0 ;
                }
            }
            cnvals += task_cnvals ;
        }

    }
    else // if (ewise_method == GB_EMULT_METHOD_07)
    {

        //----------------------------------------------------------------------
        // C is bitmap; M is bitmap or full
        //----------------------------------------------------------------------

        // ------------------------------------------
        // C       <M>=        A       .*      B
        // ------------------------------------------
        // bitmap  bitmap      bitmap          bitmap  (method: 07)
        // bitmap  bitmap      bitmap          full    (method: 07)
        // bitmap  bitmap      full            bitmap  (method: 07)

        // ------------------------------------------
        // C       <M>=        A       .*      B
        // ------------------------------------------
        // bitmap  full        bitmap          bitmap  (method: 07)
        // bitmap  full        bitmap          full    (method: 07)
        // bitmap  full        full            bitmap  (method: 07)

        // ------------------------------------------
        // C       <!M>=       A       .*      B
        // ------------------------------------------
        // bitmap  bitmap      bitmap          bitmap  (method: 07)
        // bitmap  bitmap      bitmap          full    (method: 07)
        // bitmap  bitmap      full            bitmap  (method: 07)

        // ------------------------------------------
        // C       <!M>=       A       .*      B
        // ------------------------------------------
        // bitmap  full        bitmap          bitmap  (method: 07)
        // bitmap  full        bitmap          full    (method: 07)
        // bitmap  full        full            bitmap  (method: 07)

        ASSERT (GB_IS_BITMAP (M) || GB_IS_FULL (M)) ;

        const int8_t *restrict Mb = M->b ;
        const GB_void *restrict Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ;
        size_t msize = M->type->size ;

        #undef  GB_GET_MIJ
        #define GB_GET_MIJ(p)                                       \
            bool mij = GBB (Mb, p) && GB_mcast (Mx, p, msize) ;     \
            if (Mask_comp) mij = !mij ;     /* TODO: use ^ */

        int tid ;
        #pragma omp parallel for num_threads(C_nthreads) schedule(static) \
            reduction(+:cnvals)
        for (tid = 0 ; tid < C_nthreads ; tid++)
        {
            int64_t pstart, pend, task_cnvals = 0 ;
            GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
            for (int64_t p = pstart ; p < pend ; p++)
            {
                GB_GET_MIJ (p) ;
                if (mij)
                {
                    // mij is true, so C(i,j) can be computed
                    if (GBB (Ab, p) && GBB (Bb, p))
                    {
                        // C (i,j) = A (i,j) .* B (i,j)
                        GB_GETA (aij, Ax, p) ;
                        GB_GETB (bij, Bx, p) ;
                        GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
                        Cb [p] = 1 ;
                        task_cnvals++ ;
                    }
                }
                else
                {
                    // mij is false, so C(i,j) is not computed
                    Cb [p] = 0 ;
                }
            }
            cnvals += task_cnvals ;
        }
    }

    C->nvals = cnvals ;
}
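// Illustration only (not library code): the scalar mask test that GB_GET_MIJ
// expands to in method 07, written as a plain function. mb stands for
// GBB (Mb, p), mval for GB_mcast (Mx, p, msize), and mask_comp for Mask_comp;
// the TODO above notes the branch could be the single expression
// (mb && mval) ^ mask_comp.
static inline bool example_get_mij (bool mb, bool mval, bool mask_comp)
{
    bool mij = mb && mval ;
    if (mask_comp) mij = !mij ;     // equivalent to mij ^= mask_comp
    return (mij) ;
}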
GB_unaryop__identity_int32_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int32_uint32 // op(A') function: GB_tran__identity_int32_uint32 // C type: int32_t // A type: uint32_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ int32_t z = (int32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int32_uint32 ( int32_t *restrict Cx, const uint32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int32_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
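// For reference, a hand-expanded sketch (not generated code) of what one
// iteration of the Cx = op (cast (Ax)) loop above does once the macros are
// substituted; example_identity_cast is a hypothetical name.
static inline void example_identity_cast (int32_t *Cx, const uint32_t *Ax,
    int64_t p)
{
    uint32_t aij = Ax [p] ;         // GB_GETA
    int32_t  z   = (int32_t) aij ;  // GB_CASTING
    Cx [p] = z ;                    // GB_OP (identity)
}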
GB_binop__bor_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__bor_int16
// A.*B function (eWiseMult):       GB_AemultB__bor_int16
// A*D function (colscale):         (none)
// D*A function (rowscale):         (none)
// C+=B function (dense accum):     GB_Cdense_accumB__bor_int16
// C+=b function (dense accum):     GB_Cdense_accumb__bor_int16
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__bor_int16
// C=scalar+B                       GB_bind1st__bor_int16
// C=scalar+B'                      GB_bind1st_tran__bor_int16
// C=A+scalar                       GB_bind2nd__bor_int16
// C=A'+scalar                      GB_bind2nd_tran__bor_int16

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij) | (bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x) | (y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BOR || GxB_NO_INT16 || GxB_NO_BOR_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__bor_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__bor_int16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__bor_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__bor_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__bor_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__bor_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t bij = Bx [p] ;
        Cx [p] = (x) | (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__bor_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;
        Cx [p] = (aij) | (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int16_t aij = Ax [pA] ;             \
    Cx [pC] = (x) | (aij) ;             \
}

GrB_Info GB_bind1st_tran__bor_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int16_t aij = Ax [pA] ;             \
    Cx [pC] = (aij) | (y) ;             \
}

GrB_Info GB_bind2nd_tran__bor_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
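// A sketch of what GB_bind1st__bor_int16 computes once the GB_void
// indirection is stripped away: every entry of B is OR'd with the bound
// scalar x. example_bind1st_bor is a hypothetical standalone form for
// illustration, not part of the generated file.
static void example_bind1st_bor (int16_t *Cx, int16_t x, const int16_t *Bx,
    int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = (x) | (Bx [p]) ;
    }
}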
sse-bnd-dag0.h
#include <util/omp_wrapper.h> extern "C" void wilson_dslash_bnd_dag0( IFloat *chi_p_f, IFloat *u_p_f, IFloat *psi_p_f, int cb, Wilson *wilson_p) { int lx, ly, lz, lt; int cbn; int vol; lx = wilson_p->ptr[0]; ly = wilson_p->ptr[1]; lz = wilson_p->ptr[2]; lt = wilson_p->ptr[3]; vol = wilson_p->vol[0]; #if 0 SSE_C_FLOAT* const recv_buf1 = (SSE_C_FLOAT*)wilson_p->recv_buf[0]; SSE_C_FLOAT* const recv_buf2 = (SSE_C_FLOAT*)wilson_p->recv_buf[1]; SSE_C_FLOAT* const recv_buf3 = (SSE_C_FLOAT*)wilson_p->recv_buf[2]; SSE_C_FLOAT* const recv_buf4 = (SSE_C_FLOAT*)wilson_p->recv_buf[3]; SSE_C_FLOAT* const recv_buf5 = (SSE_C_FLOAT*)wilson_p->recv_buf[4]; SSE_C_FLOAT* const recv_buf6 = (SSE_C_FLOAT*)wilson_p->recv_buf[5]; SSE_C_FLOAT* const recv_buf7 = (SSE_C_FLOAT*)wilson_p->recv_buf[6]; SSE_C_FLOAT* const recv_buf8 = (SSE_C_FLOAT*)wilson_p->recv_buf[7]; #endif #ifdef SSE_TO_C M128D* send_buf0 = (M128D*)(wilson_p->send_buf[0]); M128D* send_buf1 = (M128D*)(wilson_p->send_buf[1]); M128D* send_buf2 = (M128D*)(wilson_p->send_buf[2]); M128D* send_buf3 = (M128D*)(wilson_p->send_buf[3]); M128D* send_buf4 = (M128D*)(wilson_p->send_buf[4]); M128D* send_buf5 = (M128D*)(wilson_p->send_buf[5]); M128D* send_buf6 = (M128D*)(wilson_p->send_buf[6]); M128D* send_buf7 = (M128D*)(wilson_p->send_buf[7]); // for(int i=0;i<8;i++) VRB.Result("","wilson_dslash_bnd_dag0()","wilson_p->send_buf[%d]=%p\n",i,wilson_p->send_buf[i]); #else __m128d* send_buf0 = (__m128d*)(wilson_p->send_buf[0]); __m128d* send_buf1 = (__m128d*)(wilson_p->send_buf[1]); __m128d* send_buf2 = (__m128d*)(wilson_p->send_buf[2]); __m128d* send_buf3 = (__m128d*)(wilson_p->send_buf[3]); __m128d* send_buf4 = (__m128d*)(wilson_p->send_buf[4]); __m128d* send_buf5 = (__m128d*)(wilson_p->send_buf[5]); __m128d* send_buf6 = (__m128d*)(wilson_p->send_buf[6]); __m128d* send_buf7 = (__m128d*)(wilson_p->send_buf[7]); #endif // fixme: do it in wilson_p later const int block0=HALF_SPINOR_SIZE*ly*lz*lt/2; const int block1=HALF_SPINOR_SIZE*lx*lz*lt/2; const int block2=HALF_SPINOR_SIZE*lx*ly*lt/2; const int block3=HALF_SPINOR_SIZE*lx*ly*lz/2; if(cb == 0) cbn = 1; else cbn = 0; Float *u_p = (Float *) u_p_f; Float *psi_p = (Float *) psi_p_f; int tt; if(GJP.Xnodes() != 1) { #ifdef _OPENMP #pragma omp parallel for schedule(static,1) #endif for(tt = 0; tt < lt; tt++){ int x, y, z, t, s; int i; int by, bz; int bt1, by1, bz1; int xp, yp, zp, tp; int xm, ym, zm, tm; int xyzt, _xyzt, __xyzt; int xpyzt, xypzt, xyzpt, xyztp; int xmyzt, xymzt, xyzmt, xyztm; int _xpyzt, _xypzt, _xyzpt, _xyztp; int _xmyzt, _xymzt, _xyzmt, _xyztm; Float __RESTRICT *u; Float __RESTRICT *chi; Float __RESTRICT *psi; int shft,_shft; int ip,im,iu; ip=0; im=0;iu=0; x=lx-1; xp=0; xm=lx-1; if (omp_get_num_threads() == 4) { if (tt & 2) t = (lt >> 1) + ((tt >> 2) << 1) + (tt & 1); else t = (lt >> 1) - ((tt >> 2) << 1) - (tt & 1) - 1; } else t = (tt + 1) % lt; // tp = (t + 1)%lt; //tm = (t+lt-1)%lt; for(z = 0; z < lz; z++){ // zp = (z + 1) % lz; //zm = (z - 1 +lz)%lz; _xpyzt = (xp>>1)+(lx>>1)*(ly*(z+lz*t)); _xmyzt = (xm>>1)-(xp>>1)+(lx>>1); _shft= (SPINOR_SIZE>>2)* ((ly*(z+lz*t))>>1); const int parity= (cbn^((x+z+t)&1)); // by is even for(y = parity; y < ly; y+=2){ { register M128D _a,_b,_c,_d; // xp = (x + 1) & ((x + 1 - lx) >> 31); // xm = x - 1 + (((x - 1) >> 31) & lx); xpyzt = _xpyzt + (lx>>1)*y; psi = psi_p + SPINOR_SIZE * xpyzt; //shft= (SPINOR_SIZE/4)* ((y+ly*(z+lz*t))/2); shft= (SPINOR_SIZE>>2)*(y>>1) + _shft; #ifdef SSE_TO_C #if 1 STORE_P_PROJ_X_03(send_buf0+shft ,0); STORE_P_PROJ_X_03(send_buf0+shft+1 ,1); 
STORE_P_PROJ_X_03(send_buf0+shft+2 ,2); STORE_P_PROJ_X_12(send_buf0+shft+3 ,0); STORE_P_PROJ_X_12(send_buf0+shft+4 ,1); STORE_P_PROJ_X_12(send_buf0+shft+5 ,2); #endif #else _mm_store_pd((double*)(send_buf0+shft), P_PROJ_X_03(0)); _mm_store_pd((double*)(send_buf0+shft+1) , P_PROJ_X_03(1)); _mm_store_pd((double*)(send_buf0+shft+2) , P_PROJ_X_03(2)); _mm_store_pd((double*)(send_buf0+shft+3) , P_PROJ_X_12(0)); _mm_store_pd((double*)(send_buf0+shft+4) , P_PROJ_X_12(1)); _mm_store_pd((double*)(send_buf0+shft+5) , P_PROJ_X_12(2)); #endif BND_PREFETCH_PSI; xmyzt = xpyzt + _xmyzt - (lx>>1)*(parity<<1); psi = psi_p+ SPINOR_SIZE * xmyzt; u = u_p + GAUGE_SIZE * ( xmyzt + vol * cb); //stay same: shft= (SPINOR_SIZE/4)* ((yD+ly*(z+lz*t))/2); #if 0 __m128d* wxm = send_buf4+shft; P_KERN_XM_noadd; BND_PREFETCH_U0; BND_PREFETCH_PSI; #else M128D wxm[6]; #if 1 P_KERN_XM_noadd; #endif BND_PREFETCH_U0; BND_PREFETCH_PSI; #ifdef SSE_TO_C #if 1 { SSE_C_FLOAT *sse_c_p = (SSE_C_FLOAT*)(send_buf4+shft); STORE_XM(wxm,(sse_c_p)); } #else #endif #else _mm_store_pd( (double*)(send_buf4+shft), wxm[0]); _mm_store_pd( (double*)(send_buf4+shft+1), wxm[1]); _mm_store_pd( (double*)(send_buf4+shft+2), wxm[2]); _mm_store_pd( (double*)(send_buf4+shft+3), wxm[3]); _mm_store_pd( (double*)(send_buf4+shft+4), wxm[4]); _mm_store_pd( (double*)(send_buf4+shft+5), wxm[5]); #endif #endif }// } } } #ifdef BND_COMM //getPlusData((IFloat *)recv_buf1, (IFloat *)send_buf0, block0, 0); //getMinusData((IFloat *)recv_buf5, (IFloat *)send_buf4, block0, 0); QMP_start(wilson_p->multiple[0]); QMP_start(wilson_p->multiple[4]); #endif } //----------------------------------------------------------// if(GJP.Ynodes() != 1) { #ifdef _OPENMP #pragma omp parallel for schedule(static,1) #endif for(tt = 0; tt < lt; tt++){ int x, y, z, t, s; int i; int by, bz; int bt1, by1, bz1; int xp, yp, zp, tp; int xm, ym, zm, tm; int xyzt, _xyzt, __xyzt; int xpyzt, xypzt, xyzpt, xyztp; int xmyzt, xymzt, xyzmt, xyztm; int _xpyzt, _xypzt, _xyzpt, _xyztp; int _xmyzt, _xymzt, _xyzmt, _xyztm; Float __RESTRICT *u; Float __RESTRICT *chi; Float __RESTRICT *psi; int shft,_shft; y=ly-1; yp=0; ym=ly-1; if (omp_get_num_threads() == 4) { if (tt & 2) t = (lt >> 1) + ((tt >> 2) << 1) + (tt & 1); else t = (lt >> 1) - ((tt >> 2) << 1) - (tt & 1) - 1; } else t = (tt + 1) % lt; //tp = (t + 1)%lt; //tm = (t+lt-1)%lt; for (bz = 0; bz < lz; bz += Z_BLOCK) { bz1 = bz + Z_BLOCK; if (bz1 >= lz) bz1 = lz; for(z = bz; z < bz1; z++){ //zp = (z + 1) % lz; //zm = (z - 1 +lz)%lz; int parity= (cbn^((y+z+t)&1)); _xypzt = (lx>>1)*(yp+ly*(z+lz*t)); _shft= (SPINOR_SIZE>>2)* ((lx*(z+lz*t))>>1); _xymzt =(lx>>1)*(ym+ly*(z+lz*t)); for(x = parity; x < lx; x+=2) { register M128D _a,_b,_c,_d; // xp = (x + 1) & ((x + 1 - lx) >> 31); // xm = x - 1 + (((x - 1) >> 31) & lx); xypzt = (x>>1) + _xypzt; psi = psi_p + SPINOR_SIZE * xypzt; //shft = (SPINOR_SIZE/4)* ((x+lx*(z+lz*t))/2); shft= _shft + (SPINOR_SIZE>>2)* (x>>1); #ifdef SSE_TO_C STORE_P_PROJ_Y_03(send_buf1+shft ,0); STORE_P_PROJ_Y_03(send_buf1+shft+1 ,1); STORE_P_PROJ_Y_03(send_buf1+shft+2 ,2); STORE_P_PROJ_Y_12(send_buf1+shft+3 ,0); STORE_P_PROJ_Y_12(send_buf1+shft+4 ,1); STORE_P_PROJ_Y_12(send_buf1+shft+5 ,2); #else *(send_buf1+shft) = P_PROJ_Y_03(0); *(send_buf1+shft+1) = P_PROJ_Y_03(1); *(send_buf1+shft+2) = P_PROJ_Y_03(2); *(send_buf1+shft+3) = P_PROJ_Y_12(0); *(send_buf1+shft+4) = P_PROJ_Y_12(1); *(send_buf1+shft+5) = P_PROJ_Y_12(2); #endif BND_PREFETCH_PSI; //int xD = x+1-(parity<<1) ;// x+1 or x-1; //xymzt = (xD/2)+(lx/2)*(ym+ly*(z+lz*t)); xymzt = 
((x+1-(parity<<1))>>1) + _xymzt; psi = psi_p + SPINOR_SIZE * xymzt; //shft= (SPINOR_SIZE/4)* ((xD+lx*(z+lz*t))/2); u = u_p + GAUGE_SIZE * ( xymzt + vol * cb); M128D* wym = send_buf5+shft; P_KERN_YM_noadd; BND_PREFETCH_U1; BND_PREFETCH_PSI; // printf("deb %d %d %d %d %d %e %e %e\n", // xD,ym,z,t,shft, *psi, *u, *(double*)&(wym[0])); }//loop(x) }//loop(z) }//loop(bz) }//loop(t) #ifdef BND_COMM //getPlusData((IFloat *)recv_buf2, (IFloat *)send_buf1, block1, 1); //getMinusData((IFloat *)recv_buf6, (IFloat *)send_buf5, block1, 1); QMP_start(wilson_p->multiple[1]); QMP_start(wilson_p->multiple[5]); #endif } //----------------------------------------------------------// if(GJP.Znodes() != 1) { #ifdef _OPENMP #pragma omp parallel for schedule(static,1) #endif for(tt = 0; tt < lt; tt++){ int x, y, z, t, s; int i; int by, bz; int bt1, by1, bz1; int xp, yp, zp, tp; int xm, ym, zm, tm; int xyzt, _xyzt, __xyzt; int xpyzt, xypzt, xyzpt, xyztp; int xmyzt, xymzt, xyzmt, xyztm; int _xpyzt, _xypzt, _xyzpt, _xyztp; int _xmyzt, _xymzt, _xyzmt, _xyztm; Float __RESTRICT *u; Float __RESTRICT *chi; Float __RESTRICT *psi; int shft,_shft; z=lz-1; zp=0; zm=lz-1; if (omp_get_num_threads() == 4) { if (tt & 2) t = (lt >> 1) + ((tt >> 2) << 1) + (tt & 1); else t = (lt >> 1) - ((tt >> 2) << 1) - (tt & 1) - 1; } else t = (tt + 1) % lt; // tp = (t + 1)%lt; // tm = (t+lt-1)%lt; for (by = 0; by < ly; by += Y_BLOCK) { by1 = by + Y_BLOCK; if (by1 >= ly) by1 = ly; for(y = by; y < by1; y++) { _xyzpt= (lx>>1)*(y+ly*(zp+lz*t)); _shft=(SPINOR_SIZE>>2) * ((lx*(y+ly*t))>>1); _xyzmt=(lx>>1)*(y+ly*(zm+lz*t)); int parity= (cbn^((y+z+t)&1)); for(x = parity; x < lx; x+=2) { register M128D _a,_b,_c,_d; // xp = (x + 1) & ((x + 1 - lx) >> 31); // xm = x - 1 + (((x - 1) >> 31) & lx); //xyzpt = (x/2)+(lx/2)*(y+ly*(zp+lz*t)); xyzpt = (x>>1) + _xyzpt; psi = psi_p + SPINOR_SIZE * xyzpt; //shft= (SPINOR_SIZE/4)* ((x+lx*(y+ly*t))/2); shft=(SPINOR_SIZE>>2)*(x>>1)+_shft; #ifdef SSE_TO_C STORE_P_PROJ_Z_02(send_buf2+shft ,0); STORE_P_PROJ_Z_02(send_buf2+shft+1 ,1); STORE_P_PROJ_Z_02(send_buf2+shft+2 ,2); STORE_P_PROJ_Z_13(send_buf2+shft+3 ,0); STORE_P_PROJ_Z_13(send_buf2+shft+4 ,1); STORE_P_PROJ_Z_13(send_buf2+shft+5 ,2); #else *(send_buf2+shft) = P_PROJ_Z_02(0); *(send_buf2+shft+1) = P_PROJ_Z_02(1); *(send_buf2+shft+2) = P_PROJ_Z_02(2); *(send_buf2+shft+3) = P_PROJ_Z_13(0); *(send_buf2+shft+4) = P_PROJ_Z_13(1); *(send_buf2+shft+5) = P_PROJ_Z_13(2); #endif //int xD = x+1-(parity<<1) ;// x+1 or x-1; //xyzmt = (xD/2)+(lx/2)*(y+ly*(zm+lz*t)); xyzmt = ((x+1-(parity<<1))>>1) + _xyzmt; psi = psi_p + SPINOR_SIZE * xyzmt; //shft= (SPINOR_SIZE/4)* ((xD+lx*(y+ly*t))/2); u = u_p + GAUGE_SIZE * ( xyzmt + vol * cb); M128D* wzm = send_buf6+shft; P_KERN_ZM_noadd; //printf("deb %d %d %d %d %d %e %e %e\n", // xD,ym,z,t,shft, *psi, *u, *(double*)&(wym[0])); }//loop(x) }//loop(z) }//loop(bz) }//loop(t) #ifdef BND_COMM //getPlusData((IFloat *)recv_buf3, (IFloat *)send_buf2, block2, 2); //getMinusData((IFloat *)recv_buf7, (IFloat *)send_buf6, block2, 2); QMP_start(wilson_p->multiple[2]); QMP_start(wilson_p->multiple[6]); #endif } //----------------------------------------------------------// int zz; if(GJP.Tnodes() != 1) { #ifdef _OPENMP #pragma omp parallel for schedule(static,1) #endif for(zz = 0; zz < lz; zz++){ int x, y, t, z; int i; int by, bz; int bt1, by1, bz1; int xp, yp, zp, tp; int xm, ym, zm, tm; int xyzt, _xyzt, __xyzt; int xpyzt, xypzt, xyzpt, xyztp; int xmyzt, xymzt, xyzmt, xyztm; int _xpyzt, _xypzt, _xyzpt, _xyztp; int _xmyzt, _xymzt, _xyzmt, _xyztm; Float 
__RESTRICT *u; Float __RESTRICT *chi; Float __RESTRICT *psi; int shft,_shft; if (omp_get_num_threads() == 4) { if (zz & 2) z = (lz >> 1) + ((zz >> 2) << 1) + (zz & 1); else z = (lz >> 1) - ((zz >> 2) << 1) - (zz & 1) - 1; } else z = (zz + 1) % lz; t=lt-1; tp=0; tm=lt-1; for (by = 0; by < ly; by += Y_BLOCK) { by1 = by + Y_BLOCK; if (by1 >= ly) by1 = ly; for(y = by; y < by1; y++) { _xyztp=(lx>>1)*(y+ly*(z+lz*tp)); _xyztm=(lx/2)*(y+ly*(z+lz*tm)); _shft= (SPINOR_SIZE>>2)* ((lx*(y+ly*z))>>1); int parity= (cbn^((y+z+t)&1)); for(x = parity; x < lx; x+=2) { register M128D _a,_b,_c,_d; // xp = (x + 1) & ((x + 1 - lx) >> 31); // xm = x - 1 + (((x - 1) >> 31) & lx); xyztp = (x>>1)+_xyztp; psi = psi_p + SPINOR_SIZE * xyztp; //shft= (SPINOR_SIZE/4)* ((x+lx*(y+ly*z))/2); shft= (SPINOR_SIZE>>2)* (x>>1)+_shft; #ifdef SSE_TO_C STORE_P_PROJ_T_02(send_buf3+shft ,0); STORE_P_PROJ_T_02(send_buf3+shft+1 ,1); STORE_P_PROJ_T_02(send_buf3+shft+2 ,2); STORE_P_PROJ_T_13(send_buf3+shft+3 ,0); STORE_P_PROJ_T_13(send_buf3+shft+4 ,1); STORE_P_PROJ_T_13(send_buf3+shft+5 ,2); #else *(send_buf3+shft) = P_PROJ_T_02(0); *(send_buf3+shft+1) = P_PROJ_T_02(1); *(send_buf3+shft+2) = P_PROJ_T_02(2); *(send_buf3+shft+3) = P_PROJ_T_13(0); *(send_buf3+shft+4) = P_PROJ_T_13(1); *(send_buf3+shft+5) = P_PROJ_T_13(2); #endif BND_PREFETCH_PSI; #if 0 printf("send_buf3 %d %d %d %d %d %e %e %e\n", x,y,z,t,shft, *psi, *u, *((SSE_C_FLOAT*)(send_buf3+shft))); #endif //int xD = x+1-(parity<<1) ;// x+1 or x-1; //xyztm = (xD/2)+(lx/2)*(y+ly*(z+lz*tm)); int xD = x+1-(parity<<1) ;// x+1 or x-1; xyztm = ((x+1-(parity<<1))>>1)+_xyztm; psi = psi_p + SPINOR_SIZE * xyztm; //shft= (SPINOR_SIZE/4)* ((xD+lx*(y+ly*z))/2); u = u_p + GAUGE_SIZE * ( xyztm + vol * cb); M128D* wtm = send_buf7+shft; P_KERN_TM_noadd; BND_PREFETCH_U2; BND_PREFETCH_PSI; #if 0 { SSE_C_FLOAT *tmp_p = (SSE_C_FLOAT*)wtm; printf("wtm=%p %e \n",tmp_p, *tmp_p);fflush(stdout); // printf("wtm %d %d %d %d %d %e %e %e\n", // xD,y,z,t,shft, *psi, *u,*tmp_p); } #endif }//loop(x) }//loop(y) }//loop(z) }//loop(zz) #ifdef BND_COMM //getPlusData((IFloat *)recv_buf4, (IFloat *)send_buf3, block3, 3); //getMinusData((IFloat *)recv_buf8, (IFloat *)send_buf7, block3, 3); QMP_start(wilson_p->multiple[3]); QMP_start(wilson_p->multiple[7]); #endif } //----------------------------------------------------------// ////////////////////////////////////////////////////////////////// #if 0 /* mpi comminucation start 2006.8.3 S.AOKI */ { Float tmp[SPINOR_SIZE]; Float tmp1[SPINOR_SIZE]; Float tmp2[SPINOR_SIZE]; Float tmp3[SPINOR_SIZE]; Float tmp4[SPINOR_SIZE]; Float tmp5[SPINOR_SIZE]; Float tmp6[SPINOR_SIZE]; Float tmp7[SPINOR_SIZE]; Float tmp8[SPINOR_SIZE]; Float fbuf[SPINOR_SIZE]; int dag=0; int sdag; if(dag == 0) sdag = 1; else if(dag == 1) sdag = -1; else{ } int shft; int shft6; int ixp=0; int ixm=0; int iyp=0; int iym=0; int izp=0; int izm=0; int itp=0; int itm=0; Float* const send_buf0 = wilson_p->send_buf[0]; Float* const send_buf1 = wilson_p->send_buf[1]; Float* const send_buf2 = wilson_p->send_buf[2]; Float* const send_buf3 = wilson_p->send_buf[3]; Float* const send_buf4 = wilson_p->send_buf[4]; Float* const send_buf5 = wilson_p->send_buf[5]; Float* const send_buf6 = wilson_p->send_buf[6]; Float* const send_buf7 = wilson_p->send_buf[7]; int x, y, z, t; int xp, yp, zp, tp; int xm, ym, zm, tm; int xyzt; int xpyzt, xypzt, xyzpt, xyztp; int xmyzt, xymzt, xyzmt, xyztm; int parity; int r, c, s, mu; Float *chi_p = (Float *) chi_p_f; Float *u_p = (Float *) u_p_f; Float *psi_p = (Float *) psi_p_f; Float *chi; Float 
*u; Float *psi; //mu=0 //SA 2007.5.22 #if 0 if(GJP.Xnodes() != 1) { //send_buf0=(Float *) malloc(block2*sizeof(Float)); //recv_buf1=(Float *) malloc(block2*sizeof(Float)); //recv_buf5=(Float *) malloc(block2*sizeof(Float)); //plus direction x=lx-1; xp=0; for(t=0; t<lt; t++){ for(z=0; z<lz; z++){ for(y=0; y<ly; y++){ parity = x+y+z+t; parity = parity % 2; if(parity == cbn){ xpyzt = (xp/2)+(lx/2)*(y+ly*(z+lz*t)); psi = psi_p + SPINOR_SIZE * xpyzt; shft=ixp*HALF_SPINOR_SIZE; shft6=shft+6; for(c=0;c<3;c++){ *(send_buf0+shft) = PSI(0,c,0) - sdag * ( -PSI(1,c,3) ); *(send_buf0+shft+1) = PSI(1,c,0) - sdag * ( PSI(0,c,3) ); // int my_xyzt=x+lx*(y+ly*(z+lz*t)); // ::printf("send_buf (%d,%d,%d,%d) %d %d %d %e %e\n",lx*GJP.XnodeCoor()+x,y,z,t, my_xyzt, my_xyzt/lx/2, ixp,*(send_buf0+shft), *(send_buf0+shft+1)); *(send_buf0+shft6) = PSI(0,c,1) - sdag * ( -PSI(1,c,2) ); *(send_buf0+shft6+1) = PSI(1,c,1) - sdag * ( PSI(0,c,2) ); shft+=2; shft6+=2; } ++ixp; } } } } // getPlusData((IFloat *)recv_buf1, (IFloat *)send_buf0, block0, 0); //minus direction x=0; xm=lx-1; for(t=0; t<lt; t++){ for(z=0; z<lz; z++){ for(y=0; y<ly; y++){ parity = x+y+z+t; parity = parity % 2; if(parity == cbn){ xmyzt = (xm/2)+(lx/2)*(y+ly*(z+lz*t)); u = u_p + GAUGE_SIZE * ( xmyzt + vol * cb); psi = psi_p + SPINOR_SIZE * xmyzt; for(c=0;c<3;c++){ TMP(0,c,0) = PSI(0,c,0) + sdag * ( -PSI(1,c,3) ); TMP(1,c,0) = PSI(1,c,0) + sdag * ( PSI(0,c,3) ); TMP(0,c,1) = PSI(0,c,1) + sdag * ( -PSI(1,c,2) ); TMP(1,c,1) = PSI(1,c,1) + sdag * ( PSI(0,c,2) ); } /* multiply by U_mu */ mu = 0; shft=ixm*HALF_SPINOR_SIZE; for(s=0;s<2;s++){ for(c=0;c<3;c++){ *(send_buf0+shft) = ( U(0,0,c,mu) * TMP(0,0,s) + U(0,1,c,mu) * TMP(0,1,s) + U(0,2,c,mu) * TMP(0,2,s) + U(1,0,c,mu) * TMP(1,0,s) + U(1,1,c,mu) * TMP(1,1,s) + U(1,2,c,mu) * TMP(1,2,s) ); ++shft; *(send_buf0+shft) = ( U(0,0,c,mu) * TMP(1,0,s) + U(0,1,c,mu) * TMP(1,1,s) + U(0,2,c,mu) * TMP(1,2,s) - U(1,0,c,mu) * TMP(0,0,s) - U(1,1,c,mu) * TMP(0,1,s) - U(1,2,c,mu) * TMP(0,2,s) ); ++shft; } } ++ixm; } } } } // getMinusData((IFloat *)recv_buf5, (IFloat *)send_buf0, block0, 0); // free(send_buf0); } #endif #define CHECK(A,B) \ { \ double reldiff = fabs( ((A)-(B))/((A)+(B)) ) *0.5; \ if( reldiff > 1e-12 ) \ { \ int gx=GJP.XnodeCoor()*lx+x; \ int gy=GJP.YnodeCoor()*ly+y; \ int gz=GJP.ZnodeCoor()*lz+z; \ int gt=GJP.TnodeCoor()*lt+t; \ printf("check failed (%d,%d,%d,%d): %e %e\n", \ gx,gy,gz,gt, A,B); \ }} \ //mu=1 if(GJP.Ynodes() != 1) { //plus direction //send_buf1=(Float *) malloc(block2*sizeof(Float)); //recv_buf2=(Float *) malloc(block2*sizeof(Float)); //recv_buf6=(Float *) malloc(block2*sizeof(Float)); y=ly-1; yp=0; for(t=0; t<lt; t++){ for(z=0; z<lz; z++){ for(x=0; x<lx; x++){ parity = x+y+z+t; parity = parity % 2; if(parity == cbn){ xypzt = (x/2)+(lx/2)*(yp+ly*(z+lz*t)); psi = psi_p + SPINOR_SIZE * xypzt; shft=iyp*HALF_SPINOR_SIZE; shft6=shft+6; for(c=0;c<3;c++){ #if 0 *(send_buf1+shft) = PSI(0,c,0) - sdag * ( -PSI(0,c,3) ); *(send_buf1+shft+1) = PSI(1,c,0) - sdag * ( -PSI(1,c,3) ); *(send_buf1+shft6) = PSI(0,c,1) - sdag * ( PSI(0,c,2) ); *(send_buf1+shft6+1) = PSI(1,c,1) - sdag * ( PSI(1,c,2) ); #else CHECK(*(send_buf1+shft) , PSI(0,c,0) - sdag * ( -PSI(0,c,3) )); CHECK(*(send_buf1+shft+1) , PSI(1,c,0) - sdag * ( -PSI(1,c,3) )); CHECK(*(send_buf1+shft6) , PSI(0,c,1) - sdag * ( PSI(0,c,2) )); CHECK(*(send_buf1+shft6+1) , PSI(1,c,1) - sdag * ( PSI(1,c,2) )); #endif shft+=2; shft6+=2; } ++iyp; } } } } // getPlusData((IFloat *)recv_buf2, (IFloat *)send_buf1, block1, 1); #define CHECK2(sft, A,B) \ if( (A) != (B) 
) \ { \ int gx=GJP.XnodeCoor()*lx+x; \ int gy=GJP.YnodeCoor()*ly+y; \ int gz=GJP.ZnodeCoor()*lz+z; \ int gt=GJP.TnodeCoor()*lt+t; \ printf("check failed (%d,%d,%d,%d) %d: %e %e\n", \ gx,gy,gz,gt, sft, A,B); \ } \ //minus direction y=0; ym=ly-1; for(t=0; t<lt; t++){ for(z=0; z<lz; z++){ for(x=0; x<lx; x++){ parity = x+y+z+t; parity = parity % 2; if(parity == cbn){ xymzt = (x/2)+(lx/2)*(ym+ly*(z+lz*t)); u = u_p + GAUGE_SIZE * ( xymzt + vol * cb); psi = psi_p + SPINOR_SIZE * xymzt; // printf("PSI %d %d %d %d %e %e\n", x,y,z,t,PSI(0,0,0),*u); for(c=0;c<3;c++){ TMP(0,c,0) = PSI(0,c,0) + sdag * ( -PSI(0,c,3) ); TMP(1,c,0) = PSI(1,c,0) + sdag * ( -PSI(1,c,3) ); TMP(0,c,1) = PSI(0,c,1) + sdag * ( PSI(0,c,2) ); TMP(1,c,1) = PSI(1,c,1) + sdag * ( PSI(1,c,2) ); } /* multiply by U_mu */ mu = 1; shft=iym*HALF_SPINOR_SIZE; for(s=0;s<2;s++){ for(c=0;c<3;c++){ #if 1 *(send_buf1+shft) = ( U(0,0,c,mu) * TMP(0,0,s) + U(0,1,c,mu) * TMP(0,1,s) + U(0,2,c,mu) * TMP(0,2,s) + U(1,0,c,mu) * TMP(1,0,s) + U(1,1,c,mu) * TMP(1,1,s) + U(1,2,c,mu) * TMP(1,2,s) ); ++shft; *(send_buf1+shft) = ( U(0,0,c,mu) * TMP(1,0,s) + U(0,1,c,mu) * TMP(1,1,s) + U(0,2,c,mu) * TMP(1,2,s) - U(1,0,c,mu) * TMP(0,0,s) - U(1,1,c,mu) * TMP(0,1,s) - U(1,2,c,mu) * TMP(0,2,s) ); ++shft; #else CHECK2(shft,*(send_buf5+shft) , ( U(0,0,c,mu) * TMP(0,0,s) + U(0,1,c,mu) * TMP(0,1,s) + U(0,2,c,mu) * TMP(0,2,s) + U(1,0,c,mu) * TMP(1,0,s) + U(1,1,c,mu) * TMP(1,1,s) + U(1,2,c,mu) * TMP(1,2,s) )); ++shft; CHECK2(shft,*(send_buf5+shft) , ( U(0,0,c,mu) * TMP(1,0,s) + U(0,1,c,mu) * TMP(1,1,s) + U(0,2,c,mu) * TMP(1,2,s) - U(1,0,c,mu) * TMP(0,0,s) - U(1,1,c,mu) * TMP(0,1,s) - U(1,2,c,mu) * TMP(0,2,s) )); ++shft; #endif } } ++iym; } } } } // getMinusData((IFloat *)recv_buf6, (IFloat *)send_buf1, block1, 1); ///free(send_buf1); } //mu=2 if(GJP.Znodes() != 1) { //send_buf2=(Float *) malloc(block2*sizeof(Float)); //recv_buf3=(Float *) malloc(block2*sizeof(Float)); //recv_buf7=(Float *) malloc(block2*sizeof(Float)); //plus direction z=lz-1; zp=0; for(t=0; t<lt; t++){ for(y=0; y<ly; y++){ for(x=0; x<lx; x++){ parity = x+y+z+t; parity = parity % 2; if(parity == cbn){ xyzpt = (x/2)+(lx/2)*(y+ly*(zp+lz*t)); psi = psi_p + SPINOR_SIZE * xyzpt; shft=izp*HALF_SPINOR_SIZE; shft6=shft+6; for(c=0;c<3;c++){ #if 0 *(send_buf2+shft) = PSI(0,c,0) - sdag * ( -PSI(1,c,2) ); *(send_buf2+shft+1) = PSI(1,c,0) - sdag * ( PSI(0,c,2) ); *(send_buf2+shft6) = PSI(0,c,1) - sdag * ( PSI(1,c,3) ); *(send_buf2+shft6+1) = PSI(1,c,1) - sdag * ( -PSI(0,c,3) ); #else CHECK(*(send_buf2+shft) , PSI(0,c,0) - sdag * ( -PSI(1,c,2) )); CHECK(*(send_buf2+shft+1) , PSI(1,c,0) - sdag * ( PSI(0,c,2) )); CHECK(*(send_buf2+shft6) , PSI(0,c,1) - sdag * ( PSI(1,c,3) )); CHECK(*(send_buf2+shft6+1) , PSI(1,c,1) - sdag * ( -PSI(0,c,3) )); #endif shft+=2; shft6+=2; } ++izp; } } } } // getPlusData((IFloat *)recv_buf3, (IFloat *)send_buf2, block2, 2); //minus direction z=0; zm=lz-1; for(t=0; t<lt; t++){ for(y=0; y<ly; y++){ for(x=0; x<lx; x++){ parity = x+y+z+t; parity = parity % 2; if(parity == cbn){ xyzmt = (x/2)+(lx/2)*(y+ly*(zm+lz*t)); u = u_p + GAUGE_SIZE * ( xyzmt + vol * cb); psi = psi_p + SPINOR_SIZE * xyzmt; for(c=0;c<3;c++){ TMP(0,c,0) = PSI(0,c,0) + sdag * ( -PSI(1,c,2) ); TMP(1,c,0) = PSI(1,c,0) + sdag * ( PSI(0,c,2) ); TMP(0,c,1) = PSI(0,c,1) + sdag * ( PSI(1,c,3) ); TMP(1,c,1) = PSI(1,c,1) + sdag * ( -PSI(0,c,3) ); } /* multiply by U_mu */ mu = 2; shft=izm*HALF_SPINOR_SIZE; for(s=0;s<2;s++){ for(c=0;c<3;c++){ #if 0 *(send_buf6+shft) = ( U(0,0,c,mu) * TMP(0,0,s) + U(0,1,c,mu) * TMP(0,1,s) + U(0,2,c,mu) * 
TMP(0,2,s) + U(1,0,c,mu) * TMP(1,0,s) + U(1,1,c,mu) * TMP(1,1,s) + U(1,2,c,mu) * TMP(1,2,s) ); ++shft; *(send_buf6+shft) = ( U(0,0,c,mu) * TMP(1,0,s) + U(0,1,c,mu) * TMP(1,1,s) + U(0,2,c,mu) * TMP(1,2,s) - U(1,0,c,mu) * TMP(0,0,s) - U(1,1,c,mu) * TMP(0,1,s) - U(1,2,c,mu) * TMP(0,2,s) ); ++shft; #else CHECK(*(send_buf6+shft) , ( U(0,0,c,mu) * TMP(0,0,s) + U(0,1,c,mu) * TMP(0,1,s) + U(0,2,c,mu) * TMP(0,2,s) + U(1,0,c,mu) * TMP(1,0,s) + U(1,1,c,mu) * TMP(1,1,s) + U(1,2,c,mu) * TMP(1,2,s) )); ++shft; CHECK(*(send_buf6+shft) , ( U(0,0,c,mu) * TMP(1,0,s) + U(0,1,c,mu) * TMP(1,1,s) + U(0,2,c,mu) * TMP(1,2,s) - U(1,0,c,mu) * TMP(0,0,s) - U(1,1,c,mu) * TMP(0,1,s) - U(1,2,c,mu) * TMP(0,2,s) )); ++shft; #endif } } ++izm; } } } } // getMinusData((IFloat *)recv_buf7, (IFloat *)send_buf6, block2, 2); //free(send_buf2); } //mu=3 if(GJP.Tnodes() != 1) { //send_buf3=(Float *) malloc(block3*sizeof(Float)); //recv_buf4=(Float *) malloc(block3*sizeof(Float)); //recv_buf8=(Float *) malloc(block3*sizeof(Float)); //plus direction t=lt-1; tp=0; for(z=0; z<lz; z++){ for(y=0; y<ly; y++){ for(x=0; x<lx; x++){ parity = x+y+z+t; parity = parity % 2; if(parity == cbn){ xyztp = (x/2)+(lx/2)*(y+ly*(z+lz*tp)); psi = psi_p + SPINOR_SIZE * xyztp; shft=itp*HALF_SPINOR_SIZE; shft6=shft+6; // printf("PSI %d %d %d %d %e %e\n", x,y,z,t,PSI(0,0,0),*u); for(c=0;c<3;c++){ #if 1 *(send_buf3+shft) = PSI(0,c,0) - sdag * ( PSI(0,c,2) ); *(send_buf3+shft+1) = PSI(1,c,0) - sdag * ( PSI(1,c,2) ); *(send_buf3+shft6) = PSI(0,c,1) - sdag * ( PSI(0,c,3) ); *(send_buf3+shft6+1) = PSI(1,c,1) - sdag * ( PSI(1,c,3) ); #else CHECK(*(send_buf3+shft) , PSI(0,c,0) - sdag * ( PSI(0,c,2) )); CHECK(*(send_buf3+shft+1) , PSI(1,c,0) - sdag * ( PSI(1,c,2) )); CHECK(*(send_buf3+shft6) , PSI(0,c,1) - sdag * ( PSI(0,c,3) )); CHECK(*(send_buf3+shft6+1) , PSI(1,c,1) - sdag * ( PSI(1,c,3) )); #endif shft+=2; shft6+=2; } ++itp; } } } } // getPlusData((IFloat *)recv_buf4, (IFloat *)send_buf3, block3, 3); //minus direction t=0; tm=lt-1; for(z=0; z<lz; z++){ for(y=0; y<ly; y++){ for(x=0; x<lx; x++){ parity = x+y+z+t; parity = parity % 2; if(parity == cbn){ xyztm = (x/2)+(lx/2)*(y+ly*(z+lz*tm)); u = u_p + GAUGE_SIZE * ( xyztm + vol * cb); psi = psi_p + SPINOR_SIZE * xyztm; // printf("PSI %d %d %d %d %e %e\n", x,y,z,t,PSI(0,0,0),*u); for(c=0;c<3;c++){ TMP(0,c,0) = PSI(0,c,0) + sdag * ( PSI(0,c,2) ); TMP(1,c,0) = PSI(1,c,0) + sdag * ( PSI(1,c,2) ); TMP(0,c,1) = PSI(0,c,1) + sdag * ( PSI(0,c,3) ); TMP(1,c,1) = PSI(1,c,1) + sdag * ( PSI(1,c,3) ); } /* multiply by U_mu */ mu = 3; shft=itm*HALF_SPINOR_SIZE; for(s=0;s<2;s++){ for(c=0;c<3;c++){ #if 0 *(send_buf7+shft) = ( U(0,0,c,mu) * TMP(0,0,s) + U(0,1,c,mu) * TMP(0,1,s) + U(0,2,c,mu) * TMP(0,2,s) + U(1,0,c,mu) * TMP(1,0,s) + U(1,1,c,mu) * TMP(1,1,s) + U(1,2,c,mu) * TMP(1,2,s) ); ++shft; *(send_buf7+shft) = ( U(0,0,c,mu) * TMP(1,0,s) + U(0,1,c,mu) * TMP(1,1,s) + U(0,2,c,mu) * TMP(1,2,s) - U(1,0,c,mu) * TMP(0,0,s) - U(1,1,c,mu) * TMP(0,1,s) - U(1,2,c,mu) * TMP(0,2,s) ); ++shft; #else CHECK(*(send_buf7+shft) , ( U(0,0,c,mu) * TMP(0,0,s) + U(0,1,c,mu) * TMP(0,1,s) + U(0,2,c,mu) * TMP(0,2,s) + U(1,0,c,mu) * TMP(1,0,s) + U(1,1,c,mu) * TMP(1,1,s) + U(1,2,c,mu) * TMP(1,2,s) )); ++shft; CHECK(*(send_buf7+shft) , ( U(0,0,c,mu) * TMP(1,0,s) + U(0,1,c,mu) * TMP(1,1,s) + U(0,2,c,mu) * TMP(1,2,s) - U(1,0,c,mu) * TMP(0,0,s) - U(1,1,c,mu) * TMP(0,1,s) - U(1,2,c,mu) * TMP(0,2,s) )); ++shft; #endif } } ++itm; } } } } // getMinusData((IFloat *)recv_buf8, (IFloat *)send_buf7, block3, 3); // free(send_buf3); } } #endif }
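/* A minimal sketch of the even-odd (checkerboard) indexing that the boundary
 * loops above rely on: sites are stored by parity, so a site (x,y,z,t) on the
 * cbn checkerboard lives at half-lattice index
 * (x>>1) + (lx>>1)*(y + ly*(z + lz*t)), and only sites whose parity matches
 * are packed into the send buffers. Both helper names are local to this
 * illustration. */
static inline int eo_site_index(int x, int y, int z, int t,
                                int lx, int ly, int lz)
{
  return (x >> 1) + (lx >> 1) * (y + ly * (z + lz * t));
}

static inline int site_parity(int x, int y, int z, int t)
{
  return (x + y + z + t) & 1; // 0 = even, 1 = odd
}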
rawSHA1_fmt_plug.c
/* * This software is Copyright (c) 2004 bartavelle, <simon at banquise.net>, and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. * * Optimised set_key() and reduced binary size by magnum, 2012 * * OMP added May 2013, JimF */ #if FMT_EXTERNS_H extern struct fmt_main fmt_rawSHA1; extern struct fmt_main fmt_rawSHA1_axcrypt; #elif FMT_REGISTERS_H john_register_one(&fmt_rawSHA1); john_register_one(&fmt_rawSHA1_axcrypt); #else #include <string.h> #include "arch.h" #include "sha.h" #include "common.h" #include "formats.h" #include "base64_convert.h" #include "rawSHA1_common.h" #include "johnswap.h" #if !FAST_FORMATS_OMP #undef _OPENMP #endif //#undef SIMD_COEF_32 //#undef SIMD_PARA_SHA1 /* * Only effective for SIMD. * Undef to disable reversing steps for benchmarking. */ #define REVERSE_STEPS #ifdef _OPENMP #ifdef SIMD_COEF_32 #ifndef OMP_SCALE #define OMP_SCALE 1024 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 2048 #endif #endif #include <omp.h> #endif #include "simd-intrinsics.h" #include "memdbg.h" #define AX_FORMAT 1 #define RAW_FORMAT 2 #define AX_FORMAT_LABEL "Raw-SHA1-AxCrypt" #define FORMAT_LABEL "Raw-SHA1" #define FORMAT_NAME "" #define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1) #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE DIGEST_SIZE #define BINARY_ALIGN 4 #ifdef SIMD_COEF_32 #define PLAINTEXT_LENGTH 55 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #define FMT_IS_BE #include "common-simd-getpos.h" #else #define PLAINTEXT_LENGTH 125 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #ifdef SIMD_COEF_32 static uint32_t (*saved_key)[SHA_BUF_SIZ*NBKEYS]; static uint32_t (*crypt_key)[DIGEST_SIZE/4*NBKEYS]; #else static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_key)[DIGEST_SIZE / 4]; #endif static unsigned algo; static unsigned digest_size; static unsigned pos; static unsigned SSEi_flags; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif #ifdef SIMD_COEF_32 saved_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS, sizeof(*saved_key), MEM_ALIGN_SIMD); crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS, sizeof(*crypt_key), MEM_ALIGN_SIMD); #else saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key)); #endif } #ifndef REVERSE_STEPS #undef SSEi_REVERSE_STEPS #define SSEi_REVERSE_STEPS 0 #undef SSEi_REVERSE_3STEPS #define SSEi_REVERSE_3STEPS 0 #endif static void init_raw(struct fmt_main *self) { algo = RAW_FORMAT; digest_size = DIGEST_SIZE; pos = 4; SSEi_flags = SSEi_REVERSE_STEPS | SSEi_MIXED_IN; init(self); } static void init_ax(struct fmt_main *self) { algo = AX_FORMAT; digest_size = AX_DIGEST_SIZE; pos = 3; SSEi_flags = SSEi_REVERSE_3STEPS | SSEi_MIXED_IN; init(self); } static void done(void) { MEM_FREE(crypt_key); MEM_FREE(saved_key); } #ifdef SIMD_COEF_32 #define HASH_OFFSET (index&(SIMD_COEF_32-1))+(((unsigned int)index%NBKEYS)/SIMD_COEF_32)*SIMD_COEF_32*5+pos*SIMD_COEF_32 static int get_hash_0(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_0; } static int get_hash_1(int index) { return 
crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_6; } #else static int get_hash_0(int index) { return crypt_key[index][pos] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_key[index][pos] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_key[index][pos] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_key[index][pos] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_key[index][pos] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_key[index][pos] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_key[index][pos] & PH_MASK_6; } #endif static int binary_hash_0(void *binary) { return ((uint32_t*)binary)[pos] & PH_MASK_0; } static int binary_hash_1(void *binary) { return ((uint32_t*)binary)[pos] & PH_MASK_1; } static int binary_hash_2(void *binary) { return ((uint32_t*)binary)[pos] & PH_MASK_2; } static int binary_hash_3(void *binary) { return ((uint32_t*)binary)[pos] & PH_MASK_3; } static int binary_hash_4(void *binary) { return ((uint32_t*)binary)[pos] & PH_MASK_4; } static int binary_hash_5(void *binary) { return ((uint32_t*)binary)[pos] & PH_MASK_5; } static int binary_hash_6(void *binary) { return ((uint32_t*)binary)[pos] & PH_MASK_6; } #include "common-simd-setkey32.h" static void *get_binary(char *ciphertext) { static uint32_t full[DIGEST_SIZE / 4]; unsigned char *realcipher = (unsigned char*)full; memset(full, 0, sizeof(full)); // since ax-crypt 'may' be short. 
ciphertext += TAG_LENGTH; base64_convert(ciphertext, e_b64_hex, HASH_LENGTH, realcipher, e_b64_raw, sizeof(full), flg_Base64_MIME_TRAIL_EQ, 0); #ifdef SIMD_COEF_32 #if ARCH_LITTLE_ENDIAN alter_endianity(realcipher, DIGEST_SIZE); #endif #ifdef REVERSE_STEPS if (algo == RAW_FORMAT) sha1_reverse(full); else sha1_reverse3(full); #endif #endif return (void*)realcipher; } static char *source(char *source, void *binary) { static char hex[CIPHERTEXT_LENGTH + 1] = FORMAT_TAG; uint32_t hash[DIGEST_SIZE / 4]; char *p; int i, j; memcpy(hash, binary, DIGEST_SIZE); /* Un-reverse binary */ #ifdef SIMD_COEF_32 #ifdef REVERSE_STEPS if (algo == RAW_FORMAT) sha1_unreverse(hash); else { hash[4] = 0; sha1_unreverse3(hash); } #endif #if ARCH_LITTLE_ENDIAN alter_endianity(hash, DIGEST_SIZE); #endif #else if (algo == AX_FORMAT) hash[4] = 0; #endif #if !ARCH_LITTLE_ENDIAN alter_endianity(hash, DIGEST_SIZE); #endif /* Convert to hex string */ p = hex + TAG_LENGTH; for (i = 0; i < 5; i++) for (j = 0; j < 8; j++) *p++ = itoa16[(hash[i] >> ((j ^ 1) * 4)) & 0xf]; *p = 0; return hex; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT; #pragma omp parallel for for (index = 0; index < loops; ++index) #endif { #if SIMD_COEF_32 SIMDSHA1body(saved_key[index], crypt_key[index], NULL, SSEi_flags); #else SHA_CTX ctx; SHA1_Init( &ctx ); SHA1_Update( &ctx, (unsigned char*) saved_key[index], strlen( saved_key[index] ) ); SHA1_Final( (unsigned char*) crypt_key[index], &ctx); #endif } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) #ifdef SIMD_COEF_32 if (((uint32_t*)binary)[pos] == ((uint32_t*)crypt_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32 + pos*SIMD_COEF_32]) #else if ( ((uint32_t*)binary)[0] == crypt_key[index][0] ) #endif return 1; return 0; } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 return (((uint32_t *) binary)[pos] == ((uint32_t*)crypt_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32 + pos*SIMD_COEF_32]); #else return !memcmp(binary, crypt_key[index], digest_size); #endif } static int cmp_exact(char *source, int index) { #ifdef SIMD_COEF_32 uint32_t crypt_key[DIGEST_SIZE / 4]; SHA_CTX ctx; char *key = get_key(index); SHA1_Init(&ctx); SHA1_Update(&ctx, key, strlen(key)); SHA1_Final((void*)crypt_key, &ctx); #if ARCH_LITTLE_ENDIAN alter_endianity(crypt_key, DIGEST_SIZE); #endif #ifdef REVERSE_STEPS if (algo == RAW_FORMAT) sha1_reverse(crypt_key); else sha1_reverse3(crypt_key); #endif return !memcmp(get_binary(source), crypt_key, digest_size); #else return 1; #endif } struct fmt_main fmt_rawSHA1 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE, { NULL }, { FORMAT_TAG, FORMAT_TAG_OLD }, rawsha1_common_tests }, { init_raw, done, fmt_default_reset, rawsha1_common_prepare, rawsha1_common_valid, rawsha1_common_split, get_binary, fmt_default_salt, { NULL }, source, { binary_hash_0, binary_hash_1, binary_hash_2, binary_hash_3, binary_hash_4, binary_hash_5, binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, 
get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_rawSHA1_axcrypt = { { AX_FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, DIGEST_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE, { NULL }, { NULL }, axcrypt_common_tests }, { init_ax, done, fmt_default_reset, rawsha1_common_prepare, rawsha1_axcrypt_valid, rawsha1_axcrypt_split, get_binary, fmt_default_salt, { NULL }, source, { binary_hash_0, binary_hash_1, binary_hash_2, binary_hash_3, binary_hash_4, binary_hash_5, binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
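/*
 * The scalar (non-SIMD) path of crypt_all() above boils down to a single
 * SHA-1 over each candidate key. A standalone sketch of that path, assuming
 * this tree's "sha.h" provides the usual SHA_CTX/SHA1_* interface;
 * example_raw_sha1 is a hypothetical helper, not part of the format.
 */
static void example_raw_sha1(const char *key, unsigned char digest[20])
{
	SHA_CTX ctx;

	SHA1_Init(&ctx);
	SHA1_Update(&ctx, key, strlen(key));
	SHA1_Final(digest, &ctx);
}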
Misc.h
//########################################################################
//## Copyright 2019 Da Yan http://www.cs.uab.edu/yanda
//##
//## Licensed under the Apache License, Version 2.0 (the "License");
//## you may not use this file except in compliance with the License.
//## You may obtain a copy of the License at
//##
//## //http://www.apache.org/licenses/LICENSE-2.0
//##
//## Unless required by applicable law or agreed to in writing, software
//## distributed under the License is distributed on an "AS IS" BASIS,
//## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//## See the License for the specific language governing permissions and
//## limitations under the License.
//########################################################################

//########################################################################
//## Contributors
//## * Wenwen Qu
//## * Da Yan
//########################################################################

#ifndef __sleuth_misc_h
#define __sleuth_misc_h

#include "assert.h"
#include "../treeminer/Hashtable.h"

FreqHT FK;

#define ChildrenT map<Element*, vector<Element*> >

struct TransBlock{
    int st1;
    int st2;
    int end1;
    int end2;
    bool ins_flag; // whether inside or outside
    bool f2;

    TransBlock(int i1, int i2, int e1, int e2, int flag, int _f2):
        st1(i1), st2(i2), end1(e1), end2(e2), ins_flag(flag), f2(_f2){}
};

// check whether an element is frequent
bool notfrequent(Element &n, int iter)
{
    if(mine_induced && n.isup >= minsup) return false;
    else if(!mine_induced && n.sup >= minsup) return false;
    return true;
}

/* if a tree is canonical, each level of the tree follows a certain order.
 * cand is the full tree, tpos is the pos of the root of the current rightmost subtree
 */
bool check_canonical(vector<int> &cand, int tpos) {
    if (mine_ordered)
        return true;
    // only have to check subtrees on the rightmost path, against their immediate
    // siblings to the left, and make sure the code is smaller or equal
    bool first;
    int i, parent = 0, sibling;
    int scnt = 0;

    // base condition for stopping the recursion: the root has no sibling
    if (tpos == 0) {
        return true;
    }
    // find the pos of the parent and the immediate left sibling of tpos under parent
    first = true;
    sibling = -1; // not found!
    for (i = tpos - 1; i >= 0; i--) {
        if (cand[i] == BranchIt)
            scnt++;
        else
            scnt--;
        if (first && scnt == 0) {
            sibling = i;
            first = false;
        }
        if (scnt < 0) { // parent is before all siblings
            parent = i;
            break;
        }
    }
    if (sibling == -1) { // not found!
        return check_canonical(cand, parent);
    } else {
        // compare the two subtrees rooted at tpos and sibling
        int j;
        for (i = tpos, j = sibling; i < cand.size() && j < tpos; i++, j++) {
            if (cand[i] > cand[j])
                return check_canonical(cand, parent);
            else if (cand[i] < cand[j]) {
                return false;
            }
        }
    }
    return true;
}

bool lexsmaller(vector<int> &subtree, vector<int> &cand)
{
    int i,j;
    for (i=0, j=0; i < subtree.size() && j < cand.size();){
        if (subtree[i] > cand[j]){
            if (cand[j] != BranchIt) return false;
            else{
                while (cand[j] == BranchIt) j++;
                if (subtree[i] > cand[j]) return false;
                else if (subtree[i] < cand[j]) return true;
                else return false;
            }
        }
        else if (subtree[i] < cand[j]){
            if (subtree[i] != BranchIt) return true;
            else{
                while(subtree[i] == BranchIt) i++;
                if (subtree[i] > cand[j]) return false;
                else if (subtree[i] < cand[j]) return true;
                else return true;
            }
        }
        else{
            i++;
            j++;
        }
    }
    return false;
}

/* return a new node extending the previous prefix pattern; the new node with "val"
 * is appended to the node at "pos".
 * We can also prune the new node if one of the new pattern's parent patterns is not frequent.
 */
Element* test_node(int iter, TreeMinerPattern *eq, int val, int pos)
{
    Element *eqn = NULL;
    //if noprune, return with a new Eqnode
    if (prune_type == noprune) {
        eqn = new Element(val, pos);
        return eqn;
    }

    assert(false); // the pruning logic below does not work for DFS, should be disabled

    //perform pruning
    //prune based on frequent subtrees
    vector<int> cand;
    vector<int> subtree;
    int hval;
    int scope, scnt;

    //form the candidate prefix
    cand = eq->prefix();
    scnt = eq->get_scope(pos, scope); //what is the scope of node.pos
    while (scnt > scope) {
        cand.push_back(BranchIt);
        scnt--;
    }
    cand.push_back(val);

    bool res = true;
    //check subtrees
    int omita, omitb;
    res = true;
    //omit the i-th item (omita) and its associated BranchIt (omitb)
    int i, j, k;
    for (i = iter - 3; i >= 0; i--) {
        //find the position of the i-th item
        for (j = 0, k = 0; j < cand.size(); j++) {
            if (cand[j] != BranchIt) {
                if (k == i) {
                    omita = j;
                    break;
                } else
                    k++;
            }
        }
        //find the position of the associated BranchIt
        scnt = 0;
        for (j++; j < cand.size() && scnt >= 0; j++) {
            if (cand[j] == BranchIt) scnt--;
            else scnt++;
        }
        if (scnt >= 0) omitb = cand.size();
        else omitb = j - 1;
        hval = 0;
        subtree.clear();
        bool rootless = false;
        scnt = 0;
        for (k = 0; k < cand.size() && !rootless; k++) {
            if (k != omita && k != omitb) {
                subtree.push_back(cand[k]);
                if (cand[k] != BranchIt) {
                    hval += cand[k];
                    scnt++;
                } else
                    scnt--;
                if (scnt <= 0) rootless = true;
            }
        }
        //skip a rootless subtree
        if (!rootless && lexsmaller(subtree, cand)) {
            res = FK.find(iter - 1, subtree, hval);
            if (!res) return NULL; //subtree not found!
        }
    }
    if (res)
        eqn = new Element(val, pos);
    else
        eqn = NULL;
    return eqn;
}

/* After we generate a new child extension eqnode, update the projected database of the new eqnode.
 * @l1 : the projected database for pattern A
 * @l2 : the projected database for pattern B
 * @ins : the new eqnode
 * @st1, en1 : the begin and end positions in l1
 * @st2, en2 : the begin and end positions in l2
 */
void check_ins(vector<TreeMinerProjTrans> *l1, vector<TreeMinerProjTrans> *l2,
               Element *ins, // inside
               int st1, int st2, int en1, int en2, bool f2,
               vector<TreeMinerProjTrans>* output_tlist)
{
    TreeMinerProjTrans *n1, *n2;
    ival_type cmpval;
    bool found_flg = false; // child extension found?
    bool induced = false; // induced projected instance found for a join output?
    bool induced_flg = false; // induced projected instance found for the current transaction?
    int pos1 = st1; //temporary position holder for the n1 ival
    bool next2; //should we go to the next n2 ival?
    // once the child finds a parent entry, the remaining entries can be skipped
    int depth; // if depth == 1, (child, parent) is an induced pattern

    while (st2 < en2) {
        n1 = &(*l1)[pos1];
        n2 = &(*l2)[st2];
        next2 = true; //by default we go to the next n2 ival
        cmpval = ival::compare(n1->itscope, n2->itscope);
        switch (cmpval) {
        case sup:
            //n2 was under some n1, so such an extension is valid
            depth = n2->depth - n1->depth;
            if (depth >= 1 && n1->path_equal(*n2)) {
                if (depth == 1 && n1->induced) induced = true;
                else induced = false;
                if (!f2) depth = n2->depth;
                //construct a new projected transaction and push it into the projected database of the new child
                if (en1 - st1 > 1 || use_fullpath) {
                    // project the parent path into the new projected transaction
                    if (output_tlist)
                        output_tlist->emplace_back(n2->tid, n1->parscope, n1->itscope.lb, n2->itscope, depth, induced);
                    else
                        ins->add_list(n2->tid, n1->parscope, n1->itscope.lb, n2->itscope, depth, induced);
                } else {
                    if (output_tlist)
                        output_tlist->emplace_back(n2->tid, n2->itscope, depth, induced);
                    else
                        ins->add_list(n2->tid, n2->itscope, depth, induced);
                }
                if (!count_unique) ins->sup++;
                found_flg = true;
                if (induced) induced_flg = true;
            }
            next2 = false;
            break;
        case before:
            //check the next n1 ival against the same n2 ival
            next2 = false;
            break;
        }
        if (next2 || pos1 + 1 == en1) { //go to the next n2 ival
            pos1 = st1;
            st2++;
        } else
            pos1++; //go to the next n1 ival
    }
    if (found_flg && count_unique) ins->sup++;
    if (induced_flg) ins->isup++;
}

/* After we generate a new cousin extension eqnode, update the projected database of the new eqnode.
 * @l1 : the projected database for pattern A
 * @l2 : the projected database for pattern B
 * @outs : the new eqnode
 * @st1, en1 : the begin and end positions in l1
 * @st2, en2 : the begin and end positions in l2
 */
void check_outs(vector<TreeMinerProjTrans> *l1, vector<TreeMinerProjTrans> *l2,
                Element *outs,
                int st1, int st2, int en1, int en2,
                vector<TreeMinerProjTrans>* output_tlist)
{
    TreeMinerProjTrans *n1, *n2;
    ival_type cmpval;
    bool found_flg = false;
    bool induced, induced_flg = false;
    bool next2;
    int pos1 = st1;
    while (st2 < en2) {
        n1 = &(*l1)[pos1];
        n2 = &(*l2)[st2];
        cmpval = ival::compare(n1->itscope, n2->itscope);
        switch (cmpval) {
        case sup:
            break;
        //if the two scopes do not intersect, the extension is valid
        case before:
        case after:
            if (mine_ordered && cmpval == after) break;
            //n1 is before n2; check if n1.par is a subset of, or equal to, n2.par
            if (n1->path_equal(*n2)) {
                if (n1->induced && n2->induced) induced = true;
                else induced = false;
                //construct a new projected transaction and push it into the projected database of the new child
                if (en1 - st1 > 1 || use_fullpath) {
                    // project the parent path into the new projected transaction
                    if (output_tlist)
                        output_tlist->emplace_back(n2->tid, n1->parscope, n1->itscope.lb, n2->itscope, n2->depth, induced);
                    else
                        outs->add_list(n2->tid, n1->parscope, n1->itscope.lb, n2->itscope, n2->depth, induced);
                } else {
                    if (output_tlist)
                        output_tlist->emplace_back(n2->tid, n2->itscope, n2->depth, induced);
                    else
                        outs->add_list(n2->tid, n2->itscope, n2->depth, induced);
                }
                if (!count_unique) outs->sup++;
                found_flg = true;
                if (induced) induced_flg = true;
            }
            break;
        case sub:
            break;
        }
        if (pos1 + 1 == en1) { //go to the next n2 ival
            pos1 = st1;
            st2++;
        } else
            pos1++; //go to the next n1 ival
    }
    if (found_flg && count_unique) outs->sup++;
    if (induced_flg) outs->isup++;
}

//do a "scope list join" to get the projected databases of the "inside" and "outside" nodes, as described in Zaki's SLEUTH work
//l1 and l2 are the two projected databases to join
//f2 means this function is called by getF2()
//the newly generated projected databases after the join are stored in ins->tlist and outs->tlist
void get_intersect(vector<TreeMinerProjTrans> *l1, vector<TreeMinerProjTrans> *l2,
                   Element *ins, Element *outs, bool f2 = false)
{
    //f2 is true only when we compute F2; set to false for Fk
    TreeMinerProjTrans *n1, *n2; //in the SLEUTH paper, Fig 3 Vertical Format, n1 points to a row of a rectangle
    vector<TransBlock> tbs; // store all projected transactions ready to join
    // join the projected instances of the same transaction
    //i1 and i2 are the positions where a tid match begins; e1 and e2 are the positions where the tid match ends.
    //The join operates over [i1, e1) and [i2, e2)
    int i1 = 0, i2 = 0;
    int e1, e2;
    while (i1 < l1->size() && i2 < l2->size()) { // join
        n1 = &(*l1)[i1];
        n2 = &(*l2)[i2];
        //look for matching tids
        if (n1->tid < n2->tid)
            i1++;
        else if (n1->tid > n2->tid)
            i2++;
        else { //tids match
            e1 = i1;
            e2 = i2;
            //find the tid end positions in l1 and l2
            while (e1 < l1->size() && (*l1)[e1].tid == n1->tid) e1++;
            while (e2 < l2->size() && (*l2)[e2].tid == n2->tid) e2++;
            //generate projected transactions if a candidate is found
            if (ins) { // child extension
                //we use + since the join is merge-like
                if ((l1->size() + l2->size()) <= tauDB_omp)
                    check_ins(l1, l2, ins, i1, i2, e1, e2, f2, 0); // in the SLEUTH paper, Fig 3 Vertical Format, each range [i1, e1) denotes the rows of a transaction with some tid
                else
                    tbs.emplace_back(i1, i2, e1, e2, true, f2);
            }
            if (outs) { // cousin extension
                //we use + since the join is merge-like
                if ((l1->size() + l2->size()) <= tauDB_omp)
                    check_outs(l1, l2, outs, i1, i2, e1, e2, 0); // in the SLEUTH paper, Fig 3 Vertical Format, each range [i1, e1) denotes the rows of a transaction with some tid
                else
                    tbs.emplace_back(i1, i2, e1, e2, false, f2);
            }
            //restore the indexes to the end of the tid ranges
            i1 = e1;
            i2 = e2;
        }
    }
    //we use + since the join is merge-like
    if ((l1->size() + l2->size()) > tauDB_omp) {
        //do the join in parallel
        vector<vector<TreeMinerProjTrans> > ins_tlistOfThread(THREADS);
        vector<vector<TreeMinerProjTrans> > outs_tlistOfThread(THREADS);
        #pragma omp parallel for schedule(dynamic, CHUNK) num_threads(THREADS)
        for (int i = 0; i < tbs.size(); i++) {
            TransBlock& cur = tbs[i];
            int thread_id = omp_get_thread_num();
            if (tbs[i].ins_flag) {
                check_ins(l1, l2, ins, cur.st1, cur.st2, cur.end1, cur.end2, cur.f2, &ins_tlistOfThread[thread_id]);
            } else {
                check_outs(l1, l2, outs, cur.st1, cur.st2, cur.end1, cur.end2, &outs_tlistOfThread[thread_id]);
            }
        }
        if (ins) {
            for (int i = 0; i < THREADS; i++) {
                vector<TreeMinerProjTrans>& tlist_i = ins_tlistOfThread[i];
                if (ins->tlist.empty())
                    ins->tlist.swap(tlist_i);
                else {
                    ins->tlist.insert(ins->tlist.end(), tlist_i.begin(), tlist_i.end());
                }
            }
            sort(ins->tlist.begin(), ins->tlist.end(), projTrans_cmp);
        }
        if (outs) {
            for (int i = 0; i < THREADS; i++) {
                vector<TreeMinerProjTrans>& tlist_i = outs_tlistOfThread[i];
                if (outs->tlist.empty())
                    outs->tlist.swap(tlist_i);
                else {
                    outs->tlist.insert(outs->tlist.end(), tlist_i.begin(), tlist_i.end());
                }
            }
            sort(outs->tlist.begin(), outs->tlist.end(), projTrans_cmp);
        }
    }
}
#endif
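get_intersect() above switches from a serial scope-list join to an OpenMP join once the combined list size exceeds tauDB_omp: each thread appends its projected instances to a private vector, the buffers are concatenated afterwards, and one sort restores the tid order a serial join would have produced. A minimal, self-contained sketch of that buffer-then-merge pattern follows; the record type, names, and chunk size are illustrative, not the ones from this header.

#include <algorithm>
#include <vector>
#include <omp.h>

struct Rec { int tid; int payload; };

// blocks stands in for the tbs vector of matched tid ranges; each loop
// iteration stands in for one check_ins()/check_outs() call.
std::vector<Rec> parallel_join(const std::vector<Rec>& blocks, int threads)
{
    std::vector<std::vector<Rec>> per_thread(threads);
    #pragma omp parallel for schedule(dynamic, 16) num_threads(threads)
    for (int i = 0; i < (int)blocks.size(); ++i) {
        int id = omp_get_thread_num();
        per_thread[id].push_back(blocks[i]); // thread-private buffer: no locking
    }
    std::vector<Rec> out;
    for (auto& buf : per_thread) {
        if (out.empty())
            out.swap(buf);                   // cheap move for the first buffer
        else
            out.insert(out.end(), buf.begin(), buf.end());
    }
    // one global sort restores the tid order of a serial merge-like join
    std::sort(out.begin(), out.end(),
              [](const Rec& a, const Rec& b) { return a.tid < b.tid; });
    return out;
}

The per-thread buffers trade one extra sort for lock-free appends, which is the same design choice the file makes by collecting ins_tlistOfThread/outs_tlistOfThread and sorting with projTrans_cmp at the end.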
network.h
/* Copyright (c) 2014, Kai Klindworth
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef NEURAL_NETWORK_NETWORK_H
#define NEURAL_NETWORK_NETWORK_H

#include <memory>
#include <cassert>
#include <vector>
#include <cmath>
#include <limits>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <sstream>

#include <omp.h>

#include <neural_network/blas_wrapper.h>
#include <neural_network/layer.h>
#include <neural_network/settings.h>

namespace neural_network
{

template<typename T>
class network
{
public:
    network(int in_dim) : in_dim(in_dim), out_dim(in_dim), is_trainable(false)
    {
    }

    network(int in_dim, int out_dim, const std::initializer_list<int>& list)
        : in_dim(in_dim), out_dim(in_dim), is_trainable(false)
    {
        bool dropout = false;
        for(int csize : list)
        {
            emplace_layer<fully_connected_layer>(csize);
            emplace_layer<relu_layer>();
            if(dropout)
                emplace_layer<dropout_layer>();
        }
        emplace_layer<fully_connected_layer>(out_dim);
        emplace_layer<softmax_output_layer>();
        //emplace_layer<tanh_entropy_output_layer>();
    }

    /**
     * Creates a new layer and supplies the first two parameters of the layer's constructor on its own (propagate down, input dimension).
     * E.g. if your layer has the constructor layer_type(bool propagate_down, int input_dimension, int output_dimension), you call this function as
     * emplace_layer<layer_type>(output_dimension)
     */
    template<template<typename Ti> class layer_type, typename... args_type>
    void emplace_layer(args_type... args)
    {
        layers.push_back(std::make_shared<layer_type<T>>(is_trainable, out_dim, args...));
        out_dim = layers.back()->output_dimension();
        layers.back()->init_weights();
        is_trainable |= layers.back()->trainable();
    }

    /**
     * @brief test Tests the neural network. It measures how many sample classes are correctly predicted.
     * @param data Samples
     * @param gt Ground truth labels of the samples.
     *           Therefore it must have the same size as the data vector.
     * @return Percentage of correctly predicted sample classes
     */
    float test(const std::vector<std::vector<T>>& data, const std::vector<short>& gt)
    {
        assert(data.size() == gt.size());

        int correct = 0;
        int approx_correct = 0;
        int approx_correct2 = 0;
        for(std::size_t i = 0; i < data.size(); ++i)
        {
            int result = this->predict(data[i].data());
            int expected = std::abs(gt[i]);

            if(result == expected)
                ++correct;
            if(std::abs(result - expected) < 5)
                ++approx_correct;
            if(std::abs(result - expected) < 10)
                ++approx_correct2;
        }
        //std::cout << "result: " << (float)correct/data.size() << ", approx5: " << (float)approx_correct/data.size() << ", approx10: " << (float)approx_correct2/data.size() << std::endl;
        return static_cast<float>(correct)/data.size();
    }

    std::vector<T> output(const T* data)
    {
        forward_propagation(data);

        std::vector<T> result(out_dim);
        const T* output_data = layers.back()->output();
        std::copy(output_data, output_data + out_dim, result.begin());
        return result;
    }

    /**
     * @brief predict Runs the prediction for one sample and returns its class label
     * @param data Sample
     * @return Class label
     */
    int predict(const T* data)
    {
        forward_propagation(data);

        const T* output_data = layers.back()->output();
        int idx = -1;
        T max_output = -std::numeric_limits<T>::max();
        for(int i = 0; i < out_dim; ++i)
        {
            if(output_data[i] > max_output)
            {
                max_output = output_data[i];
                idx = i;
            }
        }
        return idx;
    }

    int predict(const std::vector<T>& data)
    {
        assert(static_cast<int>(data.size()) == layers.front()->input_dimension());
        return this->predict(data.data());
    }

    void update_weights(int batch_size)
    {
        for(auto& clayer : layers)
            clayer->update_weights(batch_size);
    }

    void training_sample(const T* data, const T* gt)
    {
        assert(!layers.empty());
        forward_propagation(data);
        backward_propagation(data, gt);
        //update_weights();
    }

    void training_sample(const T* data, short gt_idx)
    {
        assert(gt_idx >= 0 && gt_idx < out_dim);
        if(gt_idx < 0 || gt_idx >= out_dim)
            throw std::runtime_error("invalid ground truth value: index: " + std::to_string(gt_idx) + ", outdim: " + std::to_string(out_dim));
        std::vector<T> gt(out_dim, 0.0); //for softmax
        //std::vector<T> gt(out_dim, -1.0); //for tanh
        gt[gt_idx] = 1.0;
        training_sample(data, gt.data());
    }

    void end_batch(int batch_size)
    {
        update_weights(batch_size);
    }

    void training_single_epoch(const std::vector<std::vector<T>>& data, const std::vector<short>& gt, std::size_t batch_size)
    {
        for(auto& clayer : layers)
            clayer->set_phase(layer_base<T>::phase::Training);
        assert(data.size() == gt.size());

        std::size_t batch_count = std::ceil(static_cast<float>(data.size()) / batch_size);
        for(std::size_t i = 0; i < batch_count; ++i)
        {
            std::size_t offset = i*batch_size;
            std::size_t bound = std::min(offset+batch_size, data.size());
            //std::cout << "length: " << bound - offset << std::endl;
            #pragma omp parallel for
            for(std::size_t j = offset; j < bound; ++j)
            {
                //std::cout << "offset: " << offset << ", j: " << j << std::endl;
                training_sample(data[j].data(), std::abs(gt[j]));
            }
            end_batch(batch_size);
            //std::cout << "i: " << i << std::endl;
        }
        for(auto& clayer : layers)
            clayer->set_phase(layer_base<T>::phase::Testing);
    }

    void training(const std::vector<std::vector<T>>& data, const std::vector<short>& gt, training_settings settings, bool reset_weights = true)
    {
        if(reset_weights)
            this->reset_weights();
        for(std::size_t i = 0; i < settings.epochs; ++i)
        {
            std::cout << "epoch: " << i << std::endl;
            training_single_epoch(data, gt, settings.batch_size);
if(settings.training_error_calculation != 0) { if(i % settings.training_error_calculation == 0) { float res = test(data, gt); std::cout << "res: " << res << std::endl; if(i > 7 && res < 0.10) { this->reset_weights(); i = 0; } } } } } void multi_training(const std::vector<std::vector<T>>& data, const std::vector<short>& gt, std::size_t batch_size, std::size_t epochs, std::size_t training_error_calculation) { float best_res = 0; std::stringstream weightstream; for(int j = 0; j < 7; ++j) { float old_res = 0; int nothing_learned = 0; this->reset_weights(); for(std::size_t i = 0; i < epochs; ++i) { training_single_epoch(data, gt, batch_size); if(training_error_calculation != 0) { if(i % training_error_calculation == 0) { //std::cout << "epoch: " << i << std::endl; float res = test(data, gt); if(res - old_res < 0.01) nothing_learned++; if(nothing_learned >= 4) break; else old_res = res; } } } float res = test(data, gt); if(res > best_res) { weightstream.str(""); this->save_weights(weightstream); best_res = res; } } this->load_weights(weightstream); std::cout << "----------------final---------------------" << std::endl; std::cout << "result: " << test(data, gt) << std::endl; } void forward_propagation(const T* bottom_data) { // std::cout << "forward" << std::endl; //execute layers const T* current_out = bottom_data; for(auto& clayer : layers) { clayer->forward_propagation(current_out); current_out = clayer->output(); } } void backward_propagation(const T* bottom_data, const T* top_gradient) { const T* cgradient = top_gradient; const T* cdata = bottom_data; for(int i = layers.size() - 1; i >= 0; --i) { if(i == 0) cdata = bottom_data; else cdata = layers[i-1]->output(); layers[i]->backward_propagation(cdata, cgradient); cgradient = layers[i]->gradient(); } } void save_weights(std::ostream& stream) const { stream.precision(17); for(const auto& clayer : layers) clayer->save_weights(stream); } void load_weights(std::istream& stream) { for(const auto& clayer : layers) clayer->load_weights(stream); } void load_weights(std::istream &stream, int up_to_layer) { assert(up_to_layer < layers.size() && up_to_layer); for(int i = 0; i <= up_to_layer; ++i) layers[i]->load_weights(stream); } void reset_weights() { for(const auto& clayer : layers) clayer->init_weights(); } int output_dimension() const { return out_dim; } int input_dimension() const { return in_dim; } std::vector<std::shared_ptr<layer_base<T>>> layers; private: int in_dim, out_dim; bool is_trainable; }; } template<typename T> std::ostream& operator<<(std::ostream& stream, const neural_network::network<T>& net) { net.save_weights(stream); return stream; } template<typename T> std::istream& operator>>(std::istream& stream, neural_network::network<T>& net) { net.load_weights(stream); return stream; } #endif // NEURAL_NETWORK_NETWORK_H
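A sketch of how the network<T> class above is typically driven end to end. The training_settings fields (epochs, batch_size, training_error_calculation) are the ones referenced in this header; the toy XOR data set, the hidden-layer width, and the assumption that training_settings is default-constructible are illustrative choices, not part of the library's documented contract.

#include <neural_network/network.h>
#include <vector>

int main()
{
    // 2 inputs, 2 output classes, one hidden layer of 8 units (relu),
    // built by the initializer-list constructor shown above.
    neural_network::network<float> net(2, 2, {8});

    std::vector<std::vector<float>> data = {
        {0, 0}, {0, 1}, {1, 0}, {1, 1}};
    std::vector<short> labels = {0, 1, 1, 0};   // XOR classes

    neural_network::training_settings settings;
    settings.epochs = 100;
    settings.batch_size = 4;
    settings.training_error_calculation = 10;   // re-test every 10 epochs

    net.training(data, labels, settings);       // resets weights by default
    int cls = net.predict(data[1]);             // ideally class 1 after training
    return cls == 1 ? 0 : 1;
}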
header.h
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB SP code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// //--------------------------------------------------------------------- // The following include file is generated automatically by the // "setparams" utility. It defines // problem_size: 12, 64, 102, 162 (for class T, A, B, C) // dt_default: default time step for this problem size if no // config file // niter_default: default number of iterations for this problem size //--------------------------------------------------------------------- #include "npbparams.h" #include "type.h" #include "timers.h" #include "../my_include/my_include.h" /* common /global/ */ extern int grid_points[3], nx2, ny2, nz2; extern logical timeron; /* common /constants/ */ extern double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3, dx1, dx2, dx3, dx4, dx5, dy1, dy2, dy3, dy4, dy5, dz1, dz2, dz3, dz4, dz5, dssp, dt, ce[5][13], dxmax, dymax, dzmax, xxcon1, xxcon2, xxcon3, xxcon4, xxcon5, dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1, yycon1, yycon2, yycon3, yycon4, yycon5, dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1, zzcon1, zzcon2, zzcon3, zzcon4, zzcon5, dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1, dnxm1, dnym1, dnzm1, c1c2, c1c5, c3c4, c1345, conz1, c1, c2, c3, c4, c5, c4dssp, c5dssp, dtdssp, dttx1, bt, dttx2, dtty1, dtty2, dttz1, dttz2, c2dttx1, c2dtty1, c2dttz1, comz1, comz4, comz5, comz6, c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16; #define IMAX PROBLEM_SIZE #define JMAX PROBLEM_SIZE #define KMAX PROBLEM_SIZE #define IMAXP (IMAX/2*2) #define JMAXP (JMAX/2*2) //--------------------------------------------------------------------- // To improve cache performance, first two dimensions padded by 1 // for even number sizes only //--------------------------------------------------------------------- /* common /fields/ */ extern double u [KMAX][JMAXP+1][IMAXP+1][5]; extern double us [KMAX][JMAXP+1][IMAXP+1]; extern double vs [KMAX][JMAXP+1][IMAXP+1]; extern double ws [KMAX][JMAXP+1][IMAXP+1]; extern double qs [KMAX][JMAXP+1][IMAXP+1]; extern double rho_i [KMAX][JMAXP+1][IMAXP+1]; extern double speed [KMAX][JMAXP+1][IMAXP+1]; extern double square [KMAX][JMAXP+1][IMAXP+1]; extern double rhs [KMAX][JMAXP+1][IMAXP+1][5]; extern double 
forcing[KMAX][JMAXP+1][IMAXP+1][5]; /* common /work_1d/ */ extern double cv [PROBLEM_SIZE]; extern double rhon[PROBLEM_SIZE]; extern double rhos[PROBLEM_SIZE]; extern double rhoq[PROBLEM_SIZE]; extern double cuf [PROBLEM_SIZE]; extern double q [PROBLEM_SIZE]; extern double ue [PROBLEM_SIZE][5]; extern double buf[PROBLEM_SIZE][5]; #pragma omp threadprivate(cv,rhon,rhos,rhoq,cuf,q,ue,buf) /* common /work_lhs/ */ extern double lhs [IMAXP+1][IMAXP+1][5]; extern double lhsp[IMAXP+1][IMAXP+1][5]; extern double lhsm[IMAXP+1][IMAXP+1][5]; #pragma omp threadprivate(lhs,lhsp,lhsm) //kai extern int k1,k2,k3,k4,k5,k6,k7,k8,k9,k10, k11, k12, k13, k14, k15, k16; //----------------------------------------------------------------------- // Timer constants //----------------------------------------------------------------------- #define t_total 1 #define t_rhsx 2 #define t_rhsy 3 #define t_rhsz 4 #define t_rhs 5 #define t_xsolve 6 #define t_ysolve 7 #define t_zsolve 8 #define t_rdis1 9 #define t_rdis2 10 #define t_txinvr 11 #define t_pinvr 12 #define t_ninvr 13 #define t_tzetar 14 #define t_add 15 #define t_flush 16 #define t_last 16 //----------------------------------------------------------------------- void initialize(); void lhsinit(int ni, int nj); void lhsinitj(int nj, int ni); void exact_solution(double xi, double eta, double zeta, double dtemp[5]); void exact_rhs(); void set_constants(); void adi(); void compute_rhs(); void x_solve(); void ninvr(); void y_solve(); void pinvr(); void z_solve(); void tzetar(); void add(); void txinvr(); void error_norm(double rms[5]); void rhs_norm(double rms[5]); void verify(int no_time_steps, char *Class, logical *verified);
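The threadprivate pragmas above give every OpenMP thread its own copy of the solver's scratch arrays (cv, rhon, ..., lhs, lhsp, lhsm), so the per-line solves need neither locking nor explicit indexing by thread id. A stand-alone sketch of what the directive does, independent of the benchmark code (all names here are illustrative):

#include <stdio.h>
#include <omp.h>

#define N 16
static double scratch[N];          /* file-scope work array, like cv/rhon above */
#pragma omp threadprivate(scratch)

int main(void)
{
    #pragma omp parallel num_threads(4)
    {
        int id = omp_get_thread_num();
        for (int i = 0; i < N; i++)
            scratch[i] = id;       /* each thread writes its own copy: no race */
        #pragma omp critical
        printf("thread %d sees scratch[0] = %.0f\n", id, scratch[0]);
    }
    return 0;
}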
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *,const MapMode, const RectangleInfo *,NexusInfo *,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. */ static SemaphoreInfo *cache_semaphore = (SemaphoreInfo *) NULL; static ssize_t cache_anonymous_memory = (-1); static time_t cache_epoch = 0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCache() acquires a pixel cache. % % The format of the AcquirePixelCache() method is: % % Cache AcquirePixelCache(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. 
% */ MagickPrivate Cache AcquirePixelCache(const size_t number_threads) { CacheInfo *magick_restrict cache_info; char *value; cache_info=(CacheInfo *) AcquireCriticalMemory(sizeof(*cache_info)); (void) ResetMagickMemory(cache_info,0,sizeof(*cache_info)); cache_info->type=UndefinedCache; cache_info->mode=IOMode; cache_info->colorspace=sRGBColorspace; cache_info->file=(-1); cache_info->id=GetMagickThreadId(); cache_info->number_threads=number_threads; if (GetOpenMPMaximumThreads() > cache_info->number_threads) cache_info->number_threads=GetOpenMPMaximumThreads(); if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads) cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource); if (cache_info->number_threads == 0) cache_info->number_threads=1; cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads); if (cache_info->nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); value=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } value=GetPolicyValue("cache:synchronize"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } cache_info->semaphore=AcquireSemaphoreInfo(); cache_info->reference_count=1; cache_info->file_semaphore=AcquireSemaphoreInfo(); cache_info->debug=IsEventLogging(); cache_info->signature=MagickCoreSignature; return((Cache ) cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCacheNexus() allocates the NexusInfo structure. % % The format of the AcquirePixelCacheNexus method is: % % NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. % */ MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) { NexusInfo **magick_restrict nexus_info; register ssize_t i; nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory( number_threads,sizeof(*nexus_info))); if (nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads, sizeof(**nexus_info)); if (nexus_info[0] == (NexusInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(nexus_info[0],0,number_threads*sizeof(**nexus_info)); for (i=0; i < (ssize_t) number_threads; i++) { nexus_info[i]=(&nexus_info[0][i]); nexus_info[i]->signature=MagickCoreSignature; } return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCachePixels() returns the pixels associated with the specified % image. % % The format of the AcquirePixelCachePixels() method is: % % const void *AcquirePixelCachePixels(const Image *image, % MagickSizeType *length,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. 
% */ MagickPrivate const void *AcquirePixelCachePixels(const Image *image, MagickSizeType *length,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((const void *) NULL); *length=cache_info->length; return((const void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentGenesis() instantiates the cache component. % % The format of the CacheComponentGenesis method is: % % MagickBooleanType CacheComponentGenesis(void) % */ MagickPrivate MagickBooleanType CacheComponentGenesis(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) cache_semaphore=AcquireSemaphoreInfo(); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t T e r m i n u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentTerminus() destroys the cache component. % % The format of the CacheComponentTerminus() method is: % % CacheComponentTerminus(void) % */ MagickPrivate void CacheComponentTerminus(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&cache_semaphore); /* no op-- nothing to destroy */ RelinquishSemaphoreInfo(&cache_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCache() clones a pixel cache. % % The format of the ClonePixelCache() method is: % % Cache ClonePixelCache(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate Cache ClonePixelCache(const Cache cache) { CacheInfo *magick_restrict clone_info; const CacheInfo *magick_restrict cache_info; assert(cache != NULL); cache_info=(const CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads); clone_info->virtual_pixel_method=cache_info->virtual_pixel_method; return((Cache ) clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheMethods() clones the pixel cache methods from one cache to % another. % % The format of the ClonePixelCacheMethods() method is: % % void ClonePixelCacheMethods(Cache clone,const Cache cache) % % A description of each parameter follows: % % o clone: Specifies a pointer to a Cache structure. % % o cache: the pixel cache. 
% */ MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache) { CacheInfo *magick_restrict cache_info, *magick_restrict source_info; assert(clone != (Cache) NULL); source_info=(CacheInfo *) clone; assert(source_info->signature == MagickCoreSignature); if (source_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", source_info->filename); assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); source_info->methods=cache_info->methods; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e R e p o s i t o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheRepository() clones the source pixel cache to the destination % cache. % % The format of the ClonePixelCacheRepository() method is: % % MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info, % CacheInfo *source_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o source_info: the source pixel cache. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClonePixelCacheOnDisk( CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info) { MagickSizeType extent; size_t quantum; ssize_t count; struct stat file_stats; unsigned char *buffer; /* Clone pixel cache on disk with identical morphology. */ if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) || (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse)) return(MagickFalse); quantum=(size_t) MagickMaxBufferExtent; if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0)) quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent); buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer)); if (buffer == (unsigned char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); extent=0; while ((count=read(cache_info->file,buffer,quantum)) > 0) { ssize_t number_bytes; number_bytes=write(clone_info->file,buffer,(size_t) count); if (number_bytes != count) break; extent+=number_bytes; } buffer=(unsigned char *) RelinquishMagickMemory(buffer); if (extent != cache_info->length) return(MagickFalse); return(MagickTrue); } static MagickBooleanType ClonePixelCacheRepository( CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info, ExceptionInfo *exception) { #define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource)) #define cache_number_threads(source,destination,chunk,multithreaded) \ num_threads((multithreaded) == 0 ? 1 : \ (((source)->type != MemoryCache) && \ ((source)->type != MapCache)) || \ (((destination)->type != MemoryCache) && \ ((destination)->type != MapCache)) ? 
\ MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \ MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1)) MagickBooleanType optimize, status; NexusInfo **magick_restrict cache_nexus, **magick_restrict clone_nexus; size_t length; ssize_t y; assert(cache_info != (CacheInfo *) NULL); assert(clone_info != (CacheInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); if (cache_info->type == PingCache) return(MagickTrue); length=cache_info->number_channels*sizeof(*cache_info->channel_map); if ((cache_info->columns == clone_info->columns) && (cache_info->rows == clone_info->rows) && (cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) && (cache_info->metacontent_extent == clone_info->metacontent_extent)) { /* Identical pixel cache morphology. */ if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && ((clone_info->type == MemoryCache) || (clone_info->type == MapCache))) { (void) memcpy(clone_info->pixels,cache_info->pixels, cache_info->number_channels*cache_info->columns*cache_info->rows* sizeof(*cache_info->pixels)); if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) (void) memcpy(clone_info->metacontent,cache_info->metacontent, cache_info->columns*cache_info->rows* clone_info->metacontent_extent*sizeof(unsigned char)); return(MagickTrue); } if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache)) return(ClonePixelCacheOnDisk(cache_info,clone_info)); } /* Mismatched pixel cache morphology. */ cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads); clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads); if ((cache_nexus == (NexusInfo **) NULL) || (clone_nexus == (NexusInfo **) NULL)) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); length=cache_info->number_channels*sizeof(*cache_info->channel_map); optimize=(cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ? MagickTrue : MagickFalse; length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns, clone_info->number_channels*clone_info->columns); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; RectangleInfo region; register ssize_t x; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; region.width=cache_info->columns; region.height=1; region.x=0; region.y=y; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region, cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; region.width=clone_info->columns; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region, clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; (void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length); if (optimize != MagickFalse) (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length* sizeof(Quantum)); else { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; /* Mismatched pixel channel map. 
*/ p=cache_nexus[id]->pixels; q=clone_nexus[id]->pixels; for (x=0; x < (ssize_t) cache_info->columns; x++) { register ssize_t i; if (x == (ssize_t) clone_info->columns) break; for (i=0; i < (ssize_t) clone_info->number_channels; i++) { PixelChannel channel; PixelTrait traits; channel=clone_info->channel_map[i].channel; traits=cache_info->channel_map[channel].traits; if (traits != UndefinedPixelTrait) *q=*(p+cache_info->channel_map[channel].offset); q++; } p+=cache_info->number_channels; } } status=WritePixelCachePixels(clone_info,clone_nexus[id],exception); } if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) { /* Clone metacontent. */ length=(size_t) MagickMin(cache_info->metacontent_extent, clone_info->metacontent_extent); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; RectangleInfo region; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; region.width=cache_info->columns; region.height=1; region.x=0; region.y=y; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region, cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; region.width=clone_info->columns; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region, clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; if ((clone_nexus[id]->metacontent != (void *) NULL) && (cache_nexus[id]->metacontent != (void *) NULL)) (void) memcpy(clone_nexus[id]->metacontent, cache_nexus[id]->metacontent,length*sizeof(unsigned char)); status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception); } } cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads); clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) FormatLocaleString(message,MagickPathExtent,"%s => %s", CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type), CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type)); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyImagePixelCache() method is: % % void DestroyImagePixelCache(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void DestroyImagePixelCache(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->cache == (void *) NULL) return; image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixels() deallocates memory associated with the pixel cache. 
% % The format of the DestroyImagePixels() method is: % % void DestroyImagePixels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImagePixels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL) { cache_info->methods.destroy_pixel_handler(image); return; } image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyPixelCache() method is: % % Cache DestroyPixelCache(Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info) { int status; status=(-1); if (cache_info->file != -1) { status=close(cache_info->file); cache_info->file=(-1); RelinquishMagickResource(FileResource,1); } return(status == -1 ? MagickFalse : MagickTrue); } static inline void RelinquishPixelCachePixels(CacheInfo *cache_info) { switch (cache_info->type) { case MemoryCache: { #if defined(MAGICKCORE_OPENCL_SUPPORT) if (cache_info->opencl != (MagickCLCacheInfo) NULL) { cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl, MagickTrue); cache_info->pixels=(Quantum *) NULL; break; } #endif if (cache_info->mapped == MagickFalse) cache_info->pixels=(Quantum *) RelinquishAlignedMemory( cache_info->pixels); else (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); RelinquishMagickResource(MemoryResource,cache_info->length); break; } case MapCache: { (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); cache_info->pixels=(Quantum *) NULL; if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(MapResource,cache_info->length); } case DiskCache: { if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(DiskResource,cache_info->length); break; } case DistributedCache: { *cache_info->cache_filename='\0'; (void) RelinquishDistributePixelCache((DistributeCacheInfo *) cache_info->server_info); break; } default: break; } cache_info->type=UndefinedCache; cache_info->mapped=MagickFalse; cache_info->metacontent=(void *) NULL; } MagickPrivate Cache DestroyPixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count--; if (cache_info->reference_count != 0) { 
UnlockSemaphoreInfo(cache_info->semaphore); return((Cache) NULL); } UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) FormatLocaleString(message,MagickPathExtent,"destroy %s", cache_info->filename); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } RelinquishPixelCachePixels(cache_info); if (cache_info->server_info != (DistributeCacheInfo *) NULL) cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *) cache_info->server_info); if (cache_info->nexus_info != (NexusInfo **) NULL) cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info, cache_info->number_threads); if (cache_info->random_info != (RandomInfo *) NULL) cache_info->random_info=DestroyRandomInfo(cache_info->random_info); if (cache_info->file_semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->file_semaphore); if (cache_info->semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->semaphore); cache_info->signature=(~MagickCoreSignature); cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info); cache=(Cache) NULL; return(cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCacheNexus() destroys a pixel cache nexus. % % The format of the DestroyPixelCacheNexus() method is: % % NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info, % const size_t number_threads) % % A description of each parameter follows: % % o nexus_info: the nexus to destroy. % % o number_threads: the number of nexus threads. % */ static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info) { if (nexus_info->mapped == MagickFalse) (void) RelinquishAlignedMemory(nexus_info->cache); else (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length); nexus_info->cache=(Quantum *) NULL; nexus_info->pixels=(Quantum *) NULL; nexus_info->metacontent=(void *) NULL; nexus_info->length=0; nexus_info->mapped=MagickFalse; } MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info, const size_t number_threads) { register ssize_t i; assert(nexus_info != (NexusInfo **) NULL); for (i=0; i < (ssize_t) number_threads; i++) { if (nexus_info[i]->cache != (Quantum *) NULL) RelinquishCacheNexusPixels(nexus_info[i]); nexus_info[i]->signature=(~MagickCoreSignature); } nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]); nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info); return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontent() returns the authentic metacontent corresponding % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the associated pixels are not available. % % The format of the GetAuthenticMetacontent() method is: % % void *GetAuthenticMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport void *GetAuthenticMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) { void *metacontent; metacontent=cache_info->methods. get_authentic_metacontent_from_handler(image); return(metacontent); } assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontentFromCache() returns the meta-content corresponding % with the last call to QueueAuthenticPixelsCache() or % GetAuthenticPixelsCache(). % % The format of the GetAuthenticMetacontentFromCache() method is: % % void *GetAuthenticMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
% */ MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image, MagickCLDevice device,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(device != (const MagickCLDevice) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info->type == UndefinedCache) SyncImagePixelCache((Image *) image,exception); if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse)) return((cl_mem) NULL); LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->opencl != (MagickCLCacheInfo) NULL) && (cache_info->opencl->device->context != device->context)) cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); if (cache_info->opencl == (MagickCLCacheInfo) NULL) { assert(cache_info->pixels != (Quantum *) NULL); cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels, cache_info->length); } if (cache_info->opencl != (MagickCLCacheInfo) NULL) RetainOpenCLMemObject(cache_info->opencl->buffer); UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->opencl == (MagickCLCacheInfo) NULL) return((cl_mem) NULL); assert(cache_info->opencl->pixels == cache_info->pixels); return(cache_info->opencl->buffer); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or % disk pixel cache as defined by the geometry parameters. A pointer to the % pixels is returned if the pixels are transferred, otherwise a NULL is % returned. % % The format of the GetAuthenticPixelCacheNexus() method is: % % Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to return. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict pixels; /* Transfer pixels from the cache. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue, nexus_info,exception); if (pixels == (Quantum *) NULL) return((Quantum *) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); if (cache_info->metacontent_extent != 0) if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsFromCache() returns the pixels associated with the last % call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods. % % The format of the GetAuthenticPixelsFromCache() method is: % % Quantum *GetAuthenticPixelsFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static Quantum *GetAuthenticPixelsFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelQueue() returns the authentic pixels corresponding with % the last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetAuthenticPixelQueue() method is: % % Quantum *GetAuthenticPixelQueue(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Quantum *GetAuthenticPixelQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) return(cache_info->methods.get_authentic_pixels_from_handler(image)); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixels() obtains a pixel region for read/write access. If the % region is successfully accessed, a pointer to a Quantum array % representing the region is returned, otherwise NULL is returned. % % The returned pointer may point to a temporary working copy of the pixels % or it may point to the original pixels in memory.
Performance is maximized % if the selected region is part of one row, or one or more full rows, since % then there is opportunity to access the pixels in-place (without a copy) % if the image is in memory, or in a memory-mapped file. The returned pointer % must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image has corresponding metacontent, call % GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the % meta-content corresponding to the region. Once the Quantum array has % been updated, the changes must be saved back to the underlying image using % SyncAuthenticPixels() or they may be lost. % % The format of the GetAuthenticPixels() method is: % % Quantum *GetAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.get_authentic_pixels_handler(image,x,y,columns, rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache % as defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetAuthenticPixelsCache() method is: % % Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure.
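%
%  GetAuthenticPixelsCache() is the default handler behind the public
%  GetAuthenticPixels() entry point.  A hedged sketch of the usual caller-side
%  read/modify/sync pattern (error handling condensed):
%
%      Quantum *q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
%      if (q != (Quantum *) NULL)
%        {
%          for (x=0; x < (ssize_t) image->columns; x++)
%          {
%            SetPixelRed(image,QuantumRange,q);
%            q+=GetPixelChannels(image);
%          }
%          (void) SyncAuthenticPixels(image,exception);
%        }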
% */ static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtent() returns the extent of the pixels corresponding % with the last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetImageExtent() method is: % % MagickSizeType GetImageExtent(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickSizeType GetImageExtent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCache() ensures that there is only a single reference to the % pixel cache to be modified, updating the provided cache pointer to point to % a clone of the original pixel cache if necessary. % % The format of the GetImagePixelCache method is: % % Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o clone: any value other than MagickFalse clones the cache pixels. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType ValidatePixelCacheMorphology( const Image *magick_restrict image) { const CacheInfo *magick_restrict cache_info; const PixelChannelMap *magick_restrict p, *magick_restrict q; /* Does the image match the pixel cache morphology?
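That is, do the image's storage class, colorspace, alpha trait, masks, geometry, channel map, and metacontent extent still agree with the values recorded in the cache?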
*/ cache_info=(CacheInfo *) image->cache; p=image->channel_map; q=cache_info->channel_map; if ((image->storage_class != cache_info->storage_class) || (image->colorspace != cache_info->colorspace) || (image->alpha_trait != cache_info->alpha_trait) || (image->read_mask != cache_info->read_mask) || (image->write_mask != cache_info->write_mask) || (image->columns != cache_info->columns) || (image->rows != cache_info->rows) || (image->number_channels != cache_info->number_channels) || (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) || (image->metacontent_extent != cache_info->metacontent_extent) || (cache_info->nexus_info == (NexusInfo **) NULL)) return(MagickFalse); return(MagickTrue); } static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType destroy, status; static MagickSizeType cache_timelimit = MagickResourceInfinity, cpu_throttle = MagickResourceInfinity, cycles = 0; status=MagickTrue; if (cpu_throttle == MagickResourceInfinity) cpu_throttle=GetMagickResourceLimit(ThrottleResource); if ((cpu_throttle != 0) && ((cycles++ % 32) == 0)) MagickDelay(cpu_throttle); if (cache_epoch == 0) { /* Set the expire time in seconds. */ cache_timelimit=GetMagickResourceLimit(TimeResource); cache_epoch=time((time_t *) NULL); } if ((cache_timelimit != MagickResourceInfinity) && ((MagickSizeType) (time((time_t *) NULL)-cache_epoch) >= cache_timelimit)) { #if defined(ECANCELED) errno=ECANCELED; #endif ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded"); } LockSemaphoreInfo(image->semaphore); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif destroy=MagickFalse; if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { CacheInfo *clone_info; Image clone_image; /* Clone pixel cache. */ clone_image=(*image); clone_image.semaphore=AcquireSemaphoreInfo(); clone_image.reference_count=1; clone_image.cache=ClonePixelCache(cache_info); clone_info=(CacheInfo *) clone_image.cache; status=OpenPixelCache(&clone_image,IOMode,exception); if (status != MagickFalse) { if (clone != MagickFalse) status=ClonePixelCacheRepository(clone_info,cache_info, exception); if (status != MagickFalse) { destroy=MagickTrue; image->cache=clone_image.cache; } } RelinquishSemaphoreInfo(&clone_image.semaphore); } UnlockSemaphoreInfo(cache_info->semaphore); } if (destroy != MagickFalse) cache_info=(CacheInfo *) DestroyPixelCache(cache_info); if (status != MagickFalse) { /* Ensure the image matches the pixel cache morphology. */ image->type=UndefinedType; if (ValidatePixelCacheMorphology(image) == MagickFalse) { status=OpenPixelCache(image,IOMode,exception); cache_info=(CacheInfo *) image->cache; if (cache_info->type == DiskCache) (void) ClosePixelCacheOnDisk(cache_info); } } UnlockSemaphoreInfo(image->semaphore); if (status == MagickFalse) return((Cache) NULL); return(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCacheType() returns the pixel cache type: UndefinedCache, % DiskCache, MemoryCache, MapCache, or PingCache. 
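%
%  For example (a hedged sketch), a caller can branch on the cache type to
%  anticipate I/O costs:
%
%      if (GetImagePixelCacheType(image) == DiskCache)
%        { /* pixel access will page through a disk file */ }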
% % The format of the GetImagePixelCacheType() method is: % % CacheType GetImagePixelCacheType(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport CacheType GetImagePixelCacheType(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e A u t h e n t i c P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixel() method is: % % MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType CopyPixel(const Image *image, const Quantum *source,Quantum *destination) { register ssize_t i; if (source == (const Quantum *) NULL) { destination[RedPixelChannel]=ClampToQuantum(image->background_color.red); destination[GreenPixelChannel]=ClampToQuantum( image->background_color.green); destination[BluePixelChannel]=ClampToQuantum( image->background_color.blue); destination[BlackPixelChannel]=ClampToQuantum( image->background_color.black); destination[AlphaPixelChannel]=ClampToQuantum( image->background_color.alpha); return(MagickFalse); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); destination[channel]=source[i]; } return(MagickTrue); } MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; register Quantum *magick_restrict q; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y, pixel,exception)); q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e A u t h e n t i c P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs.
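%
%  As with the public GetOneAuthenticPixel() above, the result is written to a
%  channel-indexed array.  A hedged sketch:
%
%      Quantum pixel[MaxPixelChannels];
%      if (GetOneAuthenticPixel(image,x,y,pixel,exception) != MagickFalse)
%        { /* pixel[RedPixelChannel], pixel[AlphaPixelChannel], ... are valid */ }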
% % The format of the GetOneAuthenticPixelFromCache() method is: % % MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, % const ssize_t x,const ssize_t y,Quantum *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id], exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixel() returns a single virtual pixel at the specified % (x,y) location. The image background color is returned if an error occurs. % If you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixel() method is: % % MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) return(cache_info->methods.get_one_virtual_pixel_from_handler(image, GetPixelCacheVirtualMethod(image),x,y,pixel,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y, 1UL,1UL,cache_info->nexus_info[id],exception); return(CopyPixel(image,p,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e V i r t u a l P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelFromCache() returns a single virtual pixel at the % specified (x,y) location. The image background color is returned if an % error occurs.
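%
%  A hedged sketch of the public GetOneVirtualPixel() path this handler
%  mirrors; out-of-range coordinates are resolved by the image's virtual
%  pixel method rather than treated as errors:
%
%      Quantum pixel[MaxPixelChannels];
%      (void) GetOneVirtualPixel(image,-1,-1,pixel,exception);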
% % The format of the GetOneVirtualPixelFromCache() method is: % % MagickBooleanType GetOneVirtualPixelFromCache(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); return(CopyPixel(image,p,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. If % you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixelInfo() method is: % % MagickBooleanType GetOneVirtualPixelInfo(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: these values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, PixelInfo *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); GetPixelInfo(image,pixel); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); if (p == (const Quantum *) NULL) return(MagickFalse); GetPixelInfoPixel(image,p,pixel); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheColorspace() returns the colorspace of the pixel cache.
% % The format of the GetPixelCacheColorspace() method is: % % ColorspaceType GetPixelCacheColorspace(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. % % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods) { assert(cache_methods != (CacheMethods *) NULL); (void) ResetMagickMemory(cache_methods,0,sizeof(*cache_methods)); cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache; cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache; cache_methods->get_virtual_metacontent_from_handler= GetVirtualMetacontentFromCache; cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache; cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache; cache_methods->get_authentic_metacontent_from_handler= GetAuthenticMetacontentFromCache; cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache; cache_methods->get_one_authentic_pixel_from_handler= GetOneAuthenticPixelFromCache; cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache; cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache; cache_methods->destroy_pixel_handler=DestroyImagePixelCache; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e N e x u s E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheNexusExtent() returns the extent of the pixels corresponding % with the last call to SetPixelCacheNexusPixels() or % GetPixelCacheNexusPixels().
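%
%  For example, a nexus selecting a 640x480 region reports an extent of
%  307200 pixels; a nexus with an empty region falls back to the extent of
%  the entire cache (columns*rows).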
% % The format of the GetPixelCacheNexusExtent() method is: % % MagickSizeType GetPixelCacheNexusExtent(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o nexus_info: the nexus info. % */ MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; MagickSizeType extent; assert(cache != NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height; if (extent == 0) return((MagickSizeType) cache_info->columns*cache_info->rows); return(extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCachePixels() returns the pixels associated with the specified image. % % The format of the GetPixelCachePixels() method is: % % void *GetPixelCachePixels(Image *image,MagickSizeType *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. % */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. % % The format of the GetPixelCacheTileSize() method is: % % void GetPixelCacheTileSize(const Image *image,size_t *width, % size_t *height) % % A description of each parameter follows: % % o image: the image. % % o width: the optimized cache tile width in pixels. 
% % o height: the optimized cache tile height in pixels. % */ MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width, size_t *height) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *width=2048UL/(cache_info->number_channels*sizeof(Quantum)); if (GetImagePixelCacheType(image) == DiskCache) *width=8192UL/(cache_info->number_channels*sizeof(Quantum)); *height=(*width); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the % pixel cache. A virtual pixel is any pixel access that is outside the % boundaries of the image cache. % % The format of the GetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromCache() returns the meta-content corresponding with % the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualMetacontentFromCache() method is: % % void *GetVirtualMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const void *GetVirtualMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromNexus() returns the meta-content for the specified % cache nexus. % % The format of the GetVirtualMetacontentFromNexus() method is: % % const void *GetVirtualMetacontentFromNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the meta-content. 
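%
%  A hedged sketch of the public path that ultimately lands here: request
%  virtual pixels first, then fetch the matching meta-content:
%
%      (void) GetVirtualPixels(image,x,y,columns,rows,exception);
%      metacontent=GetVirtualMetacontent(image);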
% */ MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((void *) NULL); return(nexus_info->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontent() returns the virtual metacontent corresponding with % the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the meta-content is not available. % % The format of the GetVirtualMetacontent() method is: % % const void *GetVirtualMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const void *GetVirtualMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image); if (metacontent != (void *) NULL) return(metacontent); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsFromNexus() gets virtual pixels from the in-memory or disk % pixel cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetVirtualPixelsFromNexus() method is: % % Quantum *GetVirtualPixelsFromNexus(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to acquire. % % o exception: return any errors or warnings in this structure.
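%
%  Out-of-range requests are synthesized according to the virtual pixel
%  method.  For the tiling methods the coordinate is wrapped with a floored
%  modulo: for a 100-column cache, x = -1 yields tile quotient -1 and
%  remainder 99, so the rightmost column is sampled rather than the leftmost.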
% */ static ssize_t DitherMatrix[64] = { 0, 48, 12, 60, 3, 51, 15, 63, 32, 16, 44, 28, 35, 19, 47, 31, 8, 56, 4, 52, 11, 59, 7, 55, 40, 24, 36, 20, 43, 27, 39, 23, 2, 50, 14, 62, 1, 49, 13, 61, 34, 18, 46, 30, 33, 17, 45, 29, 10, 58, 6, 54, 9, 57, 5, 53, 42, 26, 38, 22, 41, 25, 37, 21 }; static inline ssize_t DitherX(const ssize_t x,const size_t columns) { ssize_t index; index=x+DitherMatrix[x & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) columns) return((ssize_t) columns-1L); return(index); } static inline ssize_t DitherY(const ssize_t y,const size_t rows) { ssize_t index; index=y+DitherMatrix[y & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) rows) return((ssize_t) rows-1L); return(index); } static inline ssize_t EdgeX(const ssize_t x,const size_t columns) { if (x < 0L) return(0L); if (x >= (ssize_t) columns) return((ssize_t) (columns-1)); return(x); } static inline ssize_t EdgeY(const ssize_t y,const size_t rows) { if (y < 0L) return(0L); if (y >= (ssize_t) rows) return((ssize_t) (rows-1)); return(y); } static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns) { return((ssize_t) (columns*GetPseudoRandomValue(random_info))); } static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows) { return((ssize_t) (rows*GetPseudoRandomValue(random_info))); } static inline MagickModulo VirtualPixelModulo(const ssize_t offset, const size_t extent) { MagickModulo modulo; /* Compute the remainder of dividing offset by extent. It returns not only the quotient (the tile the offset falls in) but also the positive remainder within that tile such that 0 <= remainder < extent. This method is essentially a ldiv() using a floored modulo division rather than the normal default truncated modulo division. */ modulo.quotient=offset/(ssize_t) extent; if (offset < 0L) modulo.quotient--; modulo.remainder=offset-modulo.quotient*(ssize_t) extent; return(modulo); } MagickPrivate const Quantum *GetVirtualPixelsFromNexus(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; MagickSizeType length, number_pixels; NexusInfo **magick_restrict virtual_nexus; Quantum *magick_restrict pixels, virtual_pixel[MaxPixelChannels]; RectangleInfo region; register const Quantum *magick_restrict p; register const void *magick_restrict r; register Quantum *magick_restrict q; register ssize_t i, u; register unsigned char *magick_restrict s; ssize_t v; void *magick_restrict virtual_metacontent; /* Acquire pixels.
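Requests that fall entirely inside the cache extents are read directly; anything that crosses the boundary is assembled below, pixel-by-pixel or run-by-run, from a scratch nexus according to the virtual pixel method.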
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((const Quantum *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif region.x=x; region.y=y; region.width=columns; region.height=rows; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,nexus_info, exception); if (pixels == (Quantum *) NULL) return((const Quantum *) NULL); q=pixels; offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel request is inside cache extents. */ if (nexus_info->authentic_pixel_cache != MagickFalse) return(q); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); if (cache_info->metacontent_extent != 0) { status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); } return(q); } /* Pixel request is outside cache extents. */ s=(unsigned char *) nexus_info->metacontent; virtual_nexus=AcquirePixelCacheNexus(1); if (virtual_nexus == (NexusInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) ResetMagickMemory(virtual_pixel,0,cache_info->number_channels* sizeof(*virtual_pixel)); virtual_metacontent=(void *) NULL; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: case EdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: { if (cache_info->metacontent_extent != 0) { /* Acquire a metacontent buffer. 
*/ virtual_metacontent=(void *) AcquireQuantumMemory(1, cache_info->metacontent_extent); if (virtual_metacontent == (void *) NULL) { virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1); (void) ThrowMagickException(exception,GetMagickModule(), CacheError,"UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) ResetMagickMemory(virtual_metacontent,0, cache_info->metacontent_extent); } switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case GrayVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange/2, virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case TransparentVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,TransparentAlpha,virtual_pixel); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } default: { SetPixelRed(image,ClampToQuantum(image->background_color.red), virtual_pixel); SetPixelGreen(image,ClampToQuantum(image->background_color.green), virtual_pixel); SetPixelBlue(image,ClampToQuantum(image->background_color.blue), virtual_pixel); SetPixelBlack(image,ClampToQuantum(image->background_color.black), virtual_pixel); SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha), virtual_pixel); break; } } break; } default: break; } for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. 
*/ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, *virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case HorizontalTileEdgeVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL, *virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case VerticalTileEdgeVirtualPixelMethod: { y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL, *virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=virtual_pixel; r=virtual_metacontent; break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=virtual_pixel; r=virtual_metacontent; break; } p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); 
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case VerticalTileVirtualPixelMethod: { if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } } if (p == (const Quantum *) NULL) break; (void) memcpy(q,p,(size_t) length*cache_info->number_channels* sizeof(*p)); q+=cache_info->number_channels; if ((s != (void *) NULL) && (r != (const void *) NULL)) { (void) memcpy(s,r,(size_t) cache_info->metacontent_extent); s+=cache_info->metacontent_extent; } continue; } /* Transfer a run of pixels. */ p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset, (size_t) length,1UL,*virtual_nexus,exception); if (p == (const Quantum *) NULL) break; r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); (void) memcpy(q,p,(size_t) length*cache_info->number_channels*sizeof(*p)); q+=length*cache_info->number_channels; if ((r != (void *) NULL) && (s != (const void *) NULL)) { (void) memcpy(s,r,(size_t) length); s+=length*cache_info->metacontent_extent; } } if (u < (ssize_t) columns) break; } /* Free resources. */ if (virtual_metacontent != (void *) NULL) virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent); virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1); if (v < (ssize_t) rows) return((const Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCache() gets virtual pixels from the in-memory or disk pixel % cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetVirtualPixelCache() method is: % % const Quantum *GetVirtualPixelCache(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure.
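%
%  This is the handler installed by GetPixelCacheMethods() as the default
%  get_virtual_pixel_handler behind the public GetVirtualPixels() entry
%  point.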
% */ static const Quantum *GetVirtualPixelCache(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,columns,rows, cache_info->nexus_info[id],exception); return(p); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l P i x e l Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelQueue() returns the virtual pixels corresponding with the % last call to QueueAuthenticPixels() or GetVirtualPixels(). % % The format of the GetVirtualPixelQueue() method is: % % const Quantum *GetVirtualPixelQueue(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const Quantum *GetVirtualPixelQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixels_handler != (GetVirtualPixelsHandler) NULL) return(cache_info->methods.get_virtual_pixels_handler(image)); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixels() returns an immutable pixel region. If the % region is successfully accessed, a pointer to it is returned, otherwise % NULL is returned. The returned pointer may point to a temporary working % copy of the pixels or it may point to the original pixels in memory. % Performance is maximized if the selected region is part of one row, or one % or more full rows, since there is opportunity to access the pixels in-place % (without a copy) if the image is in memory, or in a memory-mapped file. The % returned pointer must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image type is CMYK or the storage class is PseudoClass, % call GetVirtualMetacontent() after invoking GetVirtualPixels() to % access the meta-content (of type void) corresponding to the % region. % % If you plan to modify the pixels, use GetAuthenticPixels() instead. % % Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread- % safe. In a threaded environment, use GetCacheViewVirtualPixels() or % GetCacheViewAuthenticPixels() instead.
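%
%  A hedged sketch of a typical read-only row sweep (x, y, and the total
%  accumulator are assumed declared by the caller; error handling condensed):
%
%      const Quantum *p=GetVirtualPixels(image,0,y,image->columns,1,exception);
%      if (p != (const Quantum *) NULL)
%        for (x=0; x < (ssize_t) image->columns; x++)
%        {
%          total+=(double) GetPixelRed(image,p);
%          p+=GetPixelChannels(image);
%        }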
% % The format of the GetVirtualPixels() method is: % % const Quantum *GetVirtualPixels(const Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport const Quantum *GetVirtualPixels(const Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) return(cache_info->methods.get_virtual_pixel_handler(image, GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y, columns,rows,cache_info->nexus_info[id],exception); return(p); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsCache() returns the pixels corresponding with the last call % to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualPixelsCache() method is: % % Quantum *GetVirtualPixelsCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const Quantum *GetVirtualPixelsCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const Quantum *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap pixels.
% */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p e n P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenPixelCache() allocates the pixel cache. This includes defining the cache % dimensions, allocating space for the image pixels and optionally the % metacontent, and memory mapping the cache if it is disk based. The cache % nexus array is initialized as well. % % The format of the OpenPixelCache() method is: % % MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o mode: ReadMode, WriteMode, or IOMode. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info, const MapMode mode) { int file; /* Open pixel cache on disk. */ if ((cache_info->file != -1) && (cache_info->mode == mode)) return(MagickTrue); /* cache already open and in the proper mode */ if (*cache_info->cache_filename == '\0') file=AcquireUniqueFileResource(cache_info->cache_filename); else switch (mode) { case ReadMode: { file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0); break; } case WriteMode: { file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE); break; } case IOMode: default: { file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE); break; } } if (file == -1) return(MagickFalse); (void) AcquireMagickResource(FileResource,1); if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); cache_info->file=file; return(MagickTrue); } static inline MagickOffsetType WritePixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,const unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX)); #else count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length) { CacheInfo *magick_restrict cache_info; MagickOffsetType count, extent, offset; cache_info=(CacheInfo *) image->cache; if (image->debug != MagickFalse) { char format[MagickPathExtent], message[MagickPathExtent]; (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format); (void) FormatLocaleString(message,MagickPathExtent, "extend %s (%s[%d], disk, %s)",cache_info->filename, 
cache_info->cache_filename,cache_info->file,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) count=(MagickOffsetType) 1; else { extent=(MagickOffsetType) length-1; count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *) ""); if (count != 1) return(MagickFalse); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (cache_info->synchronize != MagickFalse) (void) posix_fallocate(cache_info->file,offset+1,extent-offset); #endif } offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET); if (offset < 0) return(MagickFalse); return(MagickTrue); } static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info, source_info; char format[MagickPathExtent], message[MagickPathExtent]; const char *hosts, *type; MagickBooleanType status; MagickSizeType length, number_pixels; size_t columns, packet_size; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (cache_anonymous_memory < 0) { char *value; /* Does the security policy require anonymous mapping for pixel cache? */ cache_anonymous_memory=0; value=GetPolicyValue("pixel-cache-memory"); if (value == (char *) NULL) value=GetPolicyValue("cache:memory-map"); if (LocaleCompare(value,"anonymous") == 0) { #if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS) cache_anonymous_memory=1; #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"DelegateLibrarySupportNotBuiltIn", "'%s' (policy requires anonymous memory mapping)",image->filename); #endif } value=DestroyString(value); } if ((image->columns == 0) || (image->rows == 0)) ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) || (AcquireMagickResource(HeightResource,image->rows) == MagickFalse)) ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit", image->filename); source_info=(*cache_info); source_info.file=(-1); (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]", image->filename,(double) GetImageIndexInList(image)); cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->alpha_trait=image->alpha_trait; cache_info->read_mask=image->read_mask; cache_info->write_mask=image->write_mask; cache_info->rows=image->rows; cache_info->columns=image->columns; InitializePixelChannelMap(image); cache_info->number_channels=GetPixelChannels(image); (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels* sizeof(*image->channel_map)); cache_info->metacontent_extent=image->metacontent_extent; cache_info->mode=mode; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; packet_size=cache_info->number_channels*sizeof(Quantum); if (image->metacontent_extent != 0) packet_size+=cache_info->metacontent_extent; length=number_pixels*packet_size; columns=(size_t) (length/cache_info->rows/packet_size); if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) || ((ssize_t) 
cache_info->rows < 0)) ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed", image->filename); cache_info->length=length; if (image->ping != MagickFalse) { cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->type=PingCache; return(MagickTrue); } status=AcquireMagickResource(AreaResource,cache_info->length); if (cache_info->mode == PersistMode) status=MagickFalse; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)) && ((cache_info->type == UndefinedCache) || (cache_info->type == MemoryCache))) { status=AcquireMagickResource(MemoryResource,cache_info->length); if (status != MagickFalse) { status=MagickTrue; if (cache_anonymous_memory <= 0) { cache_info->mapped=MagickFalse; cache_info->pixels=(Quantum *) MagickAssumeAligned( AcquireAlignedMemory(1,(size_t) cache_info->length)); } else { cache_info->mapped=MagickTrue; cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t) cache_info->length); } if (cache_info->pixels == (Quantum *) NULL) cache_info->pixels=source_info.pixels; else { /* Create memory pixel cache. */ cache_info->type=MemoryCache; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ number_pixels*cache_info->number_channels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } return(status == 0 ? MagickFalse : MagickTrue); } } } status=AcquireMagickResource(DiskResource,cache_info->length); hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts", exception); if ((status == MagickFalse) && (hosts != (const char *) NULL)) { DistributeCacheInfo *server_info; /* Distribute the pixel cache to a remote server. */ server_info=AcquireDistributeCacheInfo(exception); if (server_info != (DistributeCacheInfo *) NULL) { status=OpenDistributePixelCache(server_info,image); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", GetDistributeCacheHostname(server_info)); server_info=DestroyDistributeCacheInfo(server_info); } else { /* Create a distributed pixel cache. 
*/ status=MagickTrue; cache_info->type=DistributedCache; cache_info->server_info=server_info; (void) FormatLocaleString(cache_info->cache_filename, MagickPathExtent,"%s:%d",GetDistributeCacheHostname( (DistributeCacheInfo *) cache_info->server_info), GetDistributeCachePort((DistributeCacheInfo *) cache_info->server_info)); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, GetDistributeCacheFile((DistributeCacheInfo *) cache_info->server_info),type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } return(status == 0 ? MagickFalse : MagickTrue); } } cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } /* Create pixel cache on disk. */ if (status == MagickFalse) { cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) && (cache_info->mode != PersistMode)) { (void) ClosePixelCacheOnDisk(cache_info); *cache_info->cache_filename='\0'; } if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse) { RelinquishMagickResource(DiskResource,cache_info->length); ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", image->filename); return(MagickFalse); } status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+ cache_info->length); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToExtendCache", image->filename); return(MagickFalse); } length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if (length != (MagickSizeType) ((size_t) length)) cache_info->type=DiskCache; else { status=AcquireMagickResource(MapResource,cache_info->length); if ((status == MagickFalse) && (cache_info->type != MapCache) && (cache_info->type != MemoryCache)) { status=MagickTrue; cache_info->type=DiskCache; } else { status=MagickTrue; cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode, cache_info->offset,(size_t) cache_info->length); if (cache_info->pixels == (Quantum *) NULL) { cache_info->type=DiskCache; cache_info->pixels=source_info.pixels; } else { /* Create file-backed memory-mapped pixel cache. 
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  number_pixels*cache_info->number_channels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,(double)
                    cache_info->rows,(double) cache_info->number_channels,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
    }
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r s i s t   P i x e l   C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PersistPixelCache() attaches to or initializes a persistent pixel cache.
%  A persistent pixel cache is one that resides on disk and is not destroyed
%  when the program exits.
%
%  The format of the PersistPixelCache() method is:
%
%      MagickBooleanType PersistPixelCache(Image *image,const char *filename,
%        const MagickBooleanType attach,MagickOffsetType *offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filename: the persistent pixel cache filename.
%
%    o attach: a value other than zero attaches to an existing persistent
%      pixel cache; zero initializes (clones) a new one.
%
%    o offset: the offset in the persistent cache to store pixels.
%
%    o exception: return any errors or warnings in this structure.
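%
%  A hedged usage sketch (the cache filename is illustrative; attach set to
%  MagickFalse clones the current in-memory cache out to disk):
%
%      MagickOffsetType offset = 0;
%      if (PersistPixelCache(image,"image.cache",MagickFalse,&offset,
%            exception) == MagickFalse)
%        return(MagickFalse);
%
%  On return, offset has been advanced past the stored pixels, rounded up to
%  the system page size, ready for the next image in the cache file.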
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=DiskCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(SyncImagePixelCache(image,exception));
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,
    MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->read_mask=cache_info->read_mask;
  clone_info->write_mask=cache_info->write_mask;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e   A u t h e n t i c   P i x e l   C a c h e   N e x u s         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred to the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelCacheNexus() method is:
%
%      Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        const MagickBooleanType clone,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to set.
%
%    o clone: clone the pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  RectangleInfo
    region;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,nexus_info,
    exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e   A u t h e n t i c   P i x e l s   C a c h e                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred to the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelsCache() method is:
%
%      Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
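%
%  Like QueueAuthenticPixels(), the region is staged in a per-thread cache
%  nexus; an illustrative sketch of the delegation performed below:
%
%      const int id = GetOpenMPThreadId();
%      pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,
%        MagickFalse,cache_info->nexus_info[id],exception);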
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u e u e   A u t h e n t i c   P i x e l s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixels() queues a mutable pixel region.  If the region is
%  successfully initialized a pointer to a Quantum array representing the
%  region is returned, otherwise NULL is returned.  The returned pointer may
%  point to a temporary working buffer for the pixels or it may point to the
%  final location of the pixels in memory.
%
%  Write-only access means that any existing pixel values corresponding to
%  the region are ignored.  This is useful if the initial image is being
%  created from scratch, or if the existing pixel values are to be
%  completely replaced without need to refer to their pre-existing values.
%  The application is free to read and write the pixel buffer returned by
%  QueueAuthenticPixels() any way it pleases.  QueueAuthenticPixels() does not
%  initialize the pixel array values.  Initializing pixel array values is the
%  application's responsibility.
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in memory, or in a
%  memory-mapped file.  The returned pointer must *never* be deallocated
%  by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking QueueAuthenticPixels() to
%  obtain the meta-content (of type void) corresponding to the region.
%  Once the Quantum (and/or metacontent) array has been updated, the
%  changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the QueueAuthenticPixels() method is:
%
%      Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
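%
%  A typical write-only cycle (an illustrative sketch; the zero fill of the
%  red channel is arbitrary, since the application must initialize every
%  channel it cares about):
%
%      for (y=0; y < (ssize_t) image->rows; y++)
%      {
%        q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
%        if (q == (Quantum *) NULL)
%          break;
%        for (x=0; x < (ssize_t) image->columns; x++)
%        {
%          SetPixelRed(image,0,q);
%          q+=GetPixelChannels(image);
%        }
%        if (SyncAuthenticPixels(image,exception) == MagickFalse)
%          break;
%      }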
% */ MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.queue_authentic_pixels_handler(image,x,y, columns,rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCacheMetacontent() reads metacontent from the specified region of % the pixel cache. % % The format of the ReadPixelCacheMetacontent() method is: % % MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the metacontent. % % o exception: return any errors or warnings in this structure. % */ static inline MagickOffsetType ReadPixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PREAD) count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX)); #else count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType ReadPixelCacheMetacontent( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register ssize_t y; register unsigned char *magick_restrict q; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=length*nexus_info->region.height; rows=nexus_info->region.height; y=0; q=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { register unsigned char *magick_restrict p; /* Read meta-content from memory. 
*/ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->metacontent_extent*cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } break; } case DiskCache: { /* Read meta content from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read metacontent from distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=cache_info->metacontent_extent*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCachePixels() reads pixels from the specified region of the pixel % cache. % % The format of the ReadPixelCachePixels() method is: % % MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the pixels. % % o exception: return any errors or warnings in this structure. 
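%
%  When the nexus covers full cache rows, the implementation coalesces the
%  row-by-row transfer into one contiguous run; an illustrative excerpt of
%  the fast path used below:
%
%      if ((cache_info->columns == nexus_info->region.width) &&
%          (extent == (MagickSizeType) ((size_t) extent)))
%        { length=extent; rows=1UL; }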
% */ static MagickBooleanType ReadPixelCachePixels( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register Quantum *magick_restrict q; register ssize_t y; size_t number_channels, rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns; if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y) return(MagickFalse); offset+=nexus_info->region.x; number_channels=cache_info->number_channels; length=(MagickSizeType) number_channels*nexus_info->region.width* sizeof(Quantum); if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width) return(MagickFalse); rows=nexus_info->region.height; extent=length*rows; if ((extent == 0) || ((extent/length) != rows)) return(MagickFalse); y=0; q=nexus_info->pixels; switch (cache_info->type) { case MemoryCache: case MapCache: { register Quantum *magick_restrict p; /* Read pixels from memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=cache_info->pixels+offset*cache_info->number_channels; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->number_channels*cache_info->columns; q+=cache_info->number_channels*nexus_info->region.width; } break; } case DiskCache: { /* Read pixels from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset* cache_info->number_channels*sizeof(*q),length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->number_channels*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read pixels from distributed cache. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=cache_info->number_channels*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e f e r e n c e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferencePixelCache() increments the reference count associated with the % pixel cache returning a pointer to the cache. % % The format of the ReferencePixelCache method is: % % Cache ReferencePixelCache(Cache cache_info) % % A description of each parameter follows: % % o cache_info: the pixel cache. % */ MagickPrivate Cache ReferencePixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count++; UnlockSemaphoreInfo(cache_info->semaphore); return(cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheChannels() resets the pixel cache channels. % % The format of the ResetPixelCacheChannels method is: % % void ResetPixelCacheChannels(Image *) % % A description of each parameter follows: % % o image: the image. % */ MagickPrivate void ResetPixelCacheChannels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); cache_info->number_channels=GetPixelChannels(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e E p o c h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheEpoch() resets the pixel cache epoch. 
% % The format of the ResetPixelCacheEpoch method is: % % void ResetPixelCacheEpoch(void) % */ MagickPrivate void ResetPixelCacheEpoch(void) { cache_epoch=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheMethods() sets the image pixel methods to the specified ones. % % The format of the SetPixelCacheMethods() method is: % % SetPixelCacheMethods(Cache *,CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache: the pixel cache. % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods) { CacheInfo *magick_restrict cache_info; GetOneAuthenticPixelFromHandler get_one_authentic_pixel_from_handler; GetOneVirtualPixelFromHandler get_one_virtual_pixel_from_handler; /* Set cache pixel methods. */ assert(cache != (Cache) NULL); assert(cache_methods != (CacheMethods *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) cache_info->methods.get_virtual_pixel_handler= cache_methods->get_virtual_pixel_handler; if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL) cache_info->methods.destroy_pixel_handler= cache_methods->destroy_pixel_handler; if (cache_methods->get_virtual_metacontent_from_handler != (GetVirtualMetacontentFromHandler) NULL) cache_info->methods.get_virtual_metacontent_from_handler= cache_methods->get_virtual_metacontent_from_handler; if (cache_methods->get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) cache_info->methods.get_authentic_pixels_handler= cache_methods->get_authentic_pixels_handler; if (cache_methods->queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) cache_info->methods.queue_authentic_pixels_handler= cache_methods->queue_authentic_pixels_handler; if (cache_methods->sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) cache_info->methods.sync_authentic_pixels_handler= cache_methods->sync_authentic_pixels_handler; if (cache_methods->get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) cache_info->methods.get_authentic_pixels_from_handler= cache_methods->get_authentic_pixels_from_handler; if (cache_methods->get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) cache_info->methods.get_authentic_metacontent_from_handler= cache_methods->get_authentic_metacontent_from_handler; get_one_virtual_pixel_from_handler= cache_info->methods.get_one_virtual_pixel_from_handler; if (get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) cache_info->methods.get_one_virtual_pixel_from_handler= cache_methods->get_one_virtual_pixel_from_handler; get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; if (get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) cache_info->methods.get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e N e x u s P i x e l s % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheNexusPixels() defines the region of the cache for the
%  specified cache nexus.
%
%  The format of the SetPixelCacheNexusPixels() method is:
%
%      Quantum *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
%        const MapMode mode,const RectangleInfo *region,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o region: A pointer to the RectangleInfo structure that defines the
%      region of this particular cache nexus.
%
%    o nexus_info: the cache nexus to set.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length))
    return(MagickFalse);
  if (cache_anonymous_memory <= 0)
    {
      nexus_info->mapped=MagickFalse;
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) nexus_info->length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) ResetMagickMemory(nexus_info->cache,0,(size_t)
          nexus_info->length);
    }
  else
    {
      nexus_info->mapped=MagickTrue;
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
        nexus_info->length);
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  return(MagickTrue);
}

static inline MagickBooleanType IsPixelCacheAuthentic(
  const CacheInfo *magick_restrict cache_info,
  const NexusInfo *magick_restrict nexus_info)
{
  MagickBooleanType
    status;

  MagickOffsetType
    offset;

  /*
    Do the nexus pixels point directly to the in-core cache pixels, or are
    they buffered?
  */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  status=nexus_info->pixels == (cache_info->pixels+offset*
    cache_info->number_channels) ?
MagickTrue : MagickFalse; return(status); } static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info, const MapMode mode) { if (mode == ReadMode) { MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1); return; } MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1); } static Quantum *SetPixelCacheNexusPixels(const CacheInfo *cache_info, const MapMode mode,const RectangleInfo *region,NexusInfo *nexus_info, ExceptionInfo *exception) { MagickBooleanType status; MagickSizeType length, number_pixels; assert(cache_info != (const CacheInfo *) NULL); assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((Quantum *) NULL); if ((region->width == 0) || (region->height == 0)) return((Quantum *) NULL); nexus_info->region=(*region); number_pixels=(MagickSizeType) nexus_info->region.width* nexus_info->region.height; if (number_pixels == 0) return((Quantum *) NULL); if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) { ssize_t x, y; x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1; y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1; if (((nexus_info->region.x >= 0) && (x < (ssize_t) cache_info->columns) && (nexus_info->region.y >= 0) && (y < (ssize_t) cache_info->rows)) && ((nexus_info->region.height == 1UL) || ((nexus_info->region.x == 0) && ((nexus_info->region.width == cache_info->columns) || ((nexus_info->region.width % cache_info->columns) == 0))))) { MagickOffsetType offset; /* Pixels are accessed directly from memory. */ offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; nexus_info->pixels=cache_info->pixels+cache_info->number_channels* offset; nexus_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) nexus_info->metacontent=(unsigned char *) cache_info->metacontent+ offset*cache_info->metacontent_extent; PrefetchPixelCacheNexusPixels(nexus_info,mode); nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info, nexus_info); return(nexus_info->pixels); } } /* Pixels are stored in a staging region until they are synced to the cache. */ length=number_pixels*cache_info->number_channels*sizeof(Quantum); if (cache_info->metacontent_extent != 0) length+=number_pixels*cache_info->metacontent_extent; if (nexus_info->cache == (Quantum *) NULL) { nexus_info->length=length; status=AcquireCacheNexusPixels(cache_info,nexus_info,exception); if (status == MagickFalse) { nexus_info->length=0; return((Quantum *) NULL); } } else if (nexus_info->length < length) { RelinquishCacheNexusPixels(nexus_info); nexus_info->length=length; status=AcquireCacheNexusPixels(cache_info,nexus_info,exception); if (status == MagickFalse) { nexus_info->length=0; return((Quantum *) NULL); } } nexus_info->pixels=nexus_info->cache; nexus_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) nexus_info->metacontent=(void *) (nexus_info->pixels+number_pixels* cache_info->number_channels); PrefetchPixelCacheNexusPixels(nexus_info,mode); nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info, nexus_info); return(nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the % pixel cache and returns the previous setting. 
A virtual pixel is any pixel % access that is outside the boundaries of the image cache. % % The format of the SetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, % const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; CacheView *magick_restrict image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); image->alpha_trait=BlendPixelTrait; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(image,alpha,q); q+=GetPixelChannels(image); } status=SyncCacheViewAuthenticPixels(image_view,exception); } image_view=DestroyCacheView(image_view); return(status); } MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; VirtualPixelMethod method; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); method=cache_info->virtual_pixel_method; cache_info->virtual_pixel_method=virtual_pixel_method; if ((image->columns != 0) && (image->rows != 0)) switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: { if ((image->background_color.alpha_trait != UndefinedPixelTrait) && (image->alpha_trait == UndefinedPixelTrait)) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); if ((IsPixelInfoGray(&image->background_color) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); break; } case TransparentVirtualPixelMethod: { if (image->alpha_trait == UndefinedPixelTrait) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); break; } default: break; } return(method); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have % been completed and updates the host memory. 
% % The format of the SyncAuthenticOpenCLBuffer() method is: % % void SyncAuthenticOpenCLBuffer(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info) { assert(cache_info != (CacheInfo *) NULL); assert(cache_info->signature == MagickCoreSignature); if ((cache_info->type != MemoryCache) || (cache_info->opencl == (MagickCLCacheInfo) NULL)) return; /* Ensure single threaded access to OpenCL environment. */ LockSemaphoreInfo(cache_info->semaphore); cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); UnlockSemaphoreInfo(cache_info->semaphore); } MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); cache_info=(CacheInfo *) image->cache; CopyOpenCLBuffer(cache_info); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the % in-memory or disk cache. The method returns MagickTrue if the pixel region % is synced, otherwise MagickFalse. % % The format of the SyncAuthenticPixelCacheNexus() method is: % % MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to sync. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType status; /* Transfer pixels to the cache. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->cache == (Cache) NULL) ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) { image->taint=MagickTrue; return(MagickTrue); } assert(cache_info->signature == MagickCoreSignature); status=WritePixelCachePixels(cache_info,nexus_info,exception); if ((cache_info->metacontent_extent != 0) && (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)) return(MagickFalse); if (status != MagickFalse) image->taint=MagickTrue; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory % or disk cache. The method returns MagickTrue if the pixel region is synced, % otherwise MagickFalse. % % The format of the SyncAuthenticPixelsCache() method is: % % MagickBooleanType SyncAuthenticPixelsCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
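%
%  A queued region must be synced by the same thread that queued it, since
%  both operations resolve the same per-thread nexus; an illustrative
%  pairing:
%
%      q=QueueAuthenticPixelsCache(image,0,y,image->columns,1,exception);
%      if (q != (Quantum *) NULL)
%        status=SyncAuthenticPixelsCache(image,exception);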
% */ static MagickBooleanType SyncAuthenticPixelsCache(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncAuthenticPixels() method is: % % MagickBooleanType SyncAuthenticPixels(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SyncAuthenticPixels(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) { status=cache_info->methods.sync_authentic_pixels_handler(image, exception); return(status); } assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImagePixelCache() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncImagePixelCache() method is: % % MagickBooleanType SyncImagePixelCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(exception != (ExceptionInfo *) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception); return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCacheMetacontent() writes the meta-content to the specified region % of the pixel cache. 
% % The format of the WritePixelCacheMetacontent() method is: % % MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the meta-content. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register const unsigned char *magick_restrict p; register ssize_t y; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=(MagickSizeType) length*nexus_info->region.height; rows=nexus_info->region.height; y=0; p=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { register unsigned char *magick_restrict q; /* Write associated pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=nexus_info->region.width*cache_info->metacontent_extent; q+=cache_info->columns*cache_info->metacontent_extent; } break; } case DiskCache: { /* Write associated pixels to disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->metacontent_extent*nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write metacontent to distributed cache. 
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e   P i x e l   C a c h e   P i x e l s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCachePixels() writes image pixels to the specified region of the
%  pixel cache.
%
%  The format of the WritePixelCachePixels() method is:
%
%      MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset*cache_info->number_channels;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
*/ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+offset* cache_info->number_channels*sizeof(*p),length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->number_channels*nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write pixels to distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->number_channels*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); }
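/*
  A minimal stand-alone sketch (hypothetical names, plain C) of the
  row-collapse idea used by WritePixelCacheMetacontent() and
  WritePixelCachePixels() above: when the nexus region spans the full
  cache width, its rows are contiguous in the cache, so the per-row
  copy loop degenerates to a single copy of the whole extent.
*/
#include <stddef.h>
#include <string.h>

static void copy_region_to_cache(unsigned char *cache,
  const unsigned char *region_pixels,size_t cache_columns,size_t region_x,
  size_t region_y,size_t region_width,size_t region_height,
  size_t bytes_per_pixel)
{
  size_t
    length,
    rows,
    y;

  unsigned char
    *q;

  length=region_width*bytes_per_pixel;  /* bytes in one region row */
  rows=region_height;
  q=cache+(region_y*cache_columns+region_x)*bytes_per_pixel;
  if (cache_columns == region_width)
    {
      /*
        Region rows are adjacent in the cache (region_x is necessarily 0),
        so one memcpy() moves the entire region.
      */
      length*=rows;
      rows=1;
    }
  for (y=0; y < rows; y++)
  {
    (void) memcpy(q,region_pixels,length);
    region_pixels+=region_width*bytes_per_pixel;
    q+=cache_columns*bytes_per_pixel;
  }
}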
pcpaes_cfbdecrypt.c
/******************************************************************************* * Copyright 2013-2019 Intel Corporation * All Rights Reserved. * * If this software was obtained under the Intel Simplified Software License, * the following terms apply: * * The source code, information and material ("Material") contained herein is * owned by Intel Corporation or its suppliers or licensors, and title to such * Material remains with Intel Corporation or its suppliers or licensors. The * Material contains proprietary information of Intel or its suppliers and * licensors. The Material is protected by worldwide copyright laws and treaty * provisions. No part of the Material may be used, copied, reproduced, * modified, published, uploaded, posted, transmitted, distributed or disclosed * in any way without Intel's prior express written permission. No license under * any patent, copyright or other intellectual property rights in the Material * is granted to or conferred upon you, either expressly, by implication, * inducement, estoppel or otherwise. Any license under such intellectual * property rights must be express and approved by Intel in writing. * * Unless otherwise agreed by Intel in writing, you may not remove or alter this * notice or any other notice embedded in Materials by Intel or Intel's * suppliers or licensors in any way. * * * If this software was obtained under the Apache License, Version 2.0 (the * "License"), the following terms apply: * * You may not use this file except in compliance with the License. You may * obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ /* // // Purpose: // Cryptography Primitive. // AES encryption/decryption (CFB mode) // // Contents: // ippsAESDecryptCFB() // */ #include "owndefs.h" #include "owncp.h" #include "pcpaesm.h" #include "pcptool.h" #if defined(_OPENMP) # include "omp.h" #endif #if (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPOSITE_GF_) #elif (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPACT_SBOX_) # include "pcprijtables.h" #else #endif /*F* // Name: ippsAESDecryptCFB // // Purpose: AES-CFB decryption. 
//
// Returns:                   Reason:
//    ippStsNullPtrErr           pCtx == NULL
//                               pSrc == NULL
//                               pDst == NULL
//                               pIV == NULL
//    ippStsContextMatchErr      !VALID_AES_ID()
//    ippStsLengthErr            len < 1
//    ippStsCFBSizeErr           (1>cfbBlkSize || cfbBlkSize>MBS_RIJ128)
//    ippStsUnderRunErr          0!=(len%cfbBlkSize)
//    ippStsNoErr                no errors
//
// Parameters:
//    pSrc        pointer to the source data buffer
//    pDst        pointer to the target data buffer
//    len         input/output stream length (in bytes)
//    cfbBlkSize  CFB block size (in bytes)
//    pCtx        pointer to the AES context
//    pIV         pointer to the initialization vector
//
*F*/
static void cpDecryptAES_cfb(const Ipp8u* pIV,
                             const Ipp8u* pSrc, Ipp8u* pDst, int nBlocks,
                             int cfbBlkSize,
                             const IppsAESSpec* pCtx)
{
#if (_IPP>=_IPP_P8) || (_IPP32E>=_IPP32E_Y8)
   /* use pipelined version if possible */
   if(AES_NI_ENABLED==RIJ_AESNI(pCtx)) {
      if(cfbBlkSize==MBS_RIJ128)
         DecryptCFB128_RIJ128pipe_AES_NI(pSrc, pDst, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), nBlocks*cfbBlkSize, pIV);
      else if(0==(cfbBlkSize&3))
         DecryptCFB32_RIJ128pipe_AES_NI(pSrc, pDst, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), nBlocks, cfbBlkSize, pIV);
      else
         DecryptCFB_RIJ128pipe_AES_NI(pSrc, pDst, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), nBlocks, cfbBlkSize, pIV);
   }
   else
#endif
   {
      Ipp32u tmpInp[2*NB(128)];
      Ipp32u tmpOut[  NB(128)];

      /* setup encoder method */
      RijnCipher encoder = RIJ_ENCODER(pCtx);

      /* read IV */
      CopyBlock16(pIV, tmpInp);

      /* decrypt data block-by-block of cfbBlkSize each */
      while(nBlocks) {
         /* decryption */
         //encoder(tmpInp, tmpOut, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), (const Ipp32u (*)[256])RIJ_ENC_SBOX(pCtx));
         #if (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPACT_SBOX_)
         encoder((Ipp8u*)tmpInp, (Ipp8u*)tmpOut, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), RijEncSbox/*NULL*/);
         #else
         encoder((Ipp8u*)tmpInp, (Ipp8u*)tmpOut, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), NULL);
         #endif

         /* store output and put feedback into the input buffer (tmpInp) */
         if( cfbBlkSize==MBS_RIJ128 && pSrc!=pDst) {
            ((Ipp32u*)pDst)[0] = tmpOut[0]^((Ipp32u*)pSrc)[0];
            ((Ipp32u*)pDst)[1] = tmpOut[1]^((Ipp32u*)pSrc)[1];
            ((Ipp32u*)pDst)[2] = tmpOut[2]^((Ipp32u*)pSrc)[2];
            ((Ipp32u*)pDst)[3] = tmpOut[3]^((Ipp32u*)pSrc)[3];
            tmpInp[0] = ((Ipp32u*)pSrc)[0];
            tmpInp[1] = ((Ipp32u*)pSrc)[1];
            tmpInp[2] = ((Ipp32u*)pSrc)[2];
            tmpInp[3] = ((Ipp32u*)pSrc)[3];
         }
         else {
            int n;
            for(n=0; n<cfbBlkSize; n++) {
               ((Ipp8u*)tmpInp)[MBS_RIJ128+n] = pSrc[n];
               pDst[n] = (Ipp8u)( ((Ipp8u*)tmpOut)[n] ^ pSrc[n] );
            }
            /* shift input buffer (tmpInp) for the next CFB operation */
            CopyBlock16((Ipp8u*)tmpInp+cfbBlkSize, tmpInp);
         }

         pSrc += cfbBlkSize;
         pDst += cfbBlkSize;
         nBlocks--;
      }
   }
}

IPPFUN(IppStatus, ippsAESDecryptCFB,(const Ipp8u* pSrc, Ipp8u* pDst, int len, int cfbBlkSize,
                                     const IppsAESSpec* pCtx,
                                     const Ipp8u* pIV))
{
   /* test context */
   IPP_BAD_PTR1_RET(pCtx);
   /* use aligned AES context */
   pCtx = (IppsAESSpec*)( IPP_ALIGNED_PTR(pCtx, AES_ALIGNMENT) );
   /* test the context ID */
   IPP_BADARG_RET(!VALID_AES_ID(pCtx), ippStsContextMatchErr);

   /* test source, target buffers and initialization pointers */
   IPP_BAD_PTR3_RET(pSrc, pIV, pDst);
   /* test stream length */
   IPP_BADARG_RET((len<1), ippStsLengthErr);
   /* test CFB value */
   IPP_BADARG_RET(((1>cfbBlkSize) || (MBS_RIJ128<cfbBlkSize)), ippStsCFBSizeErr);
   /* test stream integrity */
   IPP_BADARG_RET((len%cfbBlkSize), ippStsUnderRunErr);

   /* do decryption */
   {
      int nBlocks = len / cfbBlkSize;

      #if !defined(_OPENMP)
      cpDecryptAES_cfb(pIV, pSrc, pDst, nBlocks, cfbBlkSize, pCtx);

      #else
      int blk_per_thread = AES_NI_ENABLED==RIJ_AESNI(pCtx)?
AESNI128_MIN_BLK_PER_THREAD : RIJ128_MIN_BLK_PER_THREAD; int nThreads = IPP_MIN(IPPCP_GET_NUM_THREADS(), IPP_MAX(nBlocks/blk_per_thread, 1)); if(1==nThreads) cpDecryptAES_cfb(pIV, pSrc, pDst, nBlocks, cfbBlkSize, pCtx); else { int blksThreadReg; int blksThreadTail; int srcBlkSize; int ivBlkSize; Ipp8u locIV[MBS_RIJ128*DEFAULT_CPU_NUM]; #if defined (__INTEL_COMPILER) Ipp8u* pLocIV = nThreads>DEFAULT_CPU_NUM? kmp_malloc(nThreads*MBS_RIJ128) : locIV; #else Ipp8u* pLocIV = nThreads>DEFAULT_CPU_NUM ? malloc(nThreads*MBS_RIJ128) : locIV; #endif if(pLocIV) { #pragma omp parallel IPPCP_OMP_LIMIT_MAX_NUM_THREADS(nThreads) { #pragma omp master { int nt; nThreads = omp_get_num_threads(); blksThreadReg = nBlocks / nThreads; blksThreadTail = blksThreadReg + nBlocks % nThreads; srcBlkSize = blksThreadReg*cfbBlkSize; ivBlkSize = IPP_MIN(MBS_RIJ128,srcBlkSize); CopyBlock16(pIV, pLocIV+0); for(nt=1; nt<nThreads; nt++) CopyBlock(pSrc+nt*srcBlkSize-ivBlkSize, pLocIV+MBS_RIJ128+(nt-1)*ivBlkSize, ivBlkSize); } #pragma omp barrier { int id = omp_get_thread_num(); Ipp8u* pThreadIV = pLocIV + id*ivBlkSize; Ipp8u* pThreadSrc = (Ipp8u*)pSrc + id*srcBlkSize; Ipp8u* pThreadDst = (Ipp8u*)pDst + id*srcBlkSize; int blkThread = (id==(nThreads-1))? blksThreadTail : blksThreadReg; cpDecryptAES_cfb(pThreadIV, pThreadSrc, pThreadDst, blkThread, cfbBlkSize, pCtx); } } if(pLocIV != locIV) #if defined (__INTEL_COMPILER) kmp_free(pLocIV); #else free(pLocIV); #endif } else return ippStsMemAllocErr; } #endif return ippStsNoErr; } }
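/*
// A minimal sketch of full-block CFB decryption with hypothetical names,
// independent of the IPP internals above.  Two properties of the code above
// are visible here: (1) CFB decryption runs the block cipher in its
// *encryption* direction, P[i] = E_K(C[i-1]) ^ C[i] with C[-1] = IV; and
// (2) because the feedback is ciphertext, which is already known, disjoint
// chunks of the stream decrypt independently -- this is what the OpenMP
// branch exploits by seeding each thread's IV from the preceding
// ciphertext block.
*/
#include <string.h>

#define CFB_BLK 16 /* AES block size in bytes */

typedef void (*BlockEncrypt)(const unsigned char in[CFB_BLK],
                             unsigned char out[CFB_BLK], const void* pKey);

static void cfb128Decrypt(const unsigned char* pSrc, unsigned char* pDst,
                          int nBlocks, const unsigned char pIV[CFB_BLK],
                          BlockEncrypt encrypt, const void* pKey)
{
   unsigned char feedback[CFB_BLK], keystream[CFB_BLK];
   int n;

   memcpy(feedback, pIV, CFB_BLK);
   while(nBlocks--) {
      encrypt(feedback, keystream, pKey); /* keystream = E_K(feedback) */
      memcpy(feedback, pSrc, CFB_BLK);    /* next feedback: this ciphertext */
      for(n=0; n<CFB_BLK; n++)
         pDst[n] = (unsigned char)(keystream[n] ^ pSrc[n]);
      pSrc += CFB_BLK;
      pDst += CFB_BLK;
   }
}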
3d25pt.c
/*
 * Order-2, 3D 25 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>  /* needed for omp_get_max_threads() below */
#endif

#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

#ifndef min
#define min(x,y)    ((x) < (y)? (x) : (y))
#endif

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* Arbitrary default sizes so the benchmark is well-defined even when too
   * few command-line arguments are given; the original code left Nx, Ny,
   * Nz and Nt uninitialized in that case. */
  Nx = Ny = Nz = 64+8;
  Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double***)*2);
  /* Allocate roc2 once with the correct size; the original first allocated
   * a single-element block and leaked it on the subsequent reassignment. */
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);

  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 24;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // Start at index 0 so the radius-4 stencil never reads uninitialized
  // boundary cells, and seed both time levels (the original loops started
  // at 1 and left A[1] uninitialized even though it is read at t=0).
  // srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
                2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] +
                roc2[i][j][k]*(
                  coef0* A[t%2][i  ][j  ][k  ] +
                  coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                         A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                         A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
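/*
 * A 1-D sketch of the update inside the quadruple loop above (names here are
 * illustrative, not from the benchmark): a radius-4 central-difference
 * stencil combined with a leapfrog time step,
 *   A_next = 2*A_cur - A_prev + roc2*stencil(A_cur),
 * where the two time levels live in A[0]/A[1] and alternate via the
 * (t+1)%2 / t%2 indexing.
 */
void wave_step_1d(const double *A_prev, const double *A_cur,
                  double *A_next, const double *roc2, int N)
{
  const double c0 = -0.28472, c1 = 0.16000, c2 = -0.02000,
               c3 = 0.00254, c4 = -0.00018;
  int k;
  for (k = 4; k < N-4; k++) {
    A_next[k] = 2.0*A_cur[k] - A_prev[k] + roc2[k]*(
        c0* A_cur[k]
      + c1*(A_cur[k-1] + A_cur[k+1])
      + c2*(A_cur[k-2] + A_cur[k+2])
      + c3*(A_cur[k-3] + A_cur[k+3])
      + c4*(A_cur[k-4] + A_cur[k+4]));
  }
}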
distribute_parallel_for_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp distribute parallel for simd for (int i = 0; i < 10; ++i) argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}} #pragma omp distribute parallel for simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}} #pragma omp distribute parallel for simd foo void test_no_clause() { int i; #pragma omp distribute parallel for simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp distribute parallel for simd' must be a for loop}} #pragma omp distribute parallel for simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd; for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd firstprivate(x); for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd safelen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd safelen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 
{{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd safelen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute parallel for simd safelen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute parallel for simd safelen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd safelen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd safelen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd simdlen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 
{{expected expression}} #pragma omp distribute parallel for simd simdlen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd simdlen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute parallel for simd simdlen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute parallel for simd simdlen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd simdlen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd simdlen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp distribute parallel for simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; #pragma omp target 
#pragma omp teams // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp distribute parallel for simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd collapse for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // 
expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute parallel for simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute parallel for simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd collapse(2) for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp distribute parallel for simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd linear( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd linear(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd linear(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd linear() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd linear(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd linear(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute parallel for simd linear(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared 
identifier 'y'}} #pragma omp distribute parallel for simd linear(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute parallel for simd linear(x, y, z) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd aligned(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned(x :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} 
#pragma omp distribute parallel for simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp distribute parallel for simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute parallel for simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd private( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd private(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd private() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams 
// expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd firstprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; // expected-error@+3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 {{defined as lastprivate}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; // expected-error@+3 2 {{lastprivate variable cannot be firstprivate}} expected-note@+3 2 {{defined as lastprivate}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 3 {{defined as lastprivate}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute parallel for simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute parallel for simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void test_nontemporal() { int i; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd nontemporal( for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected 
OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd nontemporal(, for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd nontemporal(, ) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd nontemporal() for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd nontemporal(int) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} omp50-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd nontemporal(0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute parallel for simd nontemporal(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute parallel for simd nontemporal(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute parallel for simd nontemporal(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd nontemporal(x :) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} #pragma omp distribute parallel for simd nontemporal(x :, ) for (i = 0; i < 16; ++i) ; // omp50-note@+2 {{defined as nontemporal}} // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}} #pragma omp distribute parallel for simd nontemporal(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} #pragma omp distribute parallel for simd private(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute 
parallel for simd'}} #pragma omp distribute parallel for simd nontemporal(x) private(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} #pragma omp distribute parallel for simd nontemporal(x, y : 0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} #pragma omp distribute parallel for simd nontemporal(x) lastprivate(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} #pragma omp distribute parallel for simd lastprivate(x) nontemporal(x) for (i = 0; i < 16; ++i) ; #pragma omp distribute parallel for simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected '(' after 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp distribute parallel for simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp distribute parallel for simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp distribute parallel for simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} for (int i = 0; i < 10; ++i) ; #pragma omp distribute parallel for simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} for (int i = 0; i < 10; ++i) ; }
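// For contrast with the malformed directives diagnosed above, a well-formed
// use of the composite construct: strictly nested inside a teams region,
// with perfectly nested canonical loops matching collapse(2), and constant
// strictly positive safelen/simdlen arguments where simdlen <= safelen.
void test_well_formed(float *a, float *b, float *c) {
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd collapse(2) safelen(8) simdlen(4)
  for (int i = 0; i < 16; ++i)
    for (int j = 0; j < 16; ++j)
      c[i * 16 + j] = a[i * 16 + j] + b[i * 16 + j];
}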
auxiliaryPlane.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "compearth.h"

#pragma omp declare simd
static void strikeDip(const double nRad, const double eRad, const double uRad,
                      double *strikeDeg, double *dipDeg);
/*!
 * @brief Computes the strike, dip, and rake of the auxiliary nodal plane.
 *
 * @param[in] nmt   Number of moment tensors.
 * @param[in] s1    Strike angles (degrees) for plane 1.  These are measured
 *                  positive east from north and should be in the range of
 *                  \f$ \phi \in [0, 360] \f$.  This is an array of dimension
 *                  [nmt].
 * @param[in] d1    Dip angles (degrees) for plane 1.  These are measured
 *                  positive down from the horizontal and should be in the
 *                  range \f$ \delta \in [0, 90] \f$.  This is an array
 *                  of dimension [nmt].
 * @param[in] r1    Rake angles (degrees) for plane 1.  These should be in the
 *                  range \f$ \lambda \in [-180, 180] \f$.  This is an array
 *                  of dimension [nmt].
 * @param[out] s2   Strike angles (degrees) for plane 2.  These are measured
 *                  positive east from north and should be in the range of
 *                  \f$ \phi \in [0, 360] \f$.  This is an array of
 *                  dimension [nmt].
 * @param[out] d2   Dip angles (degrees) for plane 2.  These are measured
 *                  positive down from the horizontal and should be in the
 *                  range \f$ \delta \in [0, 90] \f$.  This is an array
 *                  of dimension [nmt].
 * @param[out] r2   Rake angles (degrees) for plane 2.  These should be in the
 *                  range \f$ \lambda \in [-180, 180] \f$.  This is an array
 *                  of dimension [nmt].
 *
 * @result 0 indicates success.
 *
 * @author Andy Michaels and Oliver Boyd; updated by Ben Baker
 *
 * @copyright MIT
 *
 * @date August 2017
 *
 */
int compearth_auxiliaryPlane(const int nmt,
                             const double *__restrict__ s1,
                             const double *__restrict__ d1,
                             const double *__restrict__ r1,
                             double *__restrict__ s2,
                             double *__restrict__ d2,
                             double *__restrict__ r2)
{
    const double toRad = M_PI/180.0;
    const double toDeg = 180.0/M_PI;
    double cosz, cosz2, cosz3, hNorm, h1, h2, h3, n1, n2, n3,
           sinz, sinz2, sinz3, sl1, sl2, sl3, z, z2, z3;
    int i;
    if (nmt < 1 || s1 == NULL || d1 == NULL || r1 == NULL ||
        s2 == NULL || d2 == NULL || r2 == NULL)
    {
        if (nmt < 1){fprintf(stderr, "%s: No points\n", __func__);}
        if (s1 == NULL){fprintf(stderr, "%s: s1 is NULL\n", __func__);}
        if (d1 == NULL){fprintf(stderr, "%s: d1 is NULL\n", __func__);}
        if (r1 == NULL){fprintf(stderr, "%s: r1 is NULL\n", __func__);}
        if (s2 == NULL){fprintf(stderr, "%s: s2 is NULL\n", __func__);}
        if (d2 == NULL){fprintf(stderr, "%s: d2 is NULL\n", __func__);}
        if (r2 == NULL){fprintf(stderr, "%s: r2 is NULL\n", __func__);}
        return -1;
    }
    for (i=0; i<nmt; i++)
    {
        // Advance the strike 90 degrees
        z = (s1[i] + 90.0)*toRad; // strike +90 degrees
        z2 = d1[i]*toRad;         // dip
        z3 = r1[i]*toRad;         // rake
        // Compute the slip vector in plane 1
        cosz = cos(z);
        cosz2 = cos(z2);
        cosz3 = cos(z3);
        sinz = sin(z);
        sinz2 = sin(z2);
        sinz3 = sin(z3);
        // Jost and Herrmann Eqn 15 except sl1 and sl3 are negated.
        sl1 =-cosz3*cosz - sinz3*sinz*cosz2;
        sl2 = cosz3*sinz - sinz3*cosz*cosz2;
        sl3 = sinz3*sinz2;
        // Compute normal vector to plane 1.  Jost and Herrmann Eqn 16
        // except n1 and n3 are negated.
        n1 = sinz*sinz2;
        n2 = cosz*sinz2;
        n3 = cosz2;
        // Compute slip vector of plane 2.  This looks like:
        //  h = (1,0,0) x (sl1, sl2, sl3)
        // I think this produces the directions of strike and slip.
hNorm = 1.0/hypot(sl2, sl1); // Letting h3 = 0.0 h1 =-sl2*hNorm; h2 = sl1*hNorm; h3 = 0.0; // Dot strike and slip into the fault plane (normal to fault plane 1) z = h1*n1 + h2*n2 + h3*n3; // z/hypot(h1, h2); z = fmin(1.0, fmax(-1.0, z)); // Strictly enforce bounds // Compute the angle between the measured fault plane and directions // of slip and dip - Aki and Richards pg 101 Figure 4.13 z = acos(z); // Compute the strike and dip angles from the slip strikeDip(sl2, sl1, sl3, &s2[i], &d2[i]); r2[i] = z*toDeg; if (sl3 <= 0.0){r2[i] =-r2[i];} } return 0; } /*! * @brief Finds strike and dip of plane given normal vector having components * n, e, and u. * * @author Andy Michaels and Oliver Boyd updated by Ben Baker * * @copyright MIT * * @date August 2017 * */ static void strikeDip(const double nRad, const double eRad, const double uRad, double *strikeDeg, double *dipDeg) { const double toDeg = 180./M_PI; double e, u, n, strike, x; n = nRad; e = eRad; u = uRad; if (u < 0.0) { n =-n; e =-e; u =-u; } strike = atan2(e, n)*toDeg; // This is in range [-180,180] strike = strike - 90.0; // This is in range [-270,90] // This loop is pointless //while (strike >= 360.0) //{ // strike = strike - 360.0; //} // This loop could execute at most once //while strike < 0 //{ // strike = strike + 360.0; //} if (strike < 0.0){strike = strike + 360.0;} *strikeDeg = strike; x = hypot(n, e); //x = sqrt(n*n + e*e); *dipDeg = atan2(x, u)*toDeg; return; } /* function [strike, dip, rake] = AuxPlane(s1,d1,r1); %function [strike, dip, rake] = AuxPlane(s1,d1,r1); % Get Strike and dip of second plane, adapted from Andy Michael bothplanes.c r2d = 180/pi; z = (s1+90)/r2d; z2 = d1/r2d; z3 = r1/r2d; % slick vector in plane 1 sl1 = -cos(z3).*cos(z)-sin(z3).*sin(z).*cos(z2); sl2 = cos(z3).*sin(z)-sin(z3).*cos(z).*cos(z2); sl3 = sin(z3).*sin(z2); [strike, dip] = strikedip(sl2,sl1,sl3); n1 = sin(z).*sin(z2); % normal vector to plane 1 n2 = cos(z).*sin(z2); n3 = cos(z2); h1 = -sl2; % strike vector of plane 2 h2 = sl1; % note h3=0 always so we leave it out z = h1.*n1 + h2.*n2; z = z./sqrt(h1.*h1 + h2.*h2); z = acos(z); rake = zeros(size(strike)); j = find(sl3 > 0); rake(j) = z(j)*r2d; j = find(sl3 <= 0); rake(j) = -z(j)*r2d; function [strike, dip] = strikedip(n, e, u) %function [strike, dip] = strikedip(n, e, u) % Finds strike and dip of plane given normal vector having components n, e, and u % % Adapted from Andy Michaels stridip.c r2d = 180/pi; j = find(u < 0); n(j) = -n(j); e(j) = -e(j); u(j) = -u(j); strike = atan2(e,n)*r2d; strike = strike - 90; while strike >= 360 strike = strike - 360; end while strike < 0 strike = strike + 360; end x = sqrt(n.^2 + e.^2); dip = atan2(x,u)*r2d; */
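/*!
 * @brief A minimal usage sketch for compearth_auxiliaryPlane() (illustrative
 *        inputs, not reference values): given one nodal plane of a double
 *        couple, recover the conjugate plane.  Relies on the headers already
 *        included at the top of this file.
 */
int example_auxiliaryPlane(void)
{
    const double s1 = 40.0, d1 = 60.0, r1 =-90.0; /* a normal-faulting plane */
    double d2, r2, s2;
    if (compearth_auxiliaryPlane(1, &s1, &d1, &r1, &s2, &d2, &r2) != 0)
    {
        fprintf(stderr, "%s: auxiliary plane computation failed\n", __func__);
        return -1;
    }
    printf("auxiliary plane: strike=%f dip=%f rake=%f\n", s2, d2, r2);
    return 0;
}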
MINDSSCbox.h
void boxfilter(float* input,float* temp1,float* temp2, int hw, // mind_step int m,int n,int o){ int sz=m*n*o; for(int i=0;i<sz;i++){ temp1[i]=input[i];//copy to temp, initialization } ////y for(int k=0;k<o;k++){ for(int j=0;j<n;j++){ for(int i=1;i<m;i++){ temp1[i+j*m+k*m*n]+=temp1[(i-1)+j*m+k*m*n]; //add intensity value but index starting delayed } } } for(int k=0;k<o;k++){ //iterate over all z dimensions for(int j=0;j<n;j++){ // for x dim do (or y dim?) for(int i=0;i<(hw+1);i++){ // [0 to hw] temp2[i+j*m+k*m*n]=temp1[(i+hw)+j*m+k*m*n]; // -0 at beginning, left of sliding index, copy offset y value? } for(int i=(hw+1);i<(m-hw);i++){ //[hw+1 to len-hw) ('box' can move free without hitting a border) temp2[i+j*m+k*m*n]=temp1[(i+hw)+j*m+k*m*n]-temp1[(i-hw-1)+j*m+k*m*n]; //delta (symmetric y val offset + dy / -dy) } for(int i=(m-hw);i<m;i++){ // [len-hw to len) temp2[i+j*m+k*m*n]=temp1[(m-1)+j*m+k*m*n]-temp1[(i-hw-1)+j*m+k*m*n]; //delta (last value of dim (fixed) - negative offseted value } } } ////x for(int k=0;k<o;k++){ for(int j=1;j<n;j++){ for(int i=0;i<m;i++){ temp2[i+j*m+k*m*n]+=temp2[i+(j-1)*m+k*m*n]; //add intensity value but reversed axis (other) } } } for(int k=0;k<o;k++){ for(int i=0;i<m;i++){ for(int j=0;j<(hw+1);j++){ //caution, dimensions were switched temp1[i+j*m+k*m*n]=temp2[i+(j+hw)*m+k*m*n]; //see above, but for next dim (x) } for(int j=(hw+1);j<(n-hw);j++){ temp1[i+j*m+k*m*n]=temp2[i+(j+hw)*m+k*m*n]-temp2[i+(j-hw-1)*m+k*m*n]; } for(int j=(n-hw);j<n;j++){ temp1[i+j*m+k*m*n]=temp2[i+(n-1)*m+k*m*n]-temp2[i+(j-hw-1)*m+k*m*n]; } } } ////z //add intensity value but last reversed axis z for(int k=1;k<o;k++){ for(int j=0;j<n;j++){ for(int i=0;i<m;i++){ temp1[i+j*m+k*m*n]+=temp1[i+j*m+(k-1)*m*n]; } } } // see above, but now for last axis for(int j=0;j<n;j++){ for(int i=0;i<m;i++){ for(int k=0;k<(hw+1);k++){ input[i+j*m+k*m*n]=temp1[i+j*m+(k+hw)*m*n]; } for(int k=(hw+1);k<(o-hw);k++){ input[i+j*m+k*m*n]=temp1[i+j*m+(k+hw)*m*n]-temp1[i+j*m+(k-hw-1)*m*n]; } for(int k=(o-hw);k<o;k++){ input[i+j*m+k*m*n]=temp1[i+j*m+(o-1)*m*n]-temp1[i+j*m+(k-hw-1)*m*n]; } } } } void imshift(float* input,float* output,int dx,int dy,int dz,int m,int n,int o){ // Get values of shifted patch. 
If coordinate is not within image dimensions return same intensity as original image (will result in zero distance) //shift image with preservation of original image values when shifted area is out of bounds for(int k=0;k<o;k++){ //z for(int j=0;j<n;j++){ //x for(int i=0;i<m;i++){ //y , iterate orig image size if(i+dy>=0 && i+dy<m && j+dx>=0 && j+dx<n && k+dz>=0 && k+dz<o) //this is something like the min(max) construct output[i+j*m+k*m*n]=input[i+dy+(j+dx)*m+(k+dz)*m*n]; //lookup displaced values and return else output[i+j*m+k*m*n]=input[i+j*m+k*m*n]; //lookup value w/o displacement (just copy), sth like 'inversed mirroring ad edged = normal image at egdes' } } } } /*void *distances(void *threadarg) { struct mind_data *my_data; my_data = (struct mind_data *) threadarg; float* im1=my_data->im1; float* d1=my_data->d1; int qs=my_data->qs; int ind_d1=my_data->ind_d1; int m=image_m; int n=image_n; int o=image_o;*/ void distances(float* im1,float* d1,int m,int n,int o,int qs,int l){ int sz1=m*n*o; float* w1=new float[sz1]; int len1=6;//not needed float* temp1=new float[sz1]; //img size temp float* temp2=new float[sz1]; //img size temp // int dx[6]={+qs, +qs, -qs, 0, +qs, +0 }; //redifinition // int dy[6]={+qs, -qs, 0, -qs, 0, +qs}; //redefinition // int dz[6]={0, 0, +qs, +qs, +qs, +qs}; //redefiniton int dx[6]={+qs, +qs, +qs, +qs, 0, 0}; //redifinition int dy[6]={+qs, -qs, 0, 0, +qs, +qs}; //redefinition int dz[6]={0, 0, +qs, -qs, +qs, -qs}; //redefiniton //dx, dy, dz could be passed directly to this function from upper call to omit redefinitions // Offset patches in every 6-neighbourhood direction by quanstisation step (radius) imshift(im1,w1,dx[l],dy[l],dz[l],m,n,o); // std::cout<<"\nw1="; // for(int pri=0;pri<m*n*o ;pri++){ // std::cout<<w1[pri]<<" "; // } // std::cout<<"\nw2="; // for(int pri=0;pri<sz_pad ;pri++){ // std::cout<<w2[pri]<<" "; // } for(int i=0;i<sz1;i++){ w1[i]=(w1[i]-im1[i])*(w1[i]-im1[i]); //(0-im[i])^2 = squared img dist from intensity val } // for(int k=0;k<o;k++){ // for(int j=0;j<m;j++){ // for(int i=0;i<n;i++){ // int w2_coord = i-dx[l] + (j-dy[l])*n + (k-dz[l])*m*n; // int im_coord = i + j*n + k*m*n; // w2[w2_coord] = (w2[w2_coord] - im1[im_coord]) * (w2[w2_coord] - im1[im_coord]); //(0-im[i])^2 = squared img dist from intensity val // } // } // } // std::cout<<"\nsquared_patch_distance="; // for(int pri=0;pri<m*n*o ;pri++){ // std::cout<<w1[pri]<<" "; // } // for(int pri=0;pri<sz_pad ;pri++){ // std::cout<<w2[pri]<<" "; // } //3 dim box filter = sth. like blur // boxfilter(w1,temp1,temp2,qs,m,n,o); //w1 is input and output for(int i=0;i<sz1;i++){ d1[i+l*sz1]=w1[i]; } delete[] temp1; delete[] temp2; delete[] w1; } //__builtin_popcountll(left[i]^right[i]); absolute hamming distances void descriptor(uint64_t* mindq,float* im1, int m,int n,int o, //image dims int qs, float* output_mind_twelve=0, float* output_mind_bare=0){ //mind_step (chain values smaller than quantisation chain) timeval time1,time2; //MIND with self-similarity context //3^3 shift combinations (+qs,0,-qs) (x-dir, y-dir, z-dir) but only adjacent placed to origin (no diagonals) = 6 combinations // int sx[12]={-qs,+0, -qs,+0, +0, +qs,+0, +0, +0, -qs,+0, +0}; //is that MIND-SSC? // int sy[12]={+0, -qs,+0, +qs,+0, +0, +0, +qs,+0, +0, +0, -qs}; // int sz[12]={+0, +0, +0, +0, -qs,+0, -qs,+0, -qs,+0, -qs,+0}; // int sy[12]={ +0, -qs, -qs, +0, +0, +qs, +0, +0, +0, -qs, +0, +0}; //is that MIND-SSC? 
// int sx[12]={-qs, +0, +0, +qs, +0, +0, +0, +qs, +0, +0, +0, -qs}; // int sz[12]={+0, +0, +0, +0, -qs, +0, -qs, +0, -qs, +0, -qs, +0}; int sy[12]={0, -qs, -qs, 0, -qs, 0, -qs, 0, 0, 0, 0, 0 }; //is that MIND-SSC? int sx[12]={-qs, 0, 0, qs, 0, 0, 0, 0, -qs, 0, -qs, 0 }; int sz[12]={0, 0, 0, 0, 0, -qs, 0, +qs, 0, -qs, 0, qs}; int index[12]={0,0,1,1,2,2,3,3,4,4,5,5}; float sigma=0.75;//1.0;//0.75;//1.5; int rho=ceil(sigma*1.5)*2+1; // is unused! int len1=6; //len dx dy dz const int len2=12; //len sx sy sz image_d=12; int d=12; int sz1=m*n*o; pthread_t thread1, thread2, thread3; //============== DISTANCES USING BOXFILTER =================== float* d1=new float[sz1*len1]; //img_size * (dx,dy,dz) (6 neighbourhood) gettimeofday(&time1, NULL); #pragma omp parallel for for(int l=0;l<len1;l++){ //for all dx, dy, dz //l iterator controls the shift package (dx,dy,dz) distances(im1,d1,m,n,o,qs,l); //d1 is returned (stored distances per x,y,z,l dimension, 4-dim) } std::cout<<"\ndistances="; for(int pri=0;pri<m*n*o*6 ;pri++){ std::cout<<d1[pri]<<" "; } gettimeofday(&time2, NULL); float timeMIND1=time2.tv_sec+time2.tv_usec/1e6-(time1.tv_sec+time1.tv_usec/1e6); gettimeofday(&time1, NULL); //quantisation table const int val=6; const unsigned long long power=32; #pragma omp parallel for // float mind_twelve[m*n*o*12]; for(int k=0;k<o;k++){ //iterate z //could be moved out of loop unsigned int tablei[6]={0,1,3,7,15,31}; //lookup table float compare[val-1]; for(int i=0;i<val-1;i++){ compare[i]=-log((i+1.5f)/val);//compare is const for every iteration } //could be moved out of loop float mind1[12];//is this correct here? -> could be moved before l_iter loop (only needed within l_iter loop) for(int j=0;j<n;j++){//iterate x for(int i=0;i<m;i++){ //iterate y std::cout<<"\nmind1_bare: "; for(int l=0;l<len2;l++){ // iterate (sx,sy,sz) / index[l] package to get 12 mind values int eff_sx = min(max(0, i+sx[l]),m-1); int eff_sy = min(max(0, j+sy[l]),n-1); int eff_sz = min(max(0, k+sz[l]),o-1); // consecutive l's will take same d1 offset (0...6) but different spatial coordinate mind1[l]=d1[eff_sx + eff_sy*m + eff_sz*m*n+index[l]*sz1]; //mind1 is 1-dim, size=12 // -> take same node but calc 2 diffs per node and add? 
2*6diffs = 12 // if(i+sx[l]>=0 && i+sx[l]<m && j+sy[l]>=0 && j+sy[l]<n && k+sz[l]>=0 && k+sz[l]<o){ //min(max) construct // mind1[l]=d1[i+sx[l]+(j+sy[l])*m+(k+sz[l])*m*n+index[l]*sz1]; //mind1 is 1-dim, size=12 // //read offset distance value // } // else{ // mind1[l]=d1[i+j*m+k*m*n+index[l]*sz1]; //(sz1=m*n*o) builds 12-neighbourhood // //read without (sx,sy,sz) offset but with l=12 layer offset // } } float minval=*min_element(mind1,mind1+len2); //get minimum value of all 12 stored mind1 features float sumnoise=0.0f; for(int l=0;l<len2;l++){ mind1[l]-=minval; //reset minimum value of mind1 to 0 and lower others accordingly std::cout<<mind1[l]<<" "; output_mind_bare[i+j*m+k*m*n+l*sz1] = mind1[l]; sumnoise+=mind1[l]; //accumulate } float noise1=max(sumnoise/(float)len2,1e-6f); //rescale accumulated mind features for(int l=0;l<len2;l++){ mind1[l]/=noise1; } unsigned long long accum=0; unsigned long long tabled1=1; for(int l=0;l<len2;l++){ //iterate over all mind features //mind1[l]=exp(-mind1[l]); int mind1val=0; for(int c=0;c<val-1;c++){ //accumulate over 5 values mind1val+=compare[c]>mind1[l]?1:0; //count if mind feature is smaller than compare const } //int mind1val=min(max((int)(mind1[l]*val-0.5f),0),val-1); accum+=tablei[mind1val]*tabled1; //*32^l, probably this was done because of casting to long long tabled1*=power; } mindq[i+j*m+k*m*n]=accum; //one mind value for every coordinate xyz } } } // std::cout<<"\nmindq="; // for(int pri=0;pri<m*n*o ;pri++){ // std::cout<<mindq[pri]<<" "; // } // std::cout<<"\nmind_twelve="; // for(int pri=0;pri<m*n*o*12 ;pri++){ // std::cout<<output_mind_twelve[pri]<<" "; // } gettimeofday(&time2, NULL); float timeMIND2=time2.tv_sec+time2.tv_usec/1e6-(time1.tv_sec+time1.tv_usec/1e6); delete[] d1; } std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> mind_ssc_descriptor( torch::Tensor image, torch::Tensor pQuantisation_step) { int o = image.size(0); int n = image.size(1); int m = image.size(2); torch::Tensor image_copy = image.clone(); float* input_image = image_copy.data_ptr<float>(); int* quantisation_step = pQuantisation_step.data_ptr<int>(); uint64_t* output = new uint64_t[m*n*o]; float* output_mind_twelve = new float[m*n*o*12]; float* output_mind_bare = new float[m*n*o*12]; descriptor(output, input_image, o, m, n, *quantisation_step, output_mind_twelve, output_mind_bare); std::vector<uint64_t> output_vect{output, output+m*n*o}; std::vector<float> output_mind_bare_vect{output_mind_bare, output_mind_bare+m*n*o*12}; std::vector<float> output_mind_twelve_vect{output_mind_twelve, output_mind_twelve+m*n*o*12}; delete[] output; delete[] output_mind_bare; delete[] output_mind_twelve; //the vectors hold copies now, free the raw buffers to avoid leaking them auto options = torch::TensorOptions().dtype(torch::kInt64); auto float_options = torch::TensorOptions().dtype(torch::kFloat); return std::tuple< torch::Tensor, torch::Tensor, torch::Tensor>( torch::from_blob(output_vect.data(), {n,m,o}, options).clone(), torch::from_blob(output_mind_twelve_vect.data(), {12,n,m,o}, float_options).clone(), torch::from_blob(output_mind_bare_vect.data(), {12,n,m,o}, float_options).clone() ); }
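// ---------------------------------------------------------------------------
// Editor's note (addition, not part of the original file): a minimal usage
// sketch for mind_ssc_descriptor above. Hypothetical and for illustration
// only -- it assumes libtorch is linked and that a float32 volume with layout
// (o, n, m) is passed together with an int32 quantisation step, matching the
// signature above.
// ---------------------------------------------------------------------------
#include <torch/torch.h>
#include <iostream>
#include <tuple>

int main() {
    torch::Tensor image = torch::rand({8, 16, 16});           // small test volume, (o, n, m)
    torch::Tensor qs = torch::full({1}, 2, torch::kInt32);    // quantisation step / patch radius
    auto result = mind_ssc_descriptor(image, qs);             // (codes, mind_twelve, mind_bare)
    std::cout << std::get<0>(result).sizes() << "\n";         // packed 64-bit MIND-SSC codes, (n, m, o)
    std::cout << std::get<1>(result).sizes() << "\n";         // 12-channel MIND features, (12, n, m, o)
    return 0;
}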
convolution_3x3_pack1to8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_pack1to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const __fp16* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16((__fp16)0.f); out0.fill(_bias0); const __fp16* k0 = kernel.channel(p); int q = 0; for (; q < inch; q++) { __fp16* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const __fp16* r0 = img0.row<const __fp16>(0); const __fp16* r1 = img0.row<const __fp16>(1); const __fp16* r2 = img0.row<const __fp16>(2); float16x8_t _k00 = vld1q_f16(k0); float16x8_t _k01 = vld1q_f16(k0 + 8); float16x8_t _k02 = vld1q_f16(k0 + 16); float16x8_t _k10 = vld1q_f16(k0 + 24); float16x8_t _k11 = vld1q_f16(k0 + 32); float16x8_t _k12 = vld1q_f16(k0 + 40); float16x8_t _k20 = vld1q_f16(k0 + 48); float16x8_t _k21 = vld1q_f16(k0 + 56); float16x8_t _k22 = vld1q_f16(k0 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 7 < outw; j += 8) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%0], #64 \n" // sum0 sum1 sum2 sum3 "prfm pldl1keep, [%0, #512] \n" "ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum4 sum5 sum6 sum7 "sub %0, %0, #64 \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.8h}, [%1], #16 \n" // r0 "ld1 {v1.4h}, [%1] \n" "fmla v24.8h, %8.8h, v0.h[0] \n" "fmla v25.8h, %8.8h, v0.h[1] \n" "fmla v26.8h, %8.8h, v0.h[2] \n" "fmla v27.8h, %8.8h, v0.h[3] \n" "fmla v28.8h, %8.8h, v0.h[4] \n" "fmla v29.8h, %8.8h, v0.h[5] \n" "fmla v30.8h, %8.8h, v0.h[6] \n" "fmla v31.8h, %8.8h, v0.h[7] \n" "fmla v24.8h, %9.8h, v0.h[1] \n" "fmla v25.8h, %9.8h, v0.h[2] \n" "fmla v26.8h, %9.8h, v0.h[3] \n" "fmla v27.8h, %9.8h, v0.h[4] \n" "fmla v28.8h, %9.8h, v0.h[5] \n" "fmla v29.8h, %9.8h, v0.h[6] \n" "fmla v30.8h, %9.8h, v0.h[7] \n" "fmla v31.8h, %9.8h, v1.h[0] \n" "fmla v24.8h, %10.8h, v0.h[2] \n" "fmla v25.8h, %10.8h, v0.h[3] \n" "fmla v26.8h, %10.8h, v0.h[4] \n" "fmla v27.8h, %10.8h, v0.h[5] \n" "fmla v28.8h, %10.8h, v0.h[6] \n" "fmla v29.8h, %10.8h, v0.h[7] \n" "fmla v30.8h, %10.8h, v1.h[0] \n" "fmla v31.8h, %10.8h, v1.h[1] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v2.8h}, [%2], #16 \n" // r1 "ld1 {v3.4h}, [%2] \n" "fmla v24.8h, %11.8h, v2.h[0] \n" "fmla v25.8h, %11.8h, v2.h[1] \n" "fmla v26.8h, %11.8h, v2.h[2] \n" "fmla v27.8h, %11.8h, v2.h[3] \n" "fmla v28.8h, %11.8h, v2.h[4] \n" "fmla v29.8h, %11.8h, v2.h[5] \n" "fmla v30.8h, %11.8h, v2.h[6] \n" "fmla v31.8h, %11.8h, v2.h[7] \n" "fmla v24.8h, %12.8h, v2.h[1] \n" "fmla v25.8h, %12.8h, v2.h[2] \n" "fmla v26.8h, %12.8h, v2.h[3] \n" "fmla v27.8h, %12.8h, v2.h[4] \n" "fmla v28.8h, %12.8h, v2.h[5] 
\n" "fmla v29.8h, %12.8h, v2.h[6] \n" "fmla v30.8h, %12.8h, v2.h[7] \n" "fmla v31.8h, %12.8h, v3.h[0] \n" "fmla v24.8h, %13.8h, v2.h[2] \n" "fmla v25.8h, %13.8h, v2.h[3] \n" "fmla v26.8h, %13.8h, v2.h[4] \n" "fmla v27.8h, %13.8h, v2.h[5] \n" "fmla v28.8h, %13.8h, v2.h[6] \n" "fmla v29.8h, %13.8h, v2.h[7] \n" "fmla v30.8h, %13.8h, v3.h[0] \n" "fmla v31.8h, %13.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.8h}, [%3], #16 \n" // r2 "ld1 {v5.4h}, [%3] \n" "fmla v24.8h, %14.8h, v4.h[0] \n" "fmla v25.8h, %14.8h, v4.h[1] \n" "fmla v26.8h, %14.8h, v4.h[2] \n" "fmla v27.8h, %14.8h, v4.h[3] \n" "fmla v28.8h, %14.8h, v4.h[4] \n" "fmla v29.8h, %14.8h, v4.h[5] \n" "fmla v30.8h, %14.8h, v4.h[6] \n" "fmla v31.8h, %14.8h, v4.h[7] \n" "fmla v24.8h, %15.8h, v4.h[1] \n" "fmla v25.8h, %15.8h, v4.h[2] \n" "fmla v26.8h, %15.8h, v4.h[3] \n" "fmla v27.8h, %15.8h, v4.h[4] \n" "fmla v28.8h, %15.8h, v4.h[5] \n" "fmla v29.8h, %15.8h, v4.h[6] \n" "fmla v30.8h, %15.8h, v4.h[7] \n" "fmla v31.8h, %15.8h, v5.h[0] \n" "fmla v24.8h, %16.8h, v4.h[2] \n" "fmla v25.8h, %16.8h, v4.h[3] \n" "fmla v26.8h, %16.8h, v4.h[4] \n" "fmla v27.8h, %16.8h, v4.h[5] \n" "fmla v28.8h, %16.8h, v4.h[6] \n" "fmla v29.8h, %16.8h, v4.h[7] \n" "fmla v30.8h, %16.8h, v5.h[0] \n" "fmla v31.8h, %16.8h, v5.h[1] \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%0], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0 sum1 sum2 sum3 "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.8h}, [%1] \n" // r0 "fmla v28.8h, %8.8h, v0.h[0] \n" "fmla v29.8h, %8.8h, v0.h[1] \n" "fmla v30.8h, %8.8h, v0.h[2] \n" "fmla v31.8h, %8.8h, v0.h[3] \n" "fmla v28.8h, %9.8h, v0.h[1] \n" "fmla v29.8h, %9.8h, v0.h[2] \n" "fmla v30.8h, %9.8h, v0.h[3] \n" "fmla v31.8h, %9.8h, v0.h[4] \n" "fmla v28.8h, %10.8h, v0.h[2] \n" "fmla v29.8h, %10.8h, v0.h[3] \n" "fmla v30.8h, %10.8h, v0.h[4] \n" "fmla v31.8h, %10.8h, v0.h[5] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v1.8h}, [%2] \n" // r1 "fmla v28.8h, %11.8h, v1.h[0] \n" "fmla v29.8h, %11.8h, v1.h[1] \n" "fmla v30.8h, %11.8h, v1.h[2] \n" "fmla v31.8h, %11.8h, v1.h[3] \n" "fmla v28.8h, %12.8h, v1.h[1] \n" "fmla v29.8h, %12.8h, v1.h[2] \n" "fmla v30.8h, %12.8h, v1.h[3] \n" "fmla v31.8h, %12.8h, v1.h[4] \n" "fmla v28.8h, %13.8h, v1.h[2] \n" "fmla v29.8h, %13.8h, v1.h[3] \n" "fmla v30.8h, %13.8h, v1.h[4] \n" "fmla v31.8h, %13.8h, v1.h[5] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v2.8h}, [%3] \n" // r2 "fmla v28.8h, %14.8h, v2.h[0] \n" "fmla v29.8h, %14.8h, v2.h[1] \n" "fmla v30.8h, %14.8h, v2.h[2] \n" "fmla v31.8h, %14.8h, v2.h[3] \n" "fmla v28.8h, %15.8h, v2.h[1] \n" "fmla v29.8h, %15.8h, v2.h[2] \n" "fmla v30.8h, %15.8h, v2.h[3] \n" "fmla v31.8h, %15.8h, v2.h[4] \n" "fmla v28.8h, %16.8h, v2.h[2] \n" "fmla v29.8h, %16.8h, v2.h[3] \n" "fmla v30.8h, %16.8h, v2.h[4] \n" "fmla v31.8h, %16.8h, v2.h[5] \n" "add %1, %1, #8 \n" "add %2, %2, #8 \n" "add %3, %3, #8 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), 
"w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "v0", "v1", "v2", "v28", "v29", "v30", "v31"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v30.8h, v31.8h}, [%0] \n" // sum0 sum1 "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1] \n" // r0 "fmla v30.8h, %8.8h, v0.h[0] \n" "fmla v31.8h, %8.8h, v0.h[1] \n" "fmla v30.8h, %9.8h, v0.h[1] \n" "fmla v31.8h, %9.8h, v0.h[2] \n" "fmla v30.8h, %10.8h, v0.h[2] \n" "fmla v31.8h, %10.8h, v0.h[3] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v1.4h}, [%2] \n" // r1 "fmla v30.8h, %11.8h, v1.h[0] \n" "fmla v31.8h, %11.8h, v1.h[1] \n" "fmla v30.8h, %12.8h, v1.h[1] \n" "fmla v31.8h, %12.8h, v1.h[2] \n" "fmla v30.8h, %13.8h, v1.h[2] \n" "fmla v31.8h, %13.8h, v1.h[3] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v2.4h}, [%3] \n" // r2 "fmla v30.8h, %14.8h, v2.h[0] \n" "fmla v31.8h, %14.8h, v2.h[1] \n" "fmla v30.8h, %15.8h, v2.h[1] \n" "fmla v31.8h, %15.8h, v2.h[2] \n" "fmla v30.8h, %16.8h, v2.h[2] \n" "fmla v31.8h, %16.8h, v2.h[3] \n" "add %1, %1, #4 \n" "add %2, %2, #4 \n" "add %3, %3, #4 \n" "st1 {v30.8h, v31.8h}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "v0", "v1", "v2", "v30", "v31"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v30.8h}, [%0] \n" // sum0 "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1] \n" // r0 "fmla v30.8h, %8.8h, v0.h[0] \n" "fmla v30.8h, %9.8h, v0.h[1] \n" "fmla v30.8h, %10.8h, v0.h[2] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v1.4h}, [%2] \n" // r1 "fmla v30.8h, %11.8h, v1.h[0] \n" "fmla v30.8h, %12.8h, v1.h[1] \n" "fmla v30.8h, %13.8h, v1.h[2] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v2.4h}, [%3] \n" // r2 "fmla v30.8h, %14.8h, v2.h[0] \n" "fmla v30.8h, %15.8h, v2.h[1] \n" "fmla v30.8h, %16.8h, v2.h[2] \n" "add %1, %1, #2 \n" "add %2, %2, #2 \n" "add %3, %3, #2 \n" "st1 {v30.8h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "v0", "v1", "v2", "v30"); } r0 += 2; r1 += 2; r2 += 2; } k0 += 9 * 8; } } } static void conv3x3s2_pack1to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const __fp16* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); float16x8_t _bias0 = bias ? 
vld1q_f16(bias + p * 8) : vdupq_n_f16((__fp16)0.f); out0.fill(_bias0); const __fp16* k0 = kernel.channel(p); int q = 0; for (; q < inch; q++) { __fp16* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const __fp16* r0 = img0.row<const __fp16>(0); const __fp16* r1 = img0.row<const __fp16>(1); const __fp16* r2 = img0.row<const __fp16>(2); float16x8_t _k00 = vld1q_f16(k0); float16x8_t _k01 = vld1q_f16(k0 + 8); float16x8_t _k02 = vld1q_f16(k0 + 16); float16x8_t _k10 = vld1q_f16(k0 + 24); float16x8_t _k11 = vld1q_f16(k0 + 32); float16x8_t _k12 = vld1q_f16(k0 + 40); float16x8_t _k20 = vld1q_f16(k0 + 48); float16x8_t _k21 = vld1q_f16(k0 + 56); float16x8_t _k22 = vld1q_f16(k0 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0 sum1 sum2 sum3 "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.8h}, [%1], #16 \n" // r0 "ld1 {v1.h}[0], [%1] \n" "fmla v28.8h, %8.8h, v0.h[0] \n" "fmla v29.8h, %8.8h, v0.h[2] \n" "fmla v30.8h, %8.8h, v0.h[4] \n" "fmla v31.8h, %8.8h, v0.h[6] \n" "fmla v28.8h, %9.8h, v0.h[1] \n" "fmla v29.8h, %9.8h, v0.h[3] \n" "fmla v30.8h, %9.8h, v0.h[5] \n" "fmla v31.8h, %9.8h, v0.h[7] \n" "fmla v28.8h, %10.8h, v0.h[2] \n" "fmla v29.8h, %10.8h, v0.h[4] \n" "fmla v30.8h, %10.8h, v0.h[6] \n" "fmla v31.8h, %10.8h, v1.h[0] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v2.8h}, [%2], #16 \n" // r1 "ld1 {v3.h}[0], [%2] \n" "fmla v28.8h, %11.8h, v2.h[0] \n" "fmla v29.8h, %11.8h, v2.h[2] \n" "fmla v30.8h, %11.8h, v2.h[4] \n" "fmla v31.8h, %11.8h, v2.h[6] \n" "fmla v28.8h, %12.8h, v2.h[1] \n" "fmla v29.8h, %12.8h, v2.h[3] \n" "fmla v30.8h, %12.8h, v2.h[5] \n" "fmla v31.8h, %12.8h, v2.h[7] \n" "fmla v28.8h, %13.8h, v2.h[2] \n" "fmla v29.8h, %13.8h, v2.h[4] \n" "fmla v30.8h, %13.8h, v2.h[6] \n" "fmla v31.8h, %13.8h, v3.h[0] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.8h}, [%3], #16 \n" // r2 "ld1 {v5.h}[0], [%3] \n" "fmla v28.8h, %14.8h, v4.h[0] \n" "fmla v29.8h, %14.8h, v4.h[2] \n" "fmla v30.8h, %14.8h, v4.h[4] \n" "fmla v31.8h, %14.8h, v4.h[6] \n" "fmla v28.8h, %15.8h, v4.h[1] \n" "fmla v29.8h, %15.8h, v4.h[3] \n" "fmla v30.8h, %15.8h, v4.h[5] \n" "fmla v31.8h, %15.8h, v4.h[7] \n" "fmla v28.8h, %16.8h, v4.h[2] \n" "fmla v29.8h, %16.8h, v4.h[4] \n" "fmla v30.8h, %16.8h, v4.h[6] \n" "fmla v31.8h, %16.8h, v5.h[0] \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v28", "v29", "v30", "v31"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v30.8h, v31.8h}, [%0] \n" // sum0 sum1 "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1], #8 \n" // r0 "ld1 {v1.h}[0], [%1] \n" "fmla v30.8h, %8.8h, v0.h[0] \n" "fmla v31.8h, %8.8h, v0.h[2] \n" "fmla v30.8h, %9.8h, v0.h[1] \n" "fmla v31.8h, %9.8h, v0.h[3] \n" "fmla v30.8h, %10.8h, v0.h[2] \n" "fmla v31.8h, %10.8h, v1.h[0] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.4h}, [%2], #8 \n" // r1 "ld1 {v3.h}[0], [%2] \n" "fmla v30.8h, %11.8h, v2.h[0] \n" "fmla v31.8h, %11.8h, v2.h[2] \n" "fmla v30.8h, %12.8h, v2.h[1] \n" "fmla v31.8h, %12.8h, v2.h[3] \n" "fmla v30.8h, %13.8h, v2.h[2] \n" "fmla v31.8h, %13.8h, v3.h[0] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v4.4h}, [%3], #8 \n" // r2 
"ld1 {v5.h}[0], [%3] \n" "fmla v30.8h, %14.8h, v4.h[0] \n" "fmla v31.8h, %14.8h, v4.h[2] \n" "fmla v30.8h, %15.8h, v4.h[1] \n" "fmla v31.8h, %15.8h, v4.h[3] \n" "fmla v30.8h, %16.8h, v4.h[2] \n" "fmla v31.8h, %16.8h, v5.h[0] \n" "st1 {v30.8h, v31.8h}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v30", "v31"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v30.8h}, [%0] \n" // sum0 "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1] \n" // r0 "fmla v30.8h, %8.8h, v0.h[0] \n" "fmla v30.8h, %9.8h, v0.h[1] \n" "fmla v30.8h, %10.8h, v0.h[2] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v1.4h}, [%2] \n" // r1 "fmla v30.8h, %11.8h, v1.h[0] \n" "fmla v30.8h, %12.8h, v1.h[1] \n" "fmla v30.8h, %13.8h, v1.h[2] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v2.4h}, [%3] \n" // r2 "fmla v30.8h, %14.8h, v2.h[0] \n" "fmla v30.8h, %15.8h, v2.h[1] \n" "fmla v30.8h, %16.8h, v2.h[2] \n" "add %1, %1, #4 \n" "add %2, %2, #4 \n" "add %3, %3, #4 \n" "st1 {v30.8h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "v0", "v1", "v2", "v30"); } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9 * 8; } } }
GB_unop__identity_fp32_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fp32_fc64) // op(A') function: GB (_unop_tran__identity_fp32_fc64) // C type: float // A type: GxB_FC64_t // cast: float cij = (float) creal (aij) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ float z = (float) creal (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = (float) creal (aij) ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fp32_fc64) ( float *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; float z = (float) creal (aij) ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; float z = (float) creal (aij) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fp32_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
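//------------------------------------------------------------------------------
// Editor's note (addition): a standalone sketch of the per-entry cast this
// kernel applies -- not GraphBLAS code, and guarded out of the build. It is
// written as C++, with std::complex<double> standing in for the C99
// `double complex` behind GxB_FC64_t: each entry is narrowed to float by
// keeping the real part and discarding the imaginary part.
//------------------------------------------------------------------------------

#if 0
#include <complex>
#include <cstdio>

int main (void)
{
    std::complex<double> aij (3.5, 2.0) ;
    float cij = (float) aij.real () ;   // same effect as: (float) creal (aij)
    std::printf ("%g\n", cij) ;         // prints 3.5
    return (0) ;
}
#endif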
blake2bp-ref.c
/* BLAKE2 reference source code package - reference C implementations Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at your option. The terms of these licenses can be found at: - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - OpenSSL license : https://www.openssl.org/source/license.html - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 More information about the BLAKE2 hash function can be found at https://blake2.net. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdint.h> #if defined(_OPENMP) #include <omp.h> #endif #include "blake2.h" #include "blake2-impl.h" #define PARALLELISM_DEGREE 4 static inline int blake2bp_init_leaf( blake2b_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset ) { blake2b_param P[1]; P->digest_length = outlen; P->key_length = keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; store32( &P->leaf_length, 0 ); store64( &P->node_offset, offset ); P->node_depth = 0; P->inner_length = BLAKE2B_OUTBYTES; memset( P->reserved, 0, sizeof( P->reserved ) ); memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); return blake2b_init_param( S, P ); } static inline int blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen ) { blake2b_param P[1]; P->digest_length = outlen; P->key_length = keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; store32( &P->leaf_length, 0 ); store64( &P->node_offset, 0 ); P->node_depth = 1; P->inner_length = BLAKE2B_OUTBYTES; memset( P->reserved, 0, sizeof( P->reserved ) ); memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); return blake2b_init_param( S, P ); } int blake2bp_init( blake2bp_state *S, const uint8_t outlen ) { if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; if( blake2bp_init_root( S->R, outlen, 0 ) < 0 ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2bp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; return 0; } int blake2bp_init_key( blake2bp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen ) { if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1; if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; if( blake2bp_init_root( S->R, outlen, keylen ) < 0 ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; { uint8_t block[BLAKE2B_BLOCKBYTES]; memset( block, 0, BLAKE2B_BLOCKBYTES ); memcpy( block, key, keylen ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES ); secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */ } return 0; } int blake2bp_update( blake2bp_state *S, const uint8_t *in, uint64_t inlen ) { size_t left = S->buflen; size_t fill = sizeof( S->buf ) - left; if( left && inlen >= fill ) { memcpy( S->buf + left, in, fill ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES ); in += fill; inlen -= fill; left = 0; } #if defined(_OPENMP) #pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE) #else for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ 
) #endif { #if defined(_OPENMP) size_t id__ = omp_get_thread_num(); #endif uint64_t inlen__ = inlen; const uint8_t *in__ = ( const uint8_t * )in; in__ += id__ * BLAKE2B_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES ) { blake2b_update( S->S[id__], in__, BLAKE2B_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; } } in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES ); inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; if( inlen > 0 ) memcpy( S->buf + left, in, inlen ); S->buflen = left + inlen; return 0; } int blake2bp_final( blake2bp_state *S, uint8_t *out, const uint8_t outlen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES]; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) { if( S->buflen > i * BLAKE2B_BLOCKBYTES ) { size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES; if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES; blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left ); } blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES ); } for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES ); return blake2b_final( S->R, out, outlen ); } int blake2bp( uint8_t *out, const void *in, const void *key, uint8_t outlen, uint64_t inlen, uint8_t keylen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES]; blake2b_state S[PARALLELISM_DEGREE][1]; blake2b_state FS[1]; /* Verify parameters */ if ( NULL == in && inlen > 0 ) return -1; if ( NULL == out ) return -1; if( NULL == key && keylen > 0 ) return -1; if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1; if( keylen > BLAKE2B_KEYBYTES ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1; S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node if( keylen > 0 ) { uint8_t block[BLAKE2B_BLOCKBYTES]; memset( block, 0, BLAKE2B_BLOCKBYTES ); memcpy( block, key, keylen ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES ); secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */ } #if defined(_OPENMP) #pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE) #else for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ ) #endif { #if defined(_OPENMP) size_t id__ = omp_get_thread_num(); #endif uint64_t inlen__ = inlen; const uint8_t *in__ = ( const uint8_t * )in; in__ += id__ * BLAKE2B_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES ) { blake2b_update( S[id__], in__, BLAKE2B_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; } if( inlen__ > id__ * BLAKE2B_BLOCKBYTES ) { const size_t left = inlen__ - id__ * BLAKE2B_BLOCKBYTES; const size_t len = left <= BLAKE2B_BLOCKBYTES ? 
left : BLAKE2B_BLOCKBYTES; blake2b_update( S[id__], in__, len ); } blake2b_final( S[id__], hash[id__], BLAKE2B_OUTBYTES ); } if( blake2bp_init_root( FS, outlen, keylen ) < 0 ) return -1; FS->last_node = 1; // Mark as last node for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES ); return blake2b_final( FS, out, outlen ); } #if defined(BLAKE2BP_SELFTEST) #include <string.h> #include "blake2-kat.h" int main( int argc, char **argv ) { uint8_t key[BLAKE2B_KEYBYTES]; uint8_t buf[KAT_LENGTH]; for( size_t i = 0; i < BLAKE2B_KEYBYTES; ++i ) key[i] = ( uint8_t )i; for( size_t i = 0; i < KAT_LENGTH; ++i ) buf[i] = ( uint8_t )i; for( size_t i = 0; i < KAT_LENGTH; ++i ) { uint8_t hash[BLAKE2B_OUTBYTES]; blake2bp( hash, buf, key, BLAKE2B_OUTBYTES, i, BLAKE2B_KEYBYTES ); if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) ) { puts( "error" ); return -1; } } puts( "ok" ); return 0; } #endif
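/*
  Editor's note (addition): a minimal usage sketch for the one-shot blake2bp()
  API above; illustration only, guarded out so it never conflicts with the
  selftest main. It hashes a short unkeyed message with the signature defined
  in this file and prints the digest in hex.
*/
#if 0
#include <stdio.h>
#include <string.h>

int main( void )
{
  const char msg[] = "abc";
  uint8_t out[BLAKE2B_OUTBYTES];
  if( blake2bp( out, msg, NULL, BLAKE2B_OUTBYTES, strlen( msg ), 0 ) < 0 ) return -1;
  for( size_t i = 0; i < BLAKE2B_OUTBYTES; ++i ) printf( "%02x", out[i] );
  printf( "\n" );
  return 0;
}
#endif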
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // call `.bind("name")` on match expressions that match the nodes you want to // access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child"))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the `.bind()` calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/LambdaCapture.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/ParentMapContext.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. /// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. /// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// Types of matchers for the top-level classes in the AST class /// hierarchy. 
/// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; using TemplateArgumentMatcher = internal::Matcher<TemplateArgument>; using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>; /// @} /// Matches any node. /// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. /// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. /// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); return RegExp->match(Filename); } /// Matches statements that are (transitively) expanded from the named macro. /// Does not match if only part of the statement is expanded from that macro or /// if different parts of the statement are expanded from different /// appearances of the macro. AST_POLYMORPHIC_MATCHER_P(isExpandedFromMacro, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, MacroName) { // Verifies that the statement's beginning and ending are both expanded from // the same instance of the given macro. auto& Context = Finder->getASTContext(); llvm::Optional<SourceLocation> B = internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context); if (!B) return false; llvm::Optional<SourceLocation> E = internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context); if (!E) return false; return *B == *E; } /// Matches declarations. /// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches decomposition-declarations. /// /// Examples matches the declaration node with \c foo and \c bar, but not /// \c number. /// (matcher = declStmt(has(decompositionDecl()))) /// /// \code /// int number = 42; /// auto [foo, bar] = std::make_pair{42, 42}; /// \endcode extern const internal::VariadicAllOfMatcher<DecompositionDecl> decompositionDecl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name. /// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label.
/// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. /// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. /// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. /// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. /// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. /// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. /// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches constructor initializers. /// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. 
/// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template arguments (with location info). /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgumentLoc() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc> templateArgumentLoc; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. extern const internal::VariadicAllOfMatcher<TemplateName> templateName; /// Matches non-type template parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// nonTypeTemplateParmDecl() /// matches 'N', but not 'T'. extern const internal::VariadicDynCastAllOfMatcher<Decl, NonTypeTemplateParmDecl> nonTypeTemplateParmDecl; /// Matches template type parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'T', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl> templateTypeParmDecl; /// Matches template template parameter declarations. /// /// Given /// \code /// template <template <typename> class Z, int N> struct C {}; /// \endcode /// templateTemplateParmDecl() /// matches 'Z', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTemplateParmDecl> templateTemplateParmDecl; /// Matches public C++ declarations and C++ base specifiers that specify public /// inheritance. /// /// Examples: /// \code /// class C { /// public: int a; // fieldDecl(isPublic()) matches 'a' /// protected: int b; /// private: int c; /// }; /// \endcode /// /// \code /// class Base {}; /// class Derived1 : public Base {}; // matches 'Base' /// struct Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPublic, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_public; } /// Matches protected C++ declarations and C++ base specifiers that specify /// protected inheritance. /// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; // fieldDecl(isProtected()) matches 'b' /// private: int c; /// }; /// \endcode /// /// \code /// class Base {}; /// class Derived : protected Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isProtected, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_protected; } /// Matches private C++ declarations and C++ base specifiers that specify private /// inheritance. /// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; // fieldDecl(isPrivate()) matches 'c' /// }; /// \endcode /// /// \code /// struct Base {}; /// struct Derived1 : private Base {}; // matches 'Base' /// class Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPrivate, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_private; } /// Matches non-static data members that are bit-fields. /// /// Given /// \code /// class C { /// int a : 2; /// int b; /// }; /// \endcode /// fieldDecl(isBitField()) /// matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); } /// Matches non-static data members that are bit-fields of the specified /// bit width. /// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); } /// Matches the specialized template of a specialization declaration. /// /// Given /// \code /// template<typename T> class A {}; #1 /// template<> class A<int> {}; #2 /// \endcode /// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl())) /// matches '#2' with classTemplateDecl() matching the class template /// declaration of 'A' at #1. AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate, internal::Matcher<ClassTemplateDecl>, InnerMatcher) { const ClassTemplateDecl* Decl = Node.getSpecializedTemplate(); return (Decl != nullptr && InnerMatcher.matches(*Decl, Finder, Builder)); } /// Matches a declaration that has been implicitly added /// by the compiler (e.g. implicit default/copy constructors). AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl that have at least one TemplateArgument matching the given /// InnerMatcher. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// /// template<typename T> void f() {}; /// void func() { f<int>(); }; /// \endcode /// /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(asString("int")))) /// matches the specialization \c A<int> /// /// functionDecl(hasAnyTemplateArgument(refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P( hasAnyTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder, Builder) != List.end(); } /// Causes all nested matchers to be matched with the specified traversal kind. /// /// Given /// \code /// void foo() /// { /// int i = 3.0; /// } /// \endcode /// The matcher /// \code /// traverse(TK_IgnoreUnlessSpelledInSource, /// varDecl(hasInitializer(floatLiteral().bind("init"))) /// ) /// \endcode /// matches the variable declaration with "init" bound to the "3.0".
template <typename T> internal::Matcher<T> traverse(TraversalKind TK, const internal::Matcher<T> &InnerMatcher) { return internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>(); } template <typename T> internal::BindableMatcher<T> traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) { return internal::BindableMatcher<T>( internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>()); } template <typename... T> internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>> traverse(TraversalKind TK, const internal::VariadicOperatorMatcher<T...> &InnerMatcher) { return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>( TK, InnerMatcher); } template <template <typename ToArg, typename FromArg> class ArgumentAdapterT, typename T, typename ToTypes> internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>> traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor< ArgumentAdapterT, T, ToTypes> &InnerMatcher) { return internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>(TK, InnerMatcher); } template <template <typename T, typename P1> class MatcherT, typename P1, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1< MatcherT, P1, ReturnTypesF> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>( TK, InnerMatcher); } template <template <typename T, typename P1, typename P2> class MatcherT, typename P1, typename P2, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2< MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>( TK, InnerMatcher); } template <typename... T> internal::Matcher<typename internal::GetClade<T...>::Type> traverse(TraversalKind TK, const internal::MapAnyOfHelper<T...> &InnerMatcher) { return traverse(TK, InnerMatcher.with()); } /// Matches expressions that match InnerMatcher after any implicit AST /// nodes are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// class C {}; /// C a = C(); /// C b; /// C c = b; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr()))) /// \endcode /// would match the declarations for a, b, and c. /// While /// \code /// varDecl(hasInitializer(cxxConstructExpr())) /// \endcode /// only match the declarations for b and c. AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit casts /// are stripped off. /// /// Parentheses and explicit casts are not discarded. 
/// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. /// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. /// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>, InnerMatcher, 0) { return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder); } /// Overload \c ignoringParens for \c Expr. /// /// Given /// \code /// const char* str = ("my-string"); /// \endcode /// The matcher /// \code /// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral()))) /// \endcode /// would match the implicit cast resulting from the assignment. AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>, InnerMatcher, 1) { const Expr *E = Node.IgnoreParens(); return InnerMatcher.matches(*E, Finder, Builder); } /// Matches expressions that are instantiation-dependent even if it is /// neither type- nor value-dependent. /// /// In the following example, the expression sizeof(sizeof(T() + T())) /// is instantiation-dependent (since it involves a template parameter T), /// but is neither type- nor value-dependent, since the type of the inner /// sizeof is known (std::size_t) and therefore the size of the outer /// sizeof is known. 
/// \code
///   template<typename T>
///   void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  return Node.isInstantiationDependent();
}

/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
///   template<typename T>
///   void add(T x, int y) {
///     x + y;
///   }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); }

/// Matches expressions that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
///   template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); }

/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given
/// InnerMatcher.
///
/// Given
/// \code
///   template<typename T, typename U> class A {};
///   A<bool, int> b;
///   A<int, bool> c;
///
///   template<typename T> void f() {}
///   void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
///     1, refersToType(asString("int"))))
///   matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType, FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  if (List.size() <= N)
    return false;
  return InnerMatcher.matches(List[N], Finder, Builder);
}

/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
///   template<typename T> struct C {};
///   C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
///   matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  return internal::getTemplateSpecializationArgs(Node).size() == N;
}

/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
///   struct X {};
///   template<typename T> struct A {};
///   A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToType(asString("X"))))
///   matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>,
              InnerMatcher) {
  if (Node.getKind() != TemplateArgument::Type)
    return false;
  return InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}

/// Matches a TemplateArgument that refers to a certain template.
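///
/// As an illustrative sketch, it can also constrain a specific argument
/// position via \c hasTemplateArgument:
/// \code
///   classTemplateSpecializationDecl(
///       hasTemplateArgument(0, refersToTemplate(templateName())))
/// \endcode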
///
/// Given
/// \code
///   template<template <typename> class S> class X {};
///   template<typename T> class Y {};
///   X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToTemplate(templateName())))
///   matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  if (Node.getKind() != TemplateArgument::Template)
    return false;
  return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}

/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
///   struct B { int next; };
///   template<int(B::*next_ptr)> struct A {};
///   A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToDeclaration(fieldDecl(hasName("next")))))
///   matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
///     \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  if (Node.getKind() == TemplateArgument::Declaration)
    return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
  return false;
}

/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
///   struct B { int next; };
///   template<int(B::*next_ptr)> struct A {};
///   A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
///   isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
///   matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
///     \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  if (Node.getKind() == TemplateArgument::Expression)
    return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
  return false;
}

/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
///   template<int T> struct C {};
///   C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
///   hasAnyTemplateArgument(isIntegral()))
///   matches the implicit instantiation of C in C<42>
///   with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
  return Node.getKind() == TemplateArgument::Integral;
}

/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
///   template<int T> struct C {};
///   C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
///   hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
///   matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}

/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
///   template<int T> struct C {};
///   C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
///   hasAnyTemplateArgument(equalsIntegralValue("42")))
///   matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  return Node.getAsIntegral().toString(10) == Value;
}

/// Matches an Objective-C autorelease pool statement.
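///
/// As an illustrative sketch, the match can be narrowed to pools that
/// contain at least one call:
/// \code
///   autoreleasePoolStmt(hasDescendant(callExpr()))
/// \endcode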
/// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. /// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches tag declarations. /// /// Example matches X, Z, U, S, E /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// enum E { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl; /// Matches method declarations. /// /// Example matches y /// \code /// class X { void y(); }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl; /// Matches conversion operator declarations. /// /// Example matches the operator. /// \code /// class X { operator int() const; }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl> cxxConversionDecl; /// Matches user-defined and implicitly generated deduction guide. /// /// Example matches the deduction guide. /// \code /// template<typename T> /// class X { X(int) }; /// X(int) -> X<int>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl> cxxDeductionGuideDecl; /// Matches variable declarations. /// /// Note: this does not match declarations of member variables, which are /// "field" declarations in Clang parlance. /// /// Example matches a /// \code /// int a; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl; /// Matches field declarations. /// /// Given /// \code /// class X { int m; }; /// \endcode /// fieldDecl() /// matches 'm'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl; /// Matches indirect field declarations. /// /// Given /// \code /// struct X { struct { int a; }; }; /// \endcode /// indirectFieldDecl() /// matches 'a'. extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl> indirectFieldDecl; /// Matches function declarations. /// /// Example matches f /// \code /// void f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl; /// Matches C++ function template declarations. 
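///
/// As an illustrative sketch, the templated function itself can be
/// constrained through \c has:
/// \code
///   functionTemplateDecl(has(functionDecl(hasName("f"))))
/// \endcode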
/// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. /// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. /// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. /// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. /// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. 
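///
/// As an illustrative sketch, interfaces can be filtered by name like any
/// other named declaration:
/// \code
///   objcInterfaceDecl(hasName("Foo"))
/// \endcode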
/// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. /// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. /// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. /// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. /// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. 
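///
/// As an illustrative sketch, a list containing an integer literal anywhere
/// inside it can be selected with:
/// \code
///   initListExpr(hasDescendant(integerLiteral()))
/// \endcode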
///
/// Given
/// \code
///   int a[] = { 1, 2 };
///   struct B { int x, y; };
///   B b = { 5, 6 };
/// \endcode
/// initListExpr()
///   matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
    initListExpr;

/// Matches the syntactic form of init list expressions
/// (if the expression has one).
AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *SyntForm = Node.getSyntacticForm();
  return (SyntForm != nullptr &&
          InnerMatcher.matches(*SyntForm, Finder, Builder));
}

/// Matches C++ initializer list expressions.
///
/// Given
/// \code
///   std::vector<int> a({ 1, 2, 3 });
///   std::vector<int> b = { 4, 5 };
///   int c[] = { 6, 7 };
///   std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
///   matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   CXXStdInitializerListExpr>
    cxxStdInitializerListExpr;

/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
///   point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
///   matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
    implicitValueInitExpr;

/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can appear in template declarations.
///
/// Given
/// \code
///   template<typename T> class X {
///     void f() {
///       X x(*this);
///       int a = 0, b = 1; int i = (a, b);
///     }
///   };
/// \endcode
/// parenListExpr() matches "*this" but does NOT match (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
    parenListExpr;

/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
///   template <int N>
///   struct A { static const int n = N; };
///   struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
///   matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   SubstNonTypeTemplateParmExpr>
    substNonTypeTemplateParmExpr;

/// Matches using declarations.
///
/// Given
/// \code
///   namespace X { int x; }
///   using X::x;
/// \endcode
/// usingDecl()
///   matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;

/// Matches using namespace declarations.
///
/// Given
/// \code
///   namespace X { int x; }
///   using namespace X;
/// \endcode
/// usingDirectiveDecl()
///   matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
    usingDirectiveDecl;

/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
///   template<typename T>
///   T foo() { T a; return a; }
///   template<typename T>
///   void bar() {
///     foo<T>();
///   }
/// \endcode
/// unresolvedLookupExpr()
///   matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
    unresolvedLookupExpr;

/// Matches unresolved using value declarations.
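///
/// As an illustrative sketch, the declaration can be required to appear in a
/// particular record:
/// \code
///   cxxRecordDecl(hasName("C"), has(unresolvedUsingValueDecl()))
/// \endcode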
/// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. /// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. /// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. /// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. /// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. 
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches noexcept expressions. /// /// Given /// \code /// bool a() noexcept; /// bool b() noexcept(true); /// bool c() noexcept(false); /// bool d() noexcept(noexcept(a())); /// bool e = noexcept(b()) || noexcept(c()); /// \endcode /// cxxNoexceptExpr() /// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`. /// doesn't match the noexcept specifier in the declarations a, b, c or d. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr> cxxNoexceptExpr; /// Matches array subscript expressions. /// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. /// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode /// See also the binaryOperation() matcher for more-general matching of binary /// uses of this AST node. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches rewritten binary operators /// /// Example matches use of "<": /// \code /// #include <compare> /// struct HasSpaceshipMem { /// int a; /// constexpr auto operator<=>(const HasSpaceshipMem&) const = default; /// }; /// void compare() { /// HasSpaceshipMem hs1, hs2; /// if (hs1 < hs2) /// return; /// } /// \endcode /// See also the binaryOperation() matcher for more-general matching /// of this AST node. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXRewrittenBinaryOperator> cxxRewrittenBinaryOperator; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. /// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. 
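///
/// As an illustrative sketch, forStmt combines with the loop-specific
/// traversal matchers defined below:
/// \code
///   forStmt(hasLoopInit(declStmt()),
///           hasIncrement(unaryOperator(hasOperatorName("++"))))
/// \endcode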
///
/// Example matches 'for (;;) {}'
/// \code
///   for (;;) {}
///   int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;

/// Matches the increment statement of a for loop.
///
/// Example:
///     forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
///     for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  const Stmt *const Increment = Node.getInc();
  return (Increment != nullptr &&
          InnerMatcher.matches(*Increment, Finder, Builder));
}

/// Matches the initialization statement of a for loop.
///
/// Example:
///     forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
///     for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  const Stmt *const Init = Node.getInit();
  return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}

/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
///   int i[] = {1, 2, 3}; for (auto a : i);
///   for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
    cxxForRangeStmt;

/// Matches the loop variable of a range-based for loop.
///
/// Example:
///     cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  const VarDecl *const Var = Node.getLoopVariable();
  return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder));
}

/// Matches the range initialization expression of a range-based for loop.
///
/// Example:
///     cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *const Init = Node.getRangeInit();
  return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}

/// Matches while statements.
///
/// Given
/// \code
///   while (true) {}
/// \endcode
/// whileStmt()
///   matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;

/// Matches do statements.
///
/// Given
/// \code
///   do {} while (true);
/// \endcode
/// doStmt()
///   matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;

/// Matches break statements.
///
/// Given
/// \code
///   while (true) { break; }
/// \endcode
/// breakStmt()
///   matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;

/// Matches continue statements.
///
/// Given
/// \code
///   while (true) { continue; }
/// \endcode
/// continueStmt()
///   matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
    continueStmt;

/// Matches return statements.
///
/// Given
/// \code
///   return 1;
/// \endcode
/// returnStmt()
///   matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt>
    returnStmt;

/// Matches goto statements.
///
/// Given
/// \code
///   goto FOO;
///   FOO: bar();
/// \endcode
/// gotoStmt()
///   matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;

/// Matches label statements.
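///
/// As an illustrative sketch, labels whose substatement contains a call can
/// be selected with:
/// \code
///   labelStmt(hasDescendant(callExpr()))
/// \endcode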
/// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. /// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. /// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. 
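///
/// As an illustrative sketch (\c equals is declared elsewhere in this
/// header), a specific character value can be required:
/// \code
///   characterLiteral(equals(0U))
/// \endcode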
///
/// Example matches 'a', L'a'
/// \code
///   char ch = 'a';
///   wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
    characterLiteral;

/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
    integerLiteral;

/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
///   float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
    floatLiteral;

/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
    imaginaryLiteral;

/// Matches fixed-point literals.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
    fixedPointLiteral;

/// Matches user-defined literal operator calls.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
    userDefinedLiteral;

/// Matches compound (i.e. non-scalar) literals.
///
/// Example match: {1}, (1, 2)
/// \code
///   int array[4] = {1};
///   vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
    compoundLiteralExpr;

/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
    cxxNullPtrLiteralExpr;

/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
    chooseExpr;

/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
    gnuNullExpr;

/// Matches C11 _Generic expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GenericSelectionExpr>
    genericSelectionExpr;

/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
///   void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr>
    atomicExpr;

/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
///   int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;

/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
///   !(a || b)
/// \endcode
/// See also the binaryOperation() matcher for more-general matching.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
    binaryOperator;

/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
///   !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
    unaryOperator;

/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
///   (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
    conditionalOperator;

/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
///   (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   BinaryConditionalOperator>
    binaryConditionalOperator;

/// Matches opaque value expressions. They are used as helpers
/// to reference other expressions and can appear
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
///   (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
    opaqueValueExpr;

/// Matches a C++ static_assert declaration.
///
/// Example:
///   staticAssertDecl()
/// matches
///   static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
///   struct S {
///     int x;
///   };
///   static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
    staticAssertDecl;

/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
///   void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   CXXReinterpretCastExpr>
    cxxReinterpretCastExpr;

/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
///   cxxStaticCastExpr()
/// matches
///   static_cast<long>(8)
/// in
/// \code
///   long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
    cxxStaticCastExpr;

/// Matches a dynamic_cast expression.
///
/// Example:
///   cxxDynamicCastExpr()
/// matches
///   dynamic_cast<D*>(&b);
/// in
/// \code
///   struct B { virtual ~B() {} }; struct D : B {};
///   B b;
///   D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
    cxxDynamicCastExpr;

/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
///   int n = 42;
///   const int &r(n);
///   int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
    cxxConstCastExpr;

/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
///   int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
    cStyleCastExpr;

/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
///   int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
///   long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
    explicitCastExpr;

/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
    implicitCastExpr;

/// Matches any cast nodes of Clang's AST.
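///
/// As an illustrative sketch (\c hasSourceExpression is declared elsewhere
/// in this header), the operand of any cast can be constrained with:
/// \code
///   castExpr(hasSourceExpression(declRefExpr()))
/// \endcode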
///
/// Example: castExpr() matches each of the following:
/// \code
///   (int) 3;
///   const_cast<Expr *>(SubExpr);
///   char c = 0;
/// \endcode
/// but does not match
/// \code
///   int i = (0);
///   int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;

/// Matches functional cast expressions.
///
/// Example: Matches Foo(bar);
/// \code
///   Foo f = bar;
///   Foo g = (Foo) bar;
///   Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
    cxxFunctionalCastExpr;

/// Matches functional cast expressions having N != 1 arguments.
///
/// Example: Matches Foo(bar, bar)
/// \code
///   Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   CXXTemporaryObjectExpr>
    cxxTemporaryObjectExpr;

/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
///   printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
    predefinedExpr;

/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
///   point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
    designatedInitExpr;

/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
///   point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
///   point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
///   matches '{ [2].y = 1.0, [0].x = 1.0 }',
///   but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  return Node.size() == N;
}

/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;

/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;

/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;

/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
///   class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
///                        has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    eachOf;

/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    anyOf;

/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    allOf;

/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the
/// submatcher. Useful when additional information that may or may not be
/// present about a main matching node is desired.
///
/// For example, in:
/// \code
///   class Foo {
///     int bar;
///   };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(
///     optionally(has(
///       fieldDecl(hasName("bar")).bind("var")
///   ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;

/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
///   Foo x = bar;
///   int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
///   matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   UnaryExprOrTypeTraitExpr>
    unaryExprOrTypeTraitExpr;

/// Matches any of the \p NodeMatchers with InnerMatchers nested within.
///
/// Given
/// \code
///   if (true);
///   for (; true; );
/// \endcode
/// with the matcher
/// \code
///   mapAnyOf(ifStmt, forStmt).with(
///       hasCondition(cxxBoolLiteralExpr(equals(true)))
///     ).bind("trueCond")
/// \endcode
/// matches the \c if and the \c for. It is equivalent to:
/// \code
///   auto trueCond = hasCondition(cxxBoolLiteralExpr(equals(true)));
///   anyOf(
///     ifStmt(trueCond).bind("trueCond"),
///     forStmt(trueCond).bind("trueCond")
///   );
/// \endcode
///
/// The with() chain-call accepts zero or more matchers which are combined
/// as-if with allOf() in each of the node matchers.
/// Usable as: Any Matcher
template <typename T, typename... U>
auto mapAnyOf(internal::VariadicDynCastAllOfMatcher<T, U> const &...) {
  return internal::MapAnyOfHelper<U...>();
}

/// Matches nodes which can be used with binary operators.
///
/// The code
/// \code
///   var1 != var2;
/// \endcode
/// might be represented in the clang AST as a binaryOperator, a
/// cxxOperatorCallExpr or a cxxRewrittenBinaryOperator, depending on
///
/// * whether the types of var1 and var2 are fundamental (binaryOperator) or
///   at least one is a class type (cxxOperatorCallExpr)
/// * whether the code appears in a template declaration, if at least one of
///   the vars is a dependent-type (binaryOperator)
/// * whether the code relies on a rewritten binary operator, such as a
///   spaceship operator or an inverted equality operator
///   (cxxRewrittenBinaryOperator)
///
/// This matcher elides details in places where the matchers for the nodes
/// are compatible.
///
/// Given
/// \code
///   binaryOperation(
///     hasOperatorName("!="),
///     hasLHS(expr().bind("lhs")),
///     hasRHS(expr().bind("rhs"))
///   )
/// \endcode
/// matches each use of "!=" in:
/// \code
///   struct S{
///       bool operator!=(const S&) const;
///   };
///
///   void foo()
///   {
///      1 != 2;
///      S() != S();
///   }
///
///   template<typename T>
///   void templ()
///   {
///      1 != 2;
///      T() != S();
///   }
///   struct HasOpEq
///   {
///       bool operator==(const HasOpEq &) const;
///   };
///
///   void inverse()
///   {
///       HasOpEq s1;
///       HasOpEq s2;
///       if (s1 != s2)
///           return;
///   }
///
///   struct HasSpaceship
///   {
///       bool operator<=>(const HasOpEq &) const;
///   };
///
///   void use_spaceship()
///   {
///       HasSpaceship s1;
///       HasSpaceship s2;
///       if (s1 != s2)
///           return;
///   }
/// \endcode
extern const internal::MapAnyOfMatcher<BinaryOperator, CXXOperatorCallExpr,
                                       CXXRewrittenBinaryOperator>
    binaryOperation;

/// Matches unary expressions that have a specific type of argument.
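///
/// As an illustrative sketch (\c asString is declared elsewhere in this
/// header), it combines with the \c sizeOfExpr helper defined below:
/// \code
///   sizeOfExpr(hasArgumentOfType(asString("float")))
/// \endcode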
///
/// Given
/// \code
///   int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
///   matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  const QualType ArgumentType = Node.getTypeOfArgument();
  return InnerMatcher.matches(ArgumentType, Finder, Builder);
}

/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
///   int x;
///   int s = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
///   matches \c sizeof(x)
///
/// If the matcher is used from clang-query, the UnaryExprOrTypeTrait
/// parameter should be passed as a quoted string, e.g. ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  return Node.getKind() == Kind;
}

/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::BindableMatcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)),
            InnerMatcher)));
}

/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::BindableMatcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(
      unaryExprOrTypeTraitExpr(allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}

/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  return internal::Matcher<NamedDecl>(
      new internal::HasNameMatcher({std::string(Name)}));
}

/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
///     hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
///     anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>,
                                        StringRef, internal::hasAnyNameFunc>
    hasAnyName;

/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
  std::string FullNameString = "::" + Node.getQualifiedNameAsString();
  return RegExp->match(FullNameString);
}

/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
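///
/// As an illustrative sketch, call operators can be selected with:
/// \code
///   cxxMethodDecl(hasOverloadedOperatorName("()"))
/// \endcode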
///
/// Given:
/// \code
///   class A { int operator*(); };
///   const A &operator<<(const A &a, const A &b);
///   A a;
///   a << a;   // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<")) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  return internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(
      {std::string(Name)});
}

/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
///   hasAnyOverloadedOperatorName("+", "-")
/// is equivalent to
///   anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
extern const internal::VariadicFunction<
    internal::PolymorphicMatcherWithParam1<
        internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>,
    StringRef, internal::hasAnyOverloadedOperatorNameFunc>
    hasAnyOverloadedOperatorName;

/// Matches template-dependent, but known, member names.
///
/// In template declarations, dependent members are not resolved and so can
/// not be matched to particular named declarations.
///
/// This matcher allows matching on the known name of members.
///
/// Given
/// \code
///   template <typename T>
///   struct S {
///       void mem();
///   };
///   template <typename T>
///   void x() {
///       S<T> s;
///       s.mem();
///   }
/// \endcode
/// \c cxxDependentScopeMemberExpr(hasMemberName("mem")) matches `s.mem()`
AST_MATCHER_P(CXXDependentScopeMemberExpr, hasMemberName, std::string, N) {
  return Node.getMember().getAsString() == N;
}

/// Matches template-dependent, but known, member names against an
/// already-bound node.
///
/// In template declarations, dependent members are not resolved and so can
/// not be matched to particular named declarations.
///
/// This matcher allows matching on the name of already-bound VarDecl,
/// FieldDecl and CXXMethodDecl nodes.
/// /// Given /// \code /// template <typename T> /// struct S { /// void mem(); /// }; /// template <typename T> /// void x() { /// S<T> s; /// s.mem(); /// } /// \endcode /// The matcher /// @code /// \c cxxDependentScopeMemberExpr( /// hasObjectExpression(declRefExpr(hasType(templateSpecializationType( /// hasDeclaration(classTemplateDecl(has(cxxRecordDecl(has( /// cxxMethodDecl(hasName("mem")).bind("templMem") /// ))))) /// )))), /// memberHasSameNameAsBoundNode("templMem") /// ) /// @endcode /// first matches and binds the @c mem member of the @c S template, then /// compares its name to the usage in @c s.mem() in the @c x function template AST_MATCHER_P(CXXDependentScopeMemberExpr, memberHasSameNameAsBoundNode, std::string, BindingID) { auto MemberName = Node.getMember().getAsString(); return Builder->removeBindings( [this, MemberName](const BoundNodesMap &Nodes) { const auto &BN = Nodes.getNode(this->BindingID); if (const auto *ND = BN.get<NamedDecl>()) { if (!isa<FieldDecl, CXXMethodDecl, VarDecl>(ND)) return true; return ND->getName() != MemberName; } return true; }); } /// Matches C++ classes that are directly or indirectly derived from a class /// matching \c Base, or Objective-C classes that directly or indirectly /// subclass a class matching \c Base. /// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, Z, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("NSObject")) /// \code /// @interface NSObject @end /// @interface Bar : NSObject @end /// \endcode /// /// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl> AST_POLYMORPHIC_MATCHER_P( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/false); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ classes that have a direct or indirect base matching \p /// BaseSpecMatcher. 
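///
/// As an illustrative sketch (\c isVirtual also applies to base specifiers),
/// classes with any virtual base can be selected with:
/// \code
///   cxxRecordDecl(hasAnyBase(isVirtual()))
/// \endcode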
/// /// Example: /// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; //matches IndirectlyDerived /// \endcode /// // FIXME: Refactor this and isDerivedFrom to reuse implementation. AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder); } /// Matches C++ classes that have a direct base matching \p BaseSpecMatcher. /// /// Example: /// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; // doesn't match /// \endcode AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return Node.hasDefinition() && llvm::any_of(Node.bases(), [&](const CXXBaseSpecifier &Base) { return BaseSpecMatcher.matches(Base, Finder, Builder); }); } /// Similar to \c isDerivedFrom(), but also matches classes that directly /// match \c Base. AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { const auto M = anyOf(Base, isDerivedFrom(Base)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Overloaded method as shortcut for /// \c isSameOrDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isSameOrDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ or Objective-C classes that are directly derived from a class /// matching \c Base. /// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDirectlyDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true); // The node must be an Objective-C class. 
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/true); } /// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDirectlyDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDirectlyDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches the first method of a class or struct that satisfies \c /// InnerMatcher. /// /// Given: /// \code /// class A { void func(); }; /// class B { void member(); }; /// \endcode /// /// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of /// \c A but not \c B. AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result(*Builder); auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.method_begin(), Node.method_end(), Finder, &Result); if (MatchIt == Node.method_end()) return false; if (Finder->isTraversalIgnoringImplicitNodes() && (*MatchIt)->isImplicit()) return false; *Builder = std::move(Result); return true; } /// Matches the generated class of lambda expressions. /// /// Given: /// \code /// auto x = []{}; /// \endcode /// /// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of /// \c decltype(x) AST_MATCHER(CXXRecordDecl, isLambda) { return Node.isLambda(); } /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y /// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. 
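///
/// For illustration, a binding sketch (the id "field" is a made-up name for
/// this example):
/// \code
///   cxxRecordDecl(forEach(fieldDecl().bind("field")))
/// \endcode
/// yields one match result, with "field" bound, per field of the matched
/// record.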
/// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. /// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. /// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. /// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. 
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
///   class X {};
///   typedef X Y;
///   Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
///   varDecl(hasType(hasUnqualifiedDesugaredType(
///       recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
///   Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
///   Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
///   Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
///   Matcher<TagType>, Matcher<TemplateSpecializationType>,
///   Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
///   Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasDeclarationMatcher, internal::Matcher<Decl>,
    void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  return internal::PolymorphicMatcherWithParam1<
      internal::HasDeclarationMatcher, internal::Matcher<Decl>,
      void(internal::HasDeclarationSupportedTypes)>(InnerMatcher);
}

/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
///   namespace N { template<class T> void f(T t); }
///   template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
///     namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
///   matches the use of \c f in \c g().
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl();

  return UnderlyingDecl != nullptr &&
         InnerMatcher.matches(*UnderlyingDecl, Finder, Builder);
}

/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y {};
///   void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
///   matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
///   matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *ExprNode = Node.getImplicitObjectArgument()
                            ->IgnoreParenImpCasts();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches on the receiver of an Objective-C message expression.
///
/// Example
/// matcher = objcMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
///   NSString *webViewJavaScript = ...
///   UIWebView *webView = ...
///   [webView stringByEvaluatingJavaScriptFromString:webViewJavaScript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  const QualType TypeDecl = Node.getReceiverType();
  return InnerMatcher.matches(TypeDecl, Finder, Builder);
}

/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
///   @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
///   @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  return Node.isClassMethod();
}

/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
///   @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
///   @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  return Node.isInstanceMethod();
}

/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
///   [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  return Node.isClassMessage();
}

/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
/// but not
/// \code
///   [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  return Node.isInstanceMessage();
}

/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *ReceiverNode = Node.getInstanceReceiver();
  return (ReceiverNode != nullptr &&
          InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder,
                               Builder));
}

/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objcMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  Selector Sel = Node.getSelector();
  return BaseName.compare(Sel.getAsString()) == 0;
}

/// Matches when at least one of the supplied strings equals
/// Selector.getAsString()
///
/// matcher = objcMessageExpr(hasAnySelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
///   [myObj methodA:argA];
///   [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
                                        StringRef,
                                        internal::hasAnySelectorFunc>
    hasAnySelector;

/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objcMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) {
  std::string SelectorString = Node.getSelector().getAsString();
  return RegExp->match(SelectorString);
}

/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objcMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  return Node.getSelector().isNull();
}

/// Matches when the selector is a unary selector
///
/// matcher = objcMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  return Node.getSelector().isUnarySelector();
}

/// Matches when the selector is a keyword selector
///
/// objcMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
///   UIWebView *webView = ...;
///   CGRect bodyFrame = webView.frame;
///   bodyFrame.size.height = self.bodyContentHeight;
///   webView.frame = bodyFrame;
///   //     ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  return Node.getSelector().isKeywordSelector();
}

/// Matches when the selector has the specified number of arguments
///
/// matcher = objcMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objcMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  return Node.getSelector().getNumArgs() == N;
}

/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
///   class Y { void x() { this->x(); x(); Y y; y.x(); } };
///   void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
///   matches this->x(), x(), y.x(), f()
/// with callee(...)
///   matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  const Expr *ExprNode = Node.getCallee();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
///                                    cxxMethodDecl(hasName("x")))))
/// \code
///   class Y { public: void x(); };
///   void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}

/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
///             and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
///             and U (matcher = typedefDecl(hasType(asString("int"))))
///             and friend class X
///               (matcher = friendDecl(hasType(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};
///   void y(X &x) { x; X z; }
///   typedef int U;
///   class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  QualType QT = internal::getUnderlyingType(Node);
  if (!QT.isNull())
    return InnerMatcher.matches(QT, Finder, Builder);
  return false;
}

/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
///             and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
///             and friend class X
///               (matcher = friendDecl(hasType(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};
///   void y(X &x) { x; X z; }
///   class Y { friend class X; };
/// \endcode
///
/// Example matches class Derived
/// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base"))))))
/// \code
///   class Base {};
///   class Derived : Base {};
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>,
/// Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl,
                                    CXXBaseSpecifier),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  QualType QT = internal::getUnderlyingType(Node);
  if (!QT.isNull())
    return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder);
  return false;
}

/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
///   int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
///   matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  if (!Node.getTypeSourceInfo())
    // This happens for example for implicit destructors.
    return false;
  return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder,
                       Builder);
}

/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
///   class Y { public: void x(); };
///   void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
///   matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  return Name == Node.getAsString();
}

/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
///   (matcher = cxxMemberCallExpr(on(hasType(pointsTo(
///       cxxRecordDecl(hasName("Y")))))))
/// \code
///   class Y { public: void x(); };
///   void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  return (!Node.isNull() && Node->isAnyPointerType() &&
          InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}

/// Overloaded to match the pointee type's declaration.
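///
/// For example (an illustrative sketch), given
/// \code
///   class Y {};
///   Y *p;
/// \endcode
/// varDecl(hasType(pointsTo(cxxRecordDecl(hasName("Y")))))
///   matches the declaration of \c p.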
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
///   class A {};
///   using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}

/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
///     (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
///   class X {
///     void a(X b) {
///       X &x = b;
///       const X &y = b;
///     }
///   };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  return (!Node.isNull() && Node->isReferenceType() &&
          InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}

/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
///   typedef int &int_ref;
///   int a;
///   int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))) will not match the
/// declaration of b but
/// \c varDecl(hasType(qualType(hasCanonicalType(referenceType())))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  if (Node.isNull())
    return false;
  return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}

/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y { void g(); };
///   void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and `(g()).m()`, but not `x.g()`.
/// cxxMemberCallExpr(onImplicitObjectArgument(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   class X : public Y { void g(); };
///   void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("X")))))
///   matches `x.g()`.
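///
/// With the second overload below, which takes a declaration matcher
/// directly, the last example can also be written more compactly (sketch):
/// \code
///   cxxMemberCallExpr(thisPointerType(cxxRecordDecl(hasName("X"))))
/// \endcode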
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType, internal::Matcher<QualType>, InnerMatcher, 0) { return onImplicitObjectArgument( anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))) .matches(Node, Finder, Builder); } /// Overloaded to match the type's declaration. AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType, internal::Matcher<Decl>, InnerMatcher, 1) { return onImplicitObjectArgument( anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))) .matches(Node, Finder, Builder); } /// Matches a DeclRefExpr that refers to a declaration that matches the /// specified matcher. /// /// Example matches x in if(x) /// (matcher = declRefExpr(to(varDecl(hasName("x"))))) /// \code /// bool x; /// if (x) {} /// \endcode AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) { const Decl *DeclNode = Node.getDecl(); return (DeclNode != nullptr && InnerMatcher.matches(*DeclNode, Finder, Builder)); } /// Matches a \c DeclRefExpr that refers to a declaration through a /// specific using shadow declaration. /// /// Given /// \code /// namespace a { void f() {} } /// using a::f; /// void g() { /// f(); // Matches this .. /// a::f(); // .. but not this. /// } /// \endcode /// declRefExpr(throughUsingDecl(anything())) /// matches \c f() AST_MATCHER_P(DeclRefExpr, throughUsingDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { const NamedDecl *FoundDecl = Node.getFoundDecl(); if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl)) return InnerMatcher.matches(*UsingDecl, Finder, Builder); return false; } /// Matches an \c OverloadExpr if any of the declarations in the set of /// overloads matches the given matcher. /// /// Given /// \code /// template <typename T> void foo(T); /// template <typename T> void bar(T); /// template <typename T> void baz(T t) { /// foo(t); /// bar(t); /// } /// \endcode /// unresolvedLookupExpr(hasAnyDeclaration( /// functionTemplateDecl(hasName("foo")))) /// matches \c foo in \c foo(t); but not \c bar in \c bar(t); AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder) != Node.decls_end(); } /// Matches the Decl of a DeclStmt which has a single declaration. /// /// Given /// \code /// int a, b; /// int c; /// \endcode /// declStmt(hasSingleDecl(anything())) /// matches 'int c;' but not 'int a, b;'. AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) { if (Node.isSingleDecl()) { const Decl *FoundDecl = Node.getSingleDecl(); return InnerMatcher.matches(*FoundDecl, Finder, Builder); } return false; } /// Matches a variable declaration that has an initializer expression /// that matches the given matcher. /// /// Example matches x (matcher = varDecl(hasInitializer(callExpr()))) /// \code /// bool y() { return true; } /// bool x = y(); /// \endcode AST_MATCHER_P( VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getAnyInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// \brief Matches a static variable with local scope. /// /// Example matches y (matcher = varDecl(isStaticLocal())) /// \code /// void f() { /// int x; /// static int y; /// } /// static int z; /// \endcode AST_MATCHER(VarDecl, isStaticLocal) { return Node.isStaticLocal(); } /// Matches a variable declaration that has function scope and is a /// non-static local variable. 
///
/// Example matches x (matcher = varDecl(hasLocalStorage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  return Node.hasLocalStorage();
}

/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  return Node.hasGlobalStorage();
}

/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration()))
/// \code
/// void f() {
///   int x;
///   static int y;
///   thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  return Node.getStorageDuration() == SD_Automatic;
}

/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
///   int x;
///   static int y;
///   thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// \endcode
/// varDecl(hasStaticStorageDuration())
///   matches the variable declarations y, a, b and c.
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  return Node.getStorageDuration() == SD_Static;
}

/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration()))
/// \code
/// void f() {
///   int x;
///   static int y;
///   thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  return Node.getStorageDuration() == SD_Thread;
}

/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable()))
/// \code
/// void f(int y) {
///   try {
///   } catch (int x) {
///   }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  return Node.isExceptionVariable();
}

/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
///   void f(int x, int y);
///   f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          unsigned, N) {
  unsigned NumArgs = Node.getNumArgs();
  if (!Finder->isTraversalIgnoringImplicitNodes())
    return NumArgs == N;
  while (NumArgs) {
    if (!isa<CXXDefaultArgExpr>(Node.getArg(NumArgs - 1)))
      break;
    --NumArgs;
  }
  return NumArgs == N;
}

/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
///     (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
///   void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(
                               CallExpr, CXXConstructExpr,
                               CXXUnresolvedConstructExpr, ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>,
                           InnerMatcher) {
  if (N >= Node.getNumArgs())
    return false;
  const Expr *Arg = Node.getArg(N);
  if (Finder->isTraversalIgnoringImplicitNodes() &&
      isa<CXXDefaultArgExpr>(Arg))
    return false;
  return InnerMatcher.matches(*Arg->IgnoreParenImpCasts(), Finder, Builder);
}

/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
///     (matcher = initListExpr(hasInit(0, expr())))
/// \code
///   int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  return N < Node.getNumInits() &&
         InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}

/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
///   int a, b;
///   int c;
///   int d = 2, e;
/// \endcode
/// declCountIs(2)
///   matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N;
}

/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
///   int a, b = 0;
///   int c;
///   int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
///       0, varDecl(hasInitializer(anything()))))
///   matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
///   matches 'int a, b = 0;' as well as 'int d = 2, e;',
///   but 'int c;' is not matched.
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  DeclStmt::const_decl_iterator Iterator = Node.decl_begin();
  std::advance(Iterator, N);
  return InnerMatcher.matches(**Iterator, Finder, Builder);
}

/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
///   try {
///     // ...
///   } catch (int) {
///     // ...
///   } catch (...) {
///     // ...
///   }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  return Node.getExceptionDecl() == nullptr;
}

/// Matches a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
///   hasAnyConstructorInitializer(anything())
/// )))
///   record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
                                            Node.init_end(), Finder, Builder);
  if (MatchIt == Node.init_end())
    return false;
  return (*MatchIt)->isWritten() ||
         !Finder->isTraversalIgnoringImplicitNodes();
}

/// Matches the field declaration of a constructor initializer.
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). /// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) /// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { if (Finder->isTraversalIgnoringImplicitNodes() && isa<CXXDefaultArgExpr>(Arg)) break; BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches any capture of a lambda expression. 
///
/// Given
/// \code
///   void foo() {
///     int x;
///     auto f = [x](){};
///   }
/// \endcode
/// lambdaExpr(hasAnyCapture(anything()))
///   matches [x](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
                       InnerMatcher, 0) {
  for (const LambdaCapture &Capture : Node.captures()) {
    if (Capture.capturesVariable()) {
      BoundNodesTreeBuilder Result(*Builder);
      if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) {
        *Builder = std::move(Result);
        return true;
      }
    }
  }
  return false;
}

/// Matches any capture of 'this' in a lambda expression.
///
/// Given
/// \code
///   struct foo {
///     void bar() {
///       auto f = [this](){};
///     }
///   };
/// \endcode
/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
///   matches [this](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
                       internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
  return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) {
    return LC.capturesThis();
  });
}

/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  return Node.isListInitialization();
}

/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
///   struct point { double x; double y; };
///   point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())))
///   will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  return Node.requiresZeroInitialization();
}

/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
///   class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
///   matches f(int x) {}
/// with hasParameter(...)
///   matching int x
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
///   matches the declaration of method f with hasParameter
///   matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  return (N < Node.parameters().size() &&
          InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}

/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParam(
///     declRefExpr(to(varDecl(hasName("y")))),
///     parmVarDecl(hasType(isInteger()))
/// ))
///   matches f(y);
/// with declRefExpr(...)
///   matching int y
/// and parmVarDecl(...)
///   matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches all arguments and their respective types for a \c CallExpr or
/// \c CXXConstructExpr. It is very similar to \c forEachArgumentWithParam but
/// it works on calls through function pointers as well.
///
/// The difference is, that function pointers do not provide access to a
/// \c ParmVarDecl, but only the \c QualType for each argument.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
///   void (*f_ptr)(int) = f;
///   f_ptr(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParamType(
///     declRefExpr(to(varDecl(hasName("y")))),
///     qualType(isInteger()).bind("type")
/// ))
///   matches f(y) and f_ptr(y)
/// with declRefExpr(...)
///   matching int y
/// and qualType(...)
///   matching int
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<QualType>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;

  const FunctionProtoType *FProto = nullptr;

  if (const auto *Call = dyn_cast<CallExpr>(&Node)) {
    if (const auto *Value =
            dyn_cast_or_null<ValueDecl>(Call->getCalleeDecl())) {
      QualType QT = Value->getType().getCanonicalType();

      // This does not necessarily lead to a `FunctionProtoType`,
      // e.g. K&R functions do not have a function prototype.
      if (QT->isFunctionPointerType())
        FProto = QT->getPointeeType()->getAs<FunctionProtoType>();

      if (QT->isMemberFunctionPointerType()) {
        const auto *MP = QT->getAs<MemberPointerType>();
        assert(MP && "Must be a member pointer if it is a member function "
                     "pointer");
        FProto = MP->getPointeeType()->getAs<FunctionProtoType>();
        assert(FProto &&
               "The call must have happened through a member function "
               "pointer");
      }
    }
  }

  int ParamIndex = 0;
  bool Matched = false;

  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex, ++ParamIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);

      // This test is cheaper compared to the big matcher in the next if.
      // Therefore, please keep this order.
      if (FProto) {
        QualType ParamType = FProto->getParamType(ParamIndex);
        if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) {
          Result.addMatch(ParamMatches);
          Matched = true;
          continue;
        }
      }

      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher))))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher)))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
        continue;
      }
    }
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches the ParmVarDecl nodes that are at the N'th position in the
/// parameter list. The parameter list could be that of either a block,
/// function, or objc-method.
///
/// Given
/// \code
///   void f(int a, int b, int c) {
///   }
/// \endcode
///
/// ``parmVarDecl(isAtPosition(0))`` matches ``int a``.
///
/// ``parmVarDecl(isAtPosition(1))`` matches ``int b``.
AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) {
  const clang::DeclContext *Context = Node.getParentFunctionOrMethod();

  if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;

  return false;
}

/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
///   class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
///   matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
///   matching int y
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
///   matches the declaration of method f with hasAnyParameter
///   matching y.
///
/// For blocks, given
/// \code
///   b = ^(int y) { printf("%d", y); };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
///   matches the declaration of the block b with hasAnyParameter
///   matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
                                    Node.param_end(), Finder,
                                    Builder) != Node.param_end();
}

/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
///   void f(int i) {}
///   void g(int i, int j) {}
///   void h(int i, int j);
///   void j(int i);
///   void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
///   matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
///   matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
///   matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  return Node.getNumParams() == N;
}

/// Matches \c FunctionDecls that have a noreturn attribute.
/// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. /// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. /// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. /// /// Given: /// \code /// void Func(); /// void DeletedFunc() = delete; /// \endcode /// functionDecl(isDeleted()) /// matches the declaration of DeletedFunc, but not Func. AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); } /// Matches defaulted function declarations. /// /// Given: /// \code /// class A { ~A(); }; /// class B { ~B() = default; }; /// \endcode /// functionDecl(isDefaulted()) /// matches the declaration of ~B, but not ~A. AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); } /// Matches weak function declarations. /// /// Given: /// \code /// void foo() __attribute__((__weakref__("__foo"))); /// void bar(); /// \endcode /// functionDecl(isWeak()) /// matches the weak declaration "foo", but not "bar". AST_MATCHER(FunctionDecl, isWeak) { return Node.isWeak(); } /// Matches functions that have a dynamic exception specification. /// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() noexcept(true); /// void i() noexcept(false); /// void j() throw(); /// void k() throw(int); /// void l() throw(...); /// \endcode /// functionDecl(hasDynamicExceptionSpec()) and /// functionProtoType(hasDynamicExceptionSpec()) /// match the declarations of j, k, and l, but not f, g, h, or i. AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node)) return FnTy->hasDynamicExceptionSpec(); return false; } /// Matches functions that have a non-throwing exception specification. 
/// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() throw(); /// void i() throw(int); /// void j() noexcept(false); /// \endcode /// functionDecl(isNoThrow()) and functionProtoType(isNoThrow()) /// match the declarations of g, and h, but not f, i or j. AST_POLYMORPHIC_MATCHER(isNoThrow, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node); // If the function does not have a prototype, then it is assumed to be a // throwing function (as it would if the function did not have any exception // specification). if (!FnTy) return false; // Assume the best for any unresolved exception specification. if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType())) return true; return FnTy->isNothrow(); } /// Matches constexpr variable and function declarations, /// and if constexpr. /// /// Given: /// \code /// constexpr int foo = 42; /// constexpr int bar(); /// void baz() { if constexpr(1 > 0) {} } /// \endcode /// varDecl(isConstexpr()) /// matches the declaration of foo. /// functionDecl(isConstexpr()) /// matches the declaration of bar. /// ifStmt(isConstexpr()) /// matches the if statement in baz. AST_POLYMORPHIC_MATCHER(isConstexpr, AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl, FunctionDecl, IfStmt)) { return Node.isConstexpr(); } /// Matches selection statements with initializer. /// /// Given: /// \code /// void foo() { /// if (int i = foobar(); i > 0) {} /// switch (int i = foobar(); i) {} /// for (auto& a = get_range(); auto& x : a) {} /// } /// void bar() { /// if (foobar() > 0) {} /// switch (foobar()) {} /// for (auto& x : get_range()) {} /// } /// \endcode /// ifStmt(hasInitStatement(anything())) /// matches the if statement in foo but not in bar. /// switchStmt(hasInitStatement(anything())) /// matches the switch statement in foo but not in bar. /// cxxForRangeStmt(hasInitStatement(anything())) /// matches the range for statement in foo but not in bar. AST_POLYMORPHIC_MATCHER_P(hasInitStatement, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt, CXXForRangeStmt), internal::Matcher<Stmt>, InnerMatcher) { const Stmt *Init = Node.getInit(); return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder); } /// Matches the condition expression of an if statement, for loop, /// switch statement or conditional operator. /// /// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true)))) /// \code /// if (true) {} /// \endcode AST_POLYMORPHIC_MATCHER_P( hasCondition, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt, SwitchStmt, AbstractConditionalOperator), internal::Matcher<Expr>, InnerMatcher) { const Expr *const Condition = Node.getCond(); return (Condition != nullptr && InnerMatcher.matches(*Condition, Finder, Builder)); } /// Matches the then-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true))))) /// \code /// if (false) true; else false; /// \endcode AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Then = Node.getThen(); return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder)); } /// Matches the else-statement of an if statement. 
///
/// Examples matches the if statement
///   (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
///   if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *const Else = Node.getElse();
  return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder));
}

/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
///   class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
///     has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
///     has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
///   matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNode acts as a filter.
/// For example:
/// compoundStmt(
///     forEachDescendant(varDecl().bind("d")),
///     forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSpecifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = DynTypedNode::create(Node);
  return Builder->removeBindings(Predicate);
}

/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
///   if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
///   matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  const DeclStmt* const DeclarationStatement =
    Node.getConditionVariableDeclStmt();
  return DeclarationStatement != nullptr &&
         InnerMatcher.matches(*DeclarationStatement, Finder, Builder);
}

/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
///   matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr* Expression = Node.getIdx())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
///   matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr* Expression = Node.getBase())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body. Note that in case of functions
/// this matcher only matches the definition itself and not the other
/// declarations of the same function.
///
/// Given
/// \code
///   for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
///   matches 'for (;;) {}'
/// with compoundStmt()
///   matching '{}'
///
/// Given
/// \code
///   void f();
///   void f() {}
/// \endcode
/// functionDecl(hasBody(compoundStmt()))
///   matches 'void f() {}'
/// with compoundStmt()
///   matching '{}'
///   but does not match 'void f();'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  if (Finder->isTraversalIgnoringImplicitNodes() && isDefaultedHelper(&Node))
    return false;
  const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
  return (Statement != nullptr &&
          InnerMatcher.matches(*Statement, Finder, Builder));
}

/// Matches a function declaration that has a given body present in the AST.
/// Note that this matcher matches all the declarations of a function whose
/// body is present in the AST.
///
/// Given
/// \code
///   void f();
///   void f() {}
///   void g();
/// \endcode
/// functionDecl(hasAnyBody(compoundStmt()))
///   matches both 'void f();'
///   and 'void f() {}'
/// with compoundStmt()
///   matching '{}'
///   but does not match 'void g();'
AST_MATCHER_P(FunctionDecl, hasAnyBody,
              internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *const Statement = Node.getBody();
  return (Statement != nullptr &&
          InnerMatcher.matches(*Statement, Finder, Builder));
}

/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
///   { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
///   matches '{ {}; 1+2; }'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
  return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
                                          CS->body_end(), Finder,
                                          Builder) != CS->body_end();
}

/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
///   { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
///   matches '{}'
///   but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  return Node.size() == N;
}

/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
///   f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
///   matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
///   match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
///   match 3.14
/// integerLiteral(equals(42))
///   matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal.
/// Instead, you must use a unaryOperator() matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
///               hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
///            Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  return internal::PolymorphicMatcherWithParam1<
    internal::ValueEqualsMatcher, ValueT>(Value);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       IntegerLiteral),
                                   bool, Value, 0) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       IntegerLiteral),
                                   unsigned, Value, 1) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       FloatingLiteral, IntegerLiteral),
                                   double, Value, 2) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}

/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
///   !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasOperatorName,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator, UnaryOperator),
    std::string, Name) {
  if (Optional<StringRef> OpName = internal::getOpName(Node))
    return *OpName == Name;
  return false;
}

/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
///
///    hasAnyOperatorName("+", "-")
///  Is equivalent to
///    anyOf(hasOperatorName("+"), hasOperatorName("-"))
extern const internal::VariadicFunction<
    internal::PolymorphicMatcherWithParam1<
        internal::HasAnyOperatorNameMatcher, std::vector<std::string>,
        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                        CXXRewrittenBinaryOperator,
                                        UnaryOperator)>,
    StringRef, internal::hasAnyOperatorNameFunc>
    hasAnyOperatorName;

/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
///   if (a == b)
///     a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
///            (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
///   struct S { S& operator=(const S&); };
///   void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(
    isAssignmentOperator,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator)) {
  return Node.isAssignmentOp();
}

/// Matches comparison operators.
///
/// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator()))
/// \code
///   if (a == b)
///     a += b;
/// \endcode
///
/// Example 2: matches s1 < s2
///            (matcher = cxxOperatorCallExpr(isComparisonOperator()))
/// \code
///   struct S { bool operator<(const S& other); };
///   void x(S s1, S s2) { bool b1 = s1 < s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(
    isComparisonOperator,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator)) {
  return Node.isComparisonOp();
}

/// Matches the left hand side of binary operator expressions.
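///
/// As an illustrative aside (not part of the reference examples), the two
/// sides can be constrained together with other narrowing matchers:
/// \code
///   binaryOperator(isAssignmentOperator(),
///                  hasLHS(declRefExpr()),
///                  hasRHS(integerLiteral()))
/// \endcode
/// which would match an assignment such as 'x = 5'.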
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              BinaryOperator, CXXOperatorCallExpr,
                              CXXRewrittenBinaryOperator, ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  const Expr *LeftHandSide = internal::getLHS(Node);
  return (LeftHandSide != nullptr &&
          InnerMatcher.matches(*LeftHandSide, Finder, Builder));
}

/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              BinaryOperator, CXXOperatorCallExpr,
                              CXXRewrittenBinaryOperator, ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  const Expr *RightHandSide = internal::getRHS(Node);
  return (RightHandSide != nullptr &&
          InnerMatcher.matches(*RightHandSide, Finder, Builder));
}

/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
AST_POLYMORPHIC_MATCHER_P(
    hasEitherOperand,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
             anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches if both matchers match with opposite sides of the binary operator.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1)),
///                                              integerLiteral(equals(2))))
/// \code
///   1 + 2 // Match
///   2 + 1 // Match
///   1 + 1 // No match
///   2 + 2 // No match
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(
    hasOperands,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator),
    internal::Matcher<Expr>, Matcher1, internal::Matcher<Expr>, Matcher2) {
  return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
             anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
                   allOf(hasLHS(Matcher2), hasRHS(Matcher1))))
      .matches(Node, Finder, Builder);
}

/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
///                                   cxxBoolLiteral(equals(true))))
/// \code
///   !true
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasUnaryOperand,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(UnaryOperator,
                                                          CXXOperatorCallExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  const Expr *const Operand = internal::getSubExpr(Node);
  return (Operand != nullptr &&
          InnerMatcher.matches(*Operand, Finder, Builder));
}

/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
///   class URL { URL(string); };
///   URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))))
/// \code
///   int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  const Expr *const SubExpression =
      internal::GetSourceExpressionMatcher<NodeType>::get(Node);
  return (SubExpression != nullptr &&
          InnerMatcher.matches(*SubExpression, Finder, Builder));
}

/// Matches casts that have a given cast kind.
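///
/// As an illustrative aside, cast-kind narrowing composes with other
/// matchers in the usual way, e.g.:
/// \code
///   implicitCastExpr(hasCastKind(CK_IntegralToFloating))
/// \endcode
/// would match the implicit conversion in 'double d = 1;'.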
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
///   int *p = 0;
/// \endcode
///
/// If the matcher is used from clang-query, CastKind parameter
/// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
  return Node.getCastKind() == Kind;
}

/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  const QualType NodeType = Node.getTypeAsWritten();
  return InnerMatcher.matches(NodeType, Finder, Builder);
}

/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  return InnerMatcher.matches(Node.getType(), Finder, Builder);
}

/// Matches TagDecl objects that are spelled with "struct."
///
/// Example matches S, but not C, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) {
  return Node.isStruct();
}

/// Matches TagDecl objects that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) {
  return Node.isUnion();
}

/// Matches TagDecl objects that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) {
  return Node.isClass();
}

/// Matches TagDecl objects that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) {
  return Node.isEnum();
}

/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
///   condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Expression = Node.getTrueExpr();
  return (Expression != nullptr &&
          InnerMatcher.matches(*Expression, Finder, Builder));
}

/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
///   condition ? a : b
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Expression = Node.getFalseExpr();
  return (Expression != nullptr &&
          InnerMatcher.matches(*Expression, Finder, Builder));
}

/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
///   class A {};
///   class B;  // Doesn't match, as it has no body.
///   int va;
///   extern int vb;  // Doesn't match, as it doesn't define the variable.
///   void fa() {}
///   void fb();  // Doesn't match, as it has no body.
///   @interface X
///   - (void)ma; // Doesn't match, as the interface is only a declaration.
///   @end
///   @implementation X
///   - (void)ma {}
///   @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
///   Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  return Node.isThisDeclarationADefinition();
}

/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
///   void f(...);
///   void g(int);
///   template <typename... Ts> void h(Ts...);
///   void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
  return Node.isVariadic();
}

/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
///     (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
///         ofClass(hasName("A"))))))
/// \code
///   class A {
///    public:
///     A();
///   };
///   A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  ASTChildrenNotSpelledInSourceScope RAII(Finder, false);

  const CXXRecordDecl *Parent = Node.getParent();
  return (Parent != nullptr &&
          InnerMatcher.matches(*Parent, Finder, Builder));
}

/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
///   class A { virtual void f(); };
///   class B : public A { void f(); };
///   class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
///               forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
///   matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
///   that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
///   class A1 { virtual void f(); };
///   class A2 { virtual void f(); };
///   class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
///               forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
///   matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
///   once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
              internal::Matcher<CXXMethodDecl>, InnerMatcher) {
  BoundNodesTreeBuilder Result;
  bool Matched = false;
  for (const auto *Overridden : Node.overridden_methods()) {
    BoundNodesTreeBuilder OverriddenBuilder(*Builder);
    const bool OverriddenMatched =
        InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
    if (OverriddenMatched) {
      Matched = true;
      Result.addMatch(OverriddenBuilder);
    }
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches declarations of virtual methods and C++ base specifiers that
/// specify virtual inheritance.
///
/// Example:
/// \code
///   class A {
///    public:
///     virtual void x(); // matches x
///   };
/// \endcode
///
/// Example:
/// \code
///   class Base {};
///   class DirectlyDerived : virtual Base {}; // matches Base
///   class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base
/// \endcode
///
/// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER(isVirtual,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl,
                                                        CXXBaseSpecifier)) {
  return Node.isVirtual();
}

/// Matches if the given method declaration has an explicit "virtual".
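///
/// As an illustrative aside, combining this with \c isVirtual() singles out
/// methods that are virtual only because they override a base method:
/// \code
///   cxxMethodDecl(isVirtual(), unless(isVirtualAsWritten()))
/// \endcode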
/// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// void x(); /// }; /// \endcode /// matches A::x but not B::x AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) { return Node.isVirtualAsWritten(); } /// Matches if the given method or class declaration is final. /// /// Given: /// \code /// class A final {}; /// /// struct B { /// virtual void f(); /// }; /// /// struct C : B { /// void f() final; /// }; /// \endcode /// matches A and C::f, but not B, C, or B::f AST_POLYMORPHIC_MATCHER(isFinal, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, CXXMethodDecl)) { return Node.template hasAttr<FinalAttr>(); } /// Matches if the given method declaration is pure. /// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. /// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) { return Node.isMoveAssignmentOperator(); } /// Matches if the given method declaration overrides another method. /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// virtual void x(); /// }; /// \endcode /// matches B::x AST_MATCHER(CXXMethodDecl, isOverride) { return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>(); } /// Matches method declarations that are user-provided. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &) = default; // #2 /// S(S &&) = delete; // #3 /// }; /// \endcode /// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3. AST_MATCHER(CXXMethodDecl, isUserProvided) { return Node.isUserProvided(); } /// Matches member expressions that are called with '->' as opposed /// to '.'. /// /// Member calls on the implicit this pointer match as called with '->'. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// template <class T> void f() { this->f<T>(); f<T>(); } /// int a; /// static int b; /// }; /// template <class T> /// class Z { /// void x() { this->m; } /// }; /// \endcode /// memberExpr(isArrow()) /// matches this->x, x, y.x, a, this->b /// cxxDependentScopeMemberExpr(isArrow()) /// matches this->m /// unresolvedMemberExpr(isArrow()) /// matches this->f<T>, f<T> AST_POLYMORPHIC_MATCHER( isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr)) { return Node.isArrow(); } /// Matches QualType nodes that are of integer type. 
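///
/// As an illustrative aside, such QualType narrowing matchers are usually
/// applied through \c hasType(), e.g.:
/// \code
///   varDecl(hasType(isInteger()))
/// \endcode
/// would match declarations of integer-typed variables.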
/// /// Given /// \code /// void a(int); /// void b(long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isInteger()))) /// matches "a(int)", "b(long)", but not "c(double)". AST_MATCHER(QualType, isInteger) { return Node->isIntegerType(); } /// Matches QualType nodes that are of unsigned integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isUnsignedInteger()))) /// matches "b(unsigned long)", but not "a(int)" and "c(double)". AST_MATCHER(QualType, isUnsignedInteger) { return Node->isUnsignedIntegerType(); } /// Matches QualType nodes that are of signed integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isSignedInteger()))) /// matches "a(int)", but not "b(unsigned long)" and "c(double)". AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); } /// Matches QualType nodes that are of character type. /// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. /// /// Given /// \code /// void a(int); /// void b(int const); /// void c(const int); /// void d(const int*); /// void e(int const) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isConstQualified()))) /// matches "void b(int const)", "void c(const int)" and /// "void e(int const) {}". It does not match d as there /// is no top-level const on the parameter type "const int *". AST_MATCHER(QualType, isConstQualified) { return Node.isConstQualified(); } /// Matches QualType nodes that are volatile-qualified, i.e., that /// include "top-level" volatile. /// /// Given /// \code /// void a(int); /// void b(int volatile); /// void c(volatile int); /// void d(volatile int*); /// void e(int volatile) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isVolatileQualified()))) /// matches "void b(int volatile)", "void c(volatile int)" and /// "void e(int volatile) {}". It does not match d as there /// is no top-level volatile on the parameter type "volatile int *". AST_MATCHER(QualType, isVolatileQualified) { return Node.isVolatileQualified(); } /// Matches QualType nodes that have local CV-qualifiers attached to /// the node, not hidden within a typedef. /// /// Given /// \code /// typedef const int const_int; /// const_int i; /// int *const j; /// int *volatile k; /// int m; /// \endcode /// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k. /// \c i is const-qualified but the qualifier is not local. AST_MATCHER(QualType, hasLocalQualifiers) { return Node.hasLocalQualifiers(); } /// Matches a member expression where the member is matched by a /// given matcher. 
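///
/// As an illustrative aside, the member can be constrained by any
/// \c ValueDecl matcher, for example by type rather than by name:
/// \code
///   memberExpr(member(fieldDecl(hasType(isInteger()))))
/// \endcode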
///
/// Given
/// \code
///   struct { int first, second; } first, second;
///   int i(second.first);
///   int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
///   matches second.first
///   but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}

/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it
/// matches use of implicit `this`.
///
/// Given
/// \code
///   struct X {
///     int m;
///     int f(X x) { x.m; return m; }
///   };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
///   matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}

/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
///   matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder,
                                    Builder) != Node.shadow_end();
}

/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
///   namespace X { int a; void b(); }
///   using X::a;
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
///   matches \code using X::b \endcode
///   but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}

/// Matches template instantiations of function, class, or static
/// member variable templates.
///
/// Given
/// \code
///   template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   matches the template instantiation of X<A>.
///
/// But given
/// \code
///   template <typename T>  class X {}; class A {};
///   template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation ||
          Node.getTemplateSpecializationKind() ==
              TSK_ExplicitInstantiationDefinition ||
          Node.getTemplateSpecializationKind() ==
              TSK_ExplicitInstantiationDeclaration);
}

/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
///   template<typename T> void A(T t) { T i; }
///   A(0);
///   A(0U);
/// \endcode
/// functionDecl(isInstantiated())
///   matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                    functionDecl(isTemplateInstantiation())));
  return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation)));
}

/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
///   int j;
///   template<typename T> void A(T t) { T i; j += 42; }
///   A(0);
///   A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
///   matches 'int i;' and 'unsigned i;'.
/// unless(stmt(isInTemplateInstantiation()))
///   will NOT match j += 42; as it's shared between the template definition
///   and instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  return stmt(
      hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                             functionDecl(isTemplateInstantiation())))));
}

/// Matches explicit template specializations of function, class, or
/// static member variable templates.
///
/// Given
/// \code
///   template<typename T> void A(T t) { }
///   template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
///   matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}

/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
                                internal::Matcher<QualType>, InnerMatcher, 0) {
  return internal::BindableMatcher<TypeLoc>(
      new internal::TypeLocTypeMatcher(InnerMatcher));
}

/// Matches type \c bool.
///
/// Given
/// \code
///   struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
///   matches "bool func();"
AST_MATCHER(Type, booleanType) {
  return Node.isBooleanType();
}

/// Matches type \c void.
///
/// Given
/// \code
///   struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
///   matches "void func();"
AST_MATCHER(Type, voidType) {
  return Node.isVoidType();
}

template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;

/// Matches builtin Types.
///
/// Given
/// \code
///   struct A {};
///   A a;
///   int b;
///   float c;
///   bool d;
/// \endcode
/// builtinType()
///   matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;

/// Matches all kinds of arrays.
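///
/// As an illustrative aside, type matchers like this one are commonly
/// nested inside \c hasType(), e.g.:
/// \code
///   varDecl(hasType(arrayType()))
/// \endcode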
///
/// Given
/// \code
///   int a[] = { 2, 3 };
///   int b[4];
///   void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
///   matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;

/// Matches C99 complex types.
///
/// Given
/// \code
///   _Complex float f;
/// \endcode
/// complexType()
///   matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;

/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
///   int i;
///   float f;
/// \endcode
/// realFloatingPointType()
///   matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
  return Node.isRealFloatingType();
}

/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
///   struct A {};
///   A a[7];
///   int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
///   matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
                                  AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
                                                                  ComplexType));

/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
///   void f() {
///     int a[2];
///     int b[] = { 2, 3 };
///     int c[b[0]];
///   }
/// \endcode
/// constantArrayType()
///   matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;

/// Matches nodes that have the specified size.
///
/// Given
/// \code
///   int a[42];
///   int b[2 * 21];
///   int c[41], d[43];
///   char *s = "abcd";
///   wchar_t *ws = L"abcd";
///   char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
///   matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
///   matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
                                                          StringLiteral),
                          unsigned, N) {
  return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}

/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
///   template<typename T, int Size>
///   class array {
///     T data[Size];
///   };
/// \endcode
/// dependentSizedArrayType()
///   matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;

/// Matches C arrays with unspecified size.
///
/// Given
/// \code
///   int a[] = { 2, 3 };
///   int b[42];
///   void f(int c[]) { int d[a[0]]; }
/// \endcode
/// incompleteArrayType()
///   matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;

/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
///   void f() {
///     int a[] = { 2, 3 };
///     int b[42];
///     int c[a[0]];
///   }
/// \endcode
/// variableArrayType()
///   matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;

/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
///   void f(int b) {
///     int a[b];
///   }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
///   varDecl(hasName("b")))))))
///   matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}

/// Matches atomic types.
///
/// Given
/// \code
///   _Atomic(int) i;
/// \endcode
/// atomicType()
///   matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;

/// Matches atomic types with a specific value type.
///
/// Given
/// \code
///   _Atomic(int) i;
///   _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
///   matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
                                  AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));

/// Matches type nodes representing C++11 auto types.
///
/// Given:
/// \code
///   auto n = 4;
///   int v[] = { 2, 3 };
///   for (auto i : v) { }
/// \endcode
/// autoType()
///   matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;

/// Matches type nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
///   short i = 1;
///   int j = 42;
///   decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
///   matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;

/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
///   auto a = 1;
///   auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
///   matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));

/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
///   decltype(1) a = 1;
///   decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
///   matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));

/// Matches \c FunctionType nodes.
///
/// Given
/// \code
///   int (*f)(int);
///   void g();
/// \endcode
/// functionType()
///   matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;

/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
///   int (*f)(int);
///   void g();
/// \endcode
/// functionProtoType()
///   matches "int (*f)(int)" and the type of "g" in C++ mode.
///   In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;

/// Matches \c ParenType nodes.
///
/// Given
/// \code
///   int (*ptr_to_array)[4];
///   int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;

/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
///   int (*ptr_to_array)[4];
///   int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));

/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;

/// Matches member pointer types.
/// Given
/// \code
///   struct A { int i; };
///   int A::* ptr = &A::i;
/// \endcode
/// memberPointerType()
///   matches "int A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;

/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
///   int *a;
///   int &b = *a;
///   int c = 5;
///
///   @interface Foo
///   @end
///   Foo *f;
/// \endcode
/// pointerType()
///   matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;

/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
///   int *a;
///
///   @interface Foo
///   @end
///   Foo *f;
/// \endcode
/// objcObjectPointerType()
///   matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;

/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;

/// Matches lvalue reference types.
///
/// Given:
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;

/// Matches rvalue reference types.
///
/// Given:
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;

/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
///   int *a;
///   int const *b;
///   float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
///   matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
///   Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
    pointee, getPointee,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
                                    PointerType, ReferenceType));

/// Matches typedef types.
///
/// Given
/// \code
///   typedef int X;
/// \endcode
/// typedefType()
///   matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;

/// Matches enum types.
///
/// Given
/// \code
///   enum C { Green };
///   enum class S { Red };
///
///   C c;
///   S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<EnumType> enumType;

/// Matches template specialization types.
///
/// Given
/// \code
///   template <typename T>
///   class C { };
///
///   template class C<int>;  // A
///   C<char> var;            // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
    templateSpecializationType;

/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
///   template <typename T>
///   class C { public: C(T); };
///
///   C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
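///
/// As an illustrative aside, the declaration above could be matched with,
/// e.g.:
/// \code
///   varDecl(hasType(deducedTemplateSpecializationType()))
/// \endcode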
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
    deducedTemplateSpecializationType;

/// Matches type nodes representing unary type transformations.
///
/// Given:
/// \code
///   typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
///   matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;

/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
///   class C {};
///   struct S {};
///
///   C c;
///   S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;

/// Matches tag types (record and enum types).
///
/// Given
/// \code
///   enum E {};
///   class C {};
///
///   E e;
///   C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;

/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   class C {};
///
///   class C c;
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;

/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  if (const NestedNameSpecifier *Qualifier = Node.getQualifier())
    return InnerMatcher.matches(*Qualifier, Finder, Builder);

  return false;
}

/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}

/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
///   template <typename T>
///   void F(T t) {
///     int i = 1 + t;
///   }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
    substTemplateTypeParmType;

/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
///   template <typename T>
///   double F(T t);
///   int i;
///   double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
    hasReplacementType, getReplacementType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));

/// Matches template type parameter types.
///
/// Example matches T, but not int.
///     (matcher = templateTypeParmType())
/// \code
///   template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;

/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
///     (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
///   template <typename T> struct S {
///     void f(S s);
///     void g(S<T> s);
///   };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;

/// Matches decayed types.
///
/// Example matches i[] in the declaration of f.
///     (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
///     (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
///   void f(int i[]) {
///     i[1] = 0;
///   }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;

/// Matches the decayed type, whose decayed type matches \c InnerMatcher.
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
              InnerType) {
  return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}

/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  const DeclContext *DC = Node.getDeclContext();
  if (!DC) return false;
  return InnerMatcher.matches(*Decl::castFromDeclContext(DC),
                              Finder, Builder);
}

/// Matches nested name specifiers.
///
/// Given
/// \code
///   namespace ns {
///     struct A { static void f(); };
///     void A::f() {}
///     void g() { A::f(); }
///   }
///   ns::A a;
/// \endcode
/// nestedNameSpecifier()
///   matches "ns::" and both occurrences of "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
    nestedNameSpecifier;

/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
    nestedNameSpecifierLoc;

/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  return internal::BindableMatcher<NestedNameSpecifierLoc>(
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher));
}

/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
///   struct A { struct B { struct C {}; }; };
///   A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
///   hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
///   matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  if (!Node.getAsType())
    return false;
  return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder);
}

/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
///   struct A { struct B { struct C {}; }; };
///   A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
///   hasDeclaration(cxxRecordDecl(hasName("A")))))))
///   matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
              internal::Matcher<TypeLoc>, InnerMatcher) {
  return Node && Node.getNestedNameSpecifier()->getAsType() &&
         InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}

/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
///   struct A { struct B { struct C {}; }; };
///   A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A"))))
///   matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  const NestedNameSpecifier *NextNode = Node.getPrefix();
  if (!NextNode)
    return false;
  return InnerMatcher.matches(*NextNode, Finder, Builder);
}

/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
///   struct A { struct B { struct C {}; }; };
///   A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
///   matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  NestedNameSpecifierLoc NextNode = Node.getPrefix();
  if (!NextNode)
    return false;
  return InnerMatcher.matches(NextNode, Finder, Builder);
}

/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
///   namespace ns { struct A {}; }
///   ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
///   matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  if (!Node.getAsNamespace())
    return false;
  return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder);
}

/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{

/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
  return &Node == Other;
}

/// @}

/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
///   switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
///   matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
              InnerMatcher) {
  BoundNodesTreeBuilder Result;
  // FIXME: getSwitchCaseList() does not necessarily guarantee a stable
  // iteration order. We should use the more general iterating matchers once
  // they are capable of expressing this matcher (for example, it should ignore
  // case statements belonging to nested switch statements).
  bool Matched = false;
  for (const SwitchCase *SC = Node.getSwitchCaseList(); SC;
       SC = SC->getNextSwitchCase()) {
    BoundNodesTreeBuilder CaseBuilder(*Builder);
    bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder);
    if (CaseMatched) {
      Matched = true;
      Result.addMatch(CaseBuilder);
    }
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches each constructor initializer in a constructor definition.
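///
/// As an illustrative aside, like other \c forEach* matchers, every produced
/// match invokes the registered callback once; a bound node is then read
/// back in the callback, e.g.:
/// \code
///   const auto *Field = Result.Nodes.getNodeAs<FieldDecl>("x");
/// \endcode
/// where \c Result is the \c MatchFinder::MatchResult passed to \c run().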
/// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { if (Finder->isTraversalIgnoringImplicitNodes() && !I->isWritten()) continue; BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. /// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. 
/// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; ASTChildrenNotSpelledInSourceScope RAII(Finder, false); return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. /// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). 
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  for (const auto *Attr : Node.attrs()) {
    if (Attr->getKind() == AttrKind)
      return true;
  }
  return false;
}

/// Matches the return value expression of a return statement.
///
/// Given
/// \code
///   return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
///   matches 'return a + b'
/// with binaryOperator()
///   matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const auto *RetValue = Node.getRetValue())
    return InnerMatcher.matches(*RetValue, Finder, Builder);
  return false;
}

/// Matches CUDA kernel call expression.
///
/// Example matches
/// \code
///   kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
    cudaKernelCallExpr;

/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
///   void *v1 = NULL;
///   void *v2 = nullptr;
///   void *v3 = __null; // GNU extension
///   char *cp = (char *)0;
///   int *ip = 0;
///   int i = 0;
/// \endcode
/// expr(nullPointerConstant())
///   matches the initializer for v1, v2, v3, cp, and ip. Does not match the
///   initializer for i.
AST_MATCHER(Expr, nullPointerConstant) {
  return Node.isNullPointerConstant(Finder->getASTContext(),
                                    Expr::NPC_ValueDependentIsNull);
}

/// Matches the declaration of the function the statement belongs to.
///
/// Given:
/// \code
///   F& operator=(const F& o) {
///     std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
///     return *this;
///   }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
///   matches 'return *this'
///   but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  const auto &Parents = Finder->getASTContext().getParents(Node);

  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while (!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
                               Builder)) {
        return true;
      }
    } else {
      for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}

/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
///   void f() {
///     int x;
///     static int y;
///   }
///   int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit, as though it had internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
///   namespace {
///     void f() {}
///   }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  return Node.hasExternalFormalLinkage();
}

/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
///   void x(int val) {}
///   void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument.
For example: /// /// \code /// void x(int val = 7) {} /// void y(int val = 42) {} /// \endcode /// parmVarDecl(hasInitializer(integerLiteral(equals(42)))) /// matches the parameter of y /// /// A matcher such as /// parmVarDecl(hasInitializer(anything())) /// is equivalent to parmVarDecl(hasDefaultArgument()). AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage, 16) MyClass(); /// \endcode /// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16)))) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index, internal::Matcher<Expr>, InnerMatcher) { return Node.getNumPlacementArgs() > Index && InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder); } /// Matches any placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage) MyClass(); /// \endcode /// cxxNewExpr(hasAnyPlacementArg(anything())) /// matches the expression 'new (Storage) MyClass()'. AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>, InnerMatcher) { return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) { return InnerMatcher.matches(*Arg, Finder, Builder); }); } /// Matches array new expressions with a given array size. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(integerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) { return Node.isArray() && *Node.getArraySize() && InnerMatcher.matches(**Node.getArraySize(), Finder, Builder); } /// Matches a class declaration that is defined. /// /// Example matches x (matcher = cxxRecordDecl(hasDefinition())) /// \code /// class x {}; /// class y; /// \endcode AST_MATCHER(CXXRecordDecl, hasDefinition) { return Node.hasDefinition(); } /// Matches a C++11 scoped enum declaration. /// /// Example matches Y (matcher = enumDecl(isScoped())) /// \code /// enum X {}; /// enum class Y {}; /// \endcode AST_MATCHER(EnumDecl, isScoped) { return Node.isScoped(); } /// Matches a function declared with a trailing return type. /// /// Example matches Y (matcher = functionDecl(hasTrailingReturn())) /// \code /// int X() {} /// auto Y() -> int {} /// \endcode AST_MATCHER(FunctionDecl, hasTrailingReturn) { if (const auto *F = Node.getType()->getAs<FunctionProtoType>()) return F->hasTrailingReturn(); return false; } /// Matches expressions that match InnerMatcher that are possibly wrapped in an /// elidable constructor and other corresponding bookkeeping nodes. /// /// In C++17, elidable copy constructors are no longer being generated in the /// AST as it is not permitted by the standard. They are, however, part of the /// AST in C++14 and earlier. So, a matcher must abstract over these differences /// to work in all language modes. This matcher skips elidable constructor-call /// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and /// various implicit nodes inside the constructor calls, all of which will not /// appear in the C++17 AST.
/// /// Given /// /// \code /// struct H {}; /// H G(); /// void f() { /// H D = G(); /// } /// \endcode /// /// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))`` /// matches ``H D = G()`` in C++11 through C++17 (and beyond). AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { // E tracks the node that we are examining. const Expr *E = &Node; // If present, remove an outer `ExprWithCleanups` corresponding to the // underlying `CXXConstructExpr`. This check won't cover all cases of added // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the // EWC is placed on the outermost node of the expression, which this may not // be), but it still improves the coverage of this matcher. if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node)) E = CleanupsExpr->getSubExpr(); if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective())`` matches /// ``omp taskyield``. AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the structured-block of the OpenMP executable directive. /// /// Prerequisite: the executable directive must not be a standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder) != Clauses.end(); } /// Matches the OpenMP ``default`` clause.
/// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and /// ``default(firstprivate)``. extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``. AST_MATCHER(OMPDefaultClause, isNoneKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none; } /// Matches if the OpenMP ``default`` clause has ``shared`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``. AST_MATCHER(OMPDefaultClause, isSharedKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared; } /// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind /// specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isFirstPrivateKind())`` matches only /// ``default(firstprivate)``. AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate; } /// Matches if the OpenMP directive is allowed to contain the specified OpenMP /// clause kind. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel for /// #pragma omp for /// \endcode /// /// ``ompExecutableDirective(isAllowedToContainClauseKind(OMPC_default))`` /// matches ``omp parallel`` and ``omp parallel for``. /// /// If the matcher is used from clang-query, the ``OpenMPClauseKind`` parameter /// should be passed as a quoted string, e.g., /// ``isAllowedToContainClauseKind("OMPC_default")``. AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind, OpenMPClauseKind, CKind) { return llvm::omp::isAllowedClauseForDirective( Node.getDirectiveKind(), CKind, Finder->getASTContext().getLangOpts().OpenMP); } //----------------------------------------------------------------------------// // End OpenMP handling. //----------------------------------------------------------------------------// } // namespace ast_matchers } // namespace clang #endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
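A minimal sketch (not part of the header above) of how one of these matchers might be driven from a standalone tool. The callback class, bind id "fn", and the test snippet are illustrative assumptions; MatchFinder, newFrontendActionFactory, and runToolOnCode are existing clang tooling entry points.

#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Tooling/Tooling.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang;
using namespace clang::ast_matchers;

namespace {
// Hypothetical callback: prints every function bound to the id "fn".
class InlinePrinter : public MatchFinder::MatchCallback {
public:
  void run(const MatchFinder::MatchResult &Result) override {
    if (const auto *FD = Result.Nodes.getNodeAs<FunctionDecl>("fn"))
      llvm::outs() << "inline function: " << FD->getName() << "\n";
  }
};
} // namespace

int main() {
  InlinePrinter Printer;
  MatchFinder Finder;
  // functionDecl(isInline()) is documented above; bind() makes the matched
  // node retrievable inside the callback.
  Finder.addMatcher(functionDecl(isInline()).bind("fn"), &Printer);
  tooling::runToolOnCode(tooling::newFrontendActionFactory(&Finder)->create(),
                         "inline void f(); void g();");
  return 0;
}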
declare_reduction_ast_print.c
// RUN: %clang_cc1 -verify -fopenmp -ast-print %s | FileCheck %s // RUN: %clang_cc1 -fopenmp -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s // RUN: %clang_cc1 -verify -fopenmp-simd -ast-print %s | FileCheck %s // RUN: %clang_cc1 -fopenmp-simd -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp-simd -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER #pragma omp declare reduction(+ : int, char : omp_out *= omp_in) // CHECK: #pragma omp declare reduction (+ : int : omp_out *= omp_in){{$}} // CHECK-NEXT: #pragma omp declare reduction (+ : char : omp_out *= omp_in) #pragma omp declare reduction(fun : float : omp_out += omp_in) initializer(omp_priv = omp_orig + 15) // CHECK: #pragma omp declare reduction (fun : float : omp_out += omp_in) initializer(omp_priv = omp_orig + 15) // CHECK: struct SSS { struct SSS { int field; #pragma omp declare reduction(+ : int, char : omp_out *= omp_in) // CHECK: #pragma omp declare reduction (+ : int : omp_out *= omp_in) // CHECK-NEXT: #pragma omp declare reduction (+ : char : omp_out *= omp_in) }; // CHECK: }; void init(struct SSS *priv, struct SSS orig); #pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) // CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) // CHECK: int main() { int main() { #pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) // CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) { #pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) // CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) } return 0; } // CHECK: } #pragma omp declare reduction(mymin:int \ : omp_out = omp_out > omp_in ? omp_in : omp_out) \ initializer(omp_priv = 2147483647) #pragma omp declare reduction(mymin \ : struct SSS \ : omp_out = omp_out.field > omp_in.field ? omp_in : omp_out) int foo(int argc, char **argv) { int x; struct SSS ss; #pragma omp parallel for reduction(mymin : x, ss) for (int i = 0; i < 1000; i++) ; return 0; } // CHECK: #pragma omp parallel for reduction(mymin: x,ss) #endif
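The test above only checks AST printing. As a hedged illustration of the runtime semantics of the mymin reduction it declares, here is a self-contained sketch; the value stream (i * 37) % 1000 is invented for the example.

// mymin_example.cpp: compile with -fopenmp. Each thread's private copy
// starts at the initializer, and copies are merged pairwise with the
// combiner omp_out = omp_out > omp_in ? omp_in : omp_out.
#include <climits>
#include <cstdio>

#pragma omp declare reduction(mymin : int : \
        omp_out = omp_out > omp_in ? omp_in : omp_out) \
    initializer(omp_priv = INT_MAX)

int main() {
  int x = INT_MAX;
#pragma omp parallel for reduction(mymin : x)
  for (int i = 0; i < 1000; i++) {
    int v = (i * 37) % 1000; // arbitrary values to reduce over
    x = x > v ? v : x;       // per-thread partial minimum
  }
  std::printf("min = %d\n", x); // prints 0
  return 0;
}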
GB_subassign_17.c
//------------------------------------------------------------------------------ // GB_subassign_17: C(I,J)<!M,repl> = scalar ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // Method 17: C(I,J)<!M,repl> = scalar ; using S // M: present // Mask_comp: true // C_replace: true // accum: NULL // A: scalar // S: constructed #include "GB_subassign_methods.h" GrB_Info GB_subassign_17 ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], const GrB_Matrix M, const bool Mask_struct, const void *scalar, const GrB_Type atype, const GrB_Matrix S, GB_Context Context ) { //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GB_GET_C ; const bool C_is_hyper = C->is_hyper ; const int64_t Cnvec = C->nvec ; const int64_t *GB_RESTRICT Ch = C->h ; const int64_t *GB_RESTRICT Cp = C->p ; GB_GET_MASK ; const bool M_is_hyper = M->is_hyper ; const int64_t Mnvec = M->nvec ; GB_GET_SCALAR ; GB_GET_S ; const int64_t *GB_RESTRICT Sh = S->h ; const int64_t Snvec = S->nvec ; const bool S_is_hyper = S->is_hyper ; GrB_BinaryOp accum = NULL ; //-------------------------------------------------------------------------- // Method 17: C(I,J)<!M,repl> = scalar ; using S //-------------------------------------------------------------------------- // Time: Close to optimal; must visit all IxJ, so Omega(|I|*|J|) is // required. The sparsity of !M cannot be exploited. // Methods 13, 15, 17, and 19 are very similar. 
//-------------------------------------------------------------------------- // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19) //-------------------------------------------------------------------------- GB_SUBASSIGN_IXJ_SLICE ; //-------------------------------------------------------------------------- // phase 1: create zombies, update entries, and count pending tuples //-------------------------------------------------------------------------- int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 ; //---------------------------------------------------------------------- // compute all vectors in this task //---------------------------------------------------------------------- for (int64_t j = kfirst ; j <= klast ; j++) { //------------------------------------------------------------------ // get jC, the corresponding vector of C //------------------------------------------------------------------ GB_GET_jC ; //------------------------------------------------------------------ // get S(iA_start:end,j) and M(iA_start:end,j) //------------------------------------------------------------------ GB_GET_VECTOR_FOR_IXJ (S) ; GB_GET_VECTOR_FOR_IXJ (M) ; //------------------------------------------------------------------ // C(I(iA_start,iA_end-1),jC)<!M,repl> = scalar //------------------------------------------------------------------ for (int64_t iA = iA_start ; iA < iA_end ; iA++) { //-------------------------------------------------------------- // Get the indices at the top of each list. //-------------------------------------------------------------- int64_t iS = (pS < pS_end) ? Si [pS] : INT64_MAX ; int64_t iM = (pM < pM_end) ? Mi [pM] : INT64_MAX ; //-------------------------------------------------------------- // find the smallest index of [iS iA iM] (always iA) //-------------------------------------------------------------- int64_t i = iA ; //-------------------------------------------------------------- // get M(i,j) //-------------------------------------------------------------- bool mij ; if (i == iM) { // mij = (bool) M [pM] mij = GB_mcast (Mx, pM, msize) ; GB_NEXT (M) ; } else { // mij not present, implicitly false ASSERT (i < iM) ; mij = false ; } // complement the mask entry mij since Mask_comp is true mij = !mij ; //-------------------------------------------------------------- // assign the entry //-------------------------------------------------------------- if (i == iS) { ASSERT (i == iA) ; { // both S (i,j) and A (i,j) present GB_C_S_LOOKUP ; if (mij) { // ----[C A 1] or [X A 1]--------------------------- // [C A 1]: action: ( =A ): copy A, no accum // [X A 1]: action: ( undelete ): zombie lives GB_noaccum_C_A_1_scalar ; } else { // ----[C A 0] or [X A 0]--------------------------- // [X A 0]: action: ( X ): still a zombie // [C A 0]: C_repl: action: ( delete ): zombie GB_DELETE_ENTRY ; } GB_NEXT (S) ; } } else { ASSERT (i == iA) ; { // S (i,j) is not present, A (i,j) is present if (mij) { // ----[. A 1]-------------------------------------- // [. 
A 1]: action: ( insert ) task_pending++ ; } } } } } GB_PHASE1_TASK_WRAPUP ; } //-------------------------------------------------------------------------- // phase 2: insert pending tuples //-------------------------------------------------------------------------- GB_PENDING_CUMSUM ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 ; //---------------------------------------------------------------------- // compute all vectors in this task //---------------------------------------------------------------------- for (int64_t j = kfirst ; j <= klast ; j++) { //------------------------------------------------------------------ // get jC, the corresponding vector of C //------------------------------------------------------------------ GB_GET_jC ; //------------------------------------------------------------------ // get S(iA_start:end,j) and M(iA_start:end,j) //------------------------------------------------------------------ GB_GET_VECTOR_FOR_IXJ (S) ; GB_GET_VECTOR_FOR_IXJ (M) ; //------------------------------------------------------------------ // C(I(iA_start,iA_end-1),jC)<!M,repl> = scalar //------------------------------------------------------------------ for (int64_t iA = iA_start ; iA < iA_end ; iA++) { //-------------------------------------------------------------- // Get the indices at the top of each list. //-------------------------------------------------------------- int64_t iS = (pS < pS_end) ? Si [pS] : INT64_MAX ; int64_t iM = (pM < pM_end) ? Mi [pM] : INT64_MAX ; //-------------------------------------------------------------- // find the smallest index of [iS iA iM] (always iA) //-------------------------------------------------------------- int64_t i = iA ; //-------------------------------------------------------------- // get M(i,j) //-------------------------------------------------------------- bool mij ; if (i == iM) { // mij = (bool) M [pM] mij = GB_mcast (Mx, pM, msize) ; GB_NEXT (M) ; } else { // mij not present, implicitly false ASSERT (i < iM) ; mij = false ; } // complement the mask entry mij since Mask_comp is true mij = !mij ; //-------------------------------------------------------------- // assign the entry //-------------------------------------------------------------- if (i == iS) { ASSERT (i == iA) ; { GB_NEXT (S) ; } } else { ASSERT (i == iA) ; { // S (i,j) is not present, A (i,j) is present if (mij) { // ----[. A 1]-------------------------------------- // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT (scalar) ; } } } } } GB_PHASE2_TASK_WRAPUP ; } //-------------------------------------------------------------------------- // finalize the matrix and return result //-------------------------------------------------------------------------- GB_SUBASSIGN_WRAPUP ; }
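For orientation, a dense, single-threaded reference model of what Method 17 computes (C(I,J)<!M,repl> = scalar, no accumulator). The function name and the dense column-major layout are illustrative assumptions, not SuiteSparse:GraphBLAS code; the real method instead marks zombies (phase 1) and inserts pending tuples (phase 2) on a sparse C.

#include <cstdint>
#include <vector>

// Hypothetical reference: C is dense, column-major with leading dimension
// ldc; M is nI-by-nJ dense over the I,J submatrix; 0 plays the role of
// "entry not present".
void subassign17_reference(std::vector<double> &C, int64_t ldc,
                           const std::vector<int64_t> &I,
                           const std::vector<int64_t> &J,
                           const std::vector<uint8_t> &M,
                           double scalar) {
  const int64_t nI = (int64_t) I.size(), nJ = (int64_t) J.size();
  for (int64_t jA = 0; jA < nJ; jA++) {
    for (int64_t iA = 0; iA < nI; iA++) {
      const bool mij = M[iA + jA * nI] != 0;
      double &cij = C[I[iA] + J[jA] * ldc];
      if (!mij)        // complemented mask: assign where M(i,j) is false
        cij = scalar;  // ([. A 1]: insert; [C A 1]/[X A 1]: copy/undelete)
      else             // C_replace: entries under the mask are deleted
        cij = 0;       // ([C A 0]/[X A 0]: zombie)
    }
  }
}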
GB_binop__le_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__le_int32) // A.*B function (eWiseMult): GB (_AemultB_08__le_int32) // A.*B function (eWiseMult): GB (_AemultB_02__le_int32) // A.*B function (eWiseMult): GB (_AemultB_04__le_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int32) // A*D function (colscale): GB (_AxD__le_int32) // D*A function (rowscale): GB (_DxB__le_int32) // C+=B function (dense accum): GB (_Cdense_accumB__le_int32) // C+=b function (dense accum): GB (_Cdense_accumb__le_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int32) // C=scalar+B GB (_bind1st__le_int32) // C=scalar+B' GB (_bind1st_tran__le_int32) // C=A+scalar GB (_bind2nd__le_int32) // C=A'+scalar GB (_bind2nd_tran__le_int32) // C type: bool // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_INT32 || GxB_NO_LE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__le_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__le_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__le_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__le_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__le_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__le_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__le_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__le_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__le_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__le_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__le_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__le_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__le_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__le_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
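As a hedged illustration of the bind2nd pattern hard-coded above, a scalar reference of what GB (_bind2nd__le_int32) computes over a full (non-bitmap) array; the function name below is invented, and threading and bitmap handling are omitted.

#include <cstdint>
#include <vector>

// Cx [p] = (Ax [p] <= y): the GB_BINOP z = (x <= y) with its second
// operand bound to the scalar y. uint8_t stands in for the bool Cx.
std::vector<uint8_t> bind2nd_le_int32_reference(const std::vector<int32_t> &Ax,
                                                int32_t y) {
  std::vector<uint8_t> Cx(Ax.size());
  for (size_t p = 0; p < Ax.size(); p++)
    Cx[p] = (Ax[p] <= y) ? 1 : 0;
  return Cx;
}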
special_random_ops.h
// // @author raver119@gmail.com // #ifndef LIBND4J_SPECIAL_RANDOM_OPS_H #define LIBND4J_SPECIAL_RANDOM_OPS_H #include <ops/random_ops.h> namespace randomOps { ////////////////////////////////////////////////////////////////////// template<typename T> class Choice { public: method_idx method_X method_XY static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, int *xShapeBuffer, T *y, int *yShapeBuffer, T *z, int *zShapeBuffer, T *extraArguments) { /** * X holds data, * Y holds probabilities * Z will hold results */ // TODO: we probably might want to skip this sum, and state that probabilities array should be real probabilities, i.e. should sum to 1.0 //T probSum = extraArguments[0]; __shared__ Nd4jIndex xLength; __shared__ Nd4jIndex yLength; __shared__ Nd4jIndex zLength; __shared__ int xEWS; __shared__ int yEWS; __shared__ int zEWS; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); xLength = shape::length(xShapeBuffer); yLength = shape::length(yShapeBuffer); zLength = shape::length(zShapeBuffer); xEWS = shape::elementWiseStride(xShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; if (zEWS >= 1 && xEWS >= 1 && yEWS >= 1) { for (Nd4jIndex e = tid; e < zLength; e+=blockDim.x * gridDim.x) { T prob = buffer->relativeT<T>(e); T cumProb = (T) 0.0f; for (Nd4jIndex f = 0; f < yLength; f++) { T relProb = y[f * yEWS]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { z[e * zEWS] = x[f * xEWS]; f += yLength; } __syncthreads(); } __syncthreads(); } } else { int xCoord[MAX_RANK]; int yCoord[MAX_RANK]; int zCoord[MAX_RANK]; __shared__ int xRank; __shared__ int yRank; __shared__ int zRank; __shared__ int *xShape; __shared__ int *yShape; __shared__ int *zShape; __shared__ int *xStride; __shared__ int *yStride; __shared__ int *zStride; __shared__ int xOffset; __shared__ int yOffset; __shared__ int zOffset; if (threadIdx.x == 0) { xRank = shape::rank(xShapeBuffer); yRank = shape::rank(yShapeBuffer); zRank = shape::rank(zShapeBuffer); xShape = shape::shapeOf(xShapeBuffer); yShape = shape::shapeOf(yShapeBuffer); zShape = shape::shapeOf(zShapeBuffer); xStride = shape::stride(xShapeBuffer); yStride = shape::stride(yShapeBuffer); zStride = shape::stride(zShapeBuffer); xOffset = shape::offset(xShapeBuffer); yOffset = shape::offset(yShapeBuffer); zOffset = shape::offset(zShapeBuffer); } __syncthreads(); for (Nd4jIndex i = tid; i < zLength; i+=blockDim.x * gridDim.x) { shape::ind2sub(zRank, zShape, i, zCoord); Nd4jIndex zOffset2 = shape::getOffset(zOffset, zShape, zStride, zCoord, zRank); T prob = buffer->relativeT<T>(i); T cumProb = (T) 0.0f; for (Nd4jIndex f = 0; f < yLength; f++) { shape::ind2sub(yRank, yShape, i, yCoord); Nd4jIndex yOffset2 = shape::getOffset(yOffset, yShape, yStride, yCoord, yRank); T relProb = y[yOffset2]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { shape::ind2sub(xRank, 
xShape, f, xCoord); Nd4jIndex xOffset2 = shape::getOffset(xOffset, xShape, xStride, xCoord, xRank); z[zOffset2] = x[xOffset2]; f += yLength; } __syncthreads(); } __syncthreads(); } } __syncthreads(); devBuffer->rewind(zLength); } #endif static inline void specialOp(Nd4jPointer state, T *x, int *xShapeBuffer, T *y, int *yShapeBuffer, T *z, int *zShapeBuffer, T *extraArguments) { /** * X holds data, * Y holds probabilities * Z will hold results */ nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); // TODO: we probably might want to skip this sum, and state that probabilities array should be real probabilities, i.e. should sum to 1.0 //T probSum = extraArguments[0]; Nd4jIndex yLength = shape::length(yShapeBuffer); Nd4jIndex zLength = shape::length(zShapeBuffer); int xEWS = shape::elementWiseStride(xShapeBuffer); int yEWS = shape::elementWiseStride(yShapeBuffer); int zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); if (zEWS >= 1 && xEWS >= 1 && yEWS >= 1) { #pragma omp parallel for num_threads(_threads) if (_threads > 1) schedule(guided) for (Nd4jIndex e = 0; e < zLength; e++) { T prob = buffer->relativeT<T>(e); T cumProb = (T) 0.0f; for (Nd4jIndex f = 0; f < yLength; f++) { T relProb = y[f * yEWS]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { z[e * zEWS] = x[f * xEWS]; f += yLength; } } } } else { int xCoord[MAX_RANK]; int yCoord[MAX_RANK]; int zCoord[MAX_RANK]; int xRank = shape::rank(xShapeBuffer); int yRank = shape::rank(yShapeBuffer); int zRank = shape::rank(zShapeBuffer); int *xShape = shape::shapeOf(xShapeBuffer); int *yShape = shape::shapeOf(yShapeBuffer); int *zShape = shape::shapeOf(zShapeBuffer); int *xStride = shape::stride(xShapeBuffer); int *yStride = shape::stride(yShapeBuffer); int *zStride = shape::stride(zShapeBuffer); int xOffset = shape::offset(xShapeBuffer); int yOffset = shape::offset(yShapeBuffer); int zOffset = shape::offset(zShapeBuffer); #pragma omp parallel for num_threads(_threads) if (_threads > 1) schedule(guided) for (Nd4jIndex i = 0; i < zLength; i++) { shape::ind2sub(zRank, zShape, i, zCoord); Nd4jIndex zOffset2 = shape::getOffset(zOffset, zShape, zStride, zCoord, zRank); T prob = buffer->relativeT<T>(i); T cumProb = (T) 0.0f; for (Nd4jIndex f = 0; f < yLength; f++) { shape::ind2sub(yRank, yShape, i, yCoord); Nd4jIndex yOffset2 = shape::getOffset(yOffset, yShape, yStride, yCoord, yRank); T relProb = y[yOffset2]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { shape::ind2sub(xRank, xShape, f, xCoord); Nd4jIndex xOffset2 = shape::getOffset(xOffset, xShape, xStride, xCoord, xRank); z[zOffset2] = x[xOffset2]; f += yLength; } } } } // update rng state buffer->rewindH(zLength); } }; ////////////////////////////////////////////////////////////////////// /** * This Op produces random values within specified boundaries. 
Distribution is Gaussian */ template<typename T> class GaussianDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, int *xShapeBuffer, T *y, int *yShapeBuffer, T *z, int *zShapeBuffer, T *extraArguments) { __shared__ T epsilon; __shared__ T two_pi; __shared__ Nd4jIndex zLength; __shared__ int zEWS; __shared__ int yEWS; __shared__ T mean; __shared__ T stddev; __shared__ int step; __shared__ T *tZ; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); tZ = (T *) (shmem + sizeof(nd4j::random::RandomBuffer)); zLength = shape::length(zShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); epsilon = (T) 1e-5; two_pi = (T) 2.0 * (T) 3.14159265358979323846; mean = extraArguments[0]; stddev = extraArguments[1]; step = (blockDim.x * gridDim.x); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jIndex e = tid; e < zLength; e += step) { // we need to get random values tZ[threadIdx.x] = buffer->relativeT<T>(e, epsilon, (T) 1.0f); // fix for "next rng value" if (e + 1 >= zLength && e % 2 == 0) { tZ[threadIdx.x+1] = buffer->relativeT<T>(e+1, epsilon, (T) 1.0f); } T realMean = y == z ?
mean : y[e * yEWS]; __syncthreads(); if (e % 2 == 0) z[e *zEWS] = (nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(tZ[threadIdx.x])) * nd4j::math::nd4j_cos<T>(two_pi * tZ[threadIdx.x+1])) * stddev + realMean; else z[e *zEWS] = (nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(tZ[threadIdx.x-1])) * nd4j::math::nd4j_sin<T>(two_pi * tZ[threadIdx.x])) * stddev + realMean; __syncthreads(); } __syncthreads(); devBuffer->rewind(zLength); } #endif static inline void specialOp(Nd4jPointer state, T *x, int *xShapeBuffer, T *y, int *yShapeBuffer, T *z, int *zShapeBuffer, T *extraArguments) { const T two_pi = (T) 2.0 * 3.14159265358979323846; Nd4jIndex zLength = shape::length(zShapeBuffer); int yEWS = shape::elementWiseStride(yShapeBuffer); int zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (zLength / _threads) + 8; // we're enforcing even chunks, since it's mandatory for this algorithm span -= span % 2; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); T mean = extraArguments[0]; T stddev = extraArguments[1]; #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jIndex start = span * tid; Nd4jIndex end = span * (tid + 1); if (end > zLength) end = zLength; T z0, z1; T u0, u1; T lnU0; bool generated = false; for (Nd4jIndex e = start; e < end; e++) { if (!generated) { /* * Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries */ u0 = buffer->relativeT<T>(e, (T) 1e-5f, (T) 1.0f); u1 = buffer->relativeT<T>((e + 1), (T) 1e-5f, (T) 1.0f); lnU0 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(u0)); z0 = lnU0 * nd4j::math::nd4j_cos<T>(two_pi * u1); z1 = lnU0 * nd4j::math::nd4j_sin<T>(two_pi * u1); generated = true; T realMean = y == z ? mean : y[e * yEWS]; z[e * zEWS] = z0 * stddev + realMean; } else { T realMean = y == z ? 
mean : y[e * yEWS]; z[e * zEWS] = z1 * stddev + realMean; generated = false; } } } // update rng state buffer->rewindH(zLength); } }; ////////////////////////////////////////////////////////////////////// /** * This Op produces random values within [0..N], Distribution is binomial */ template<typename T> class BinomialDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, int *xShapeBuffer, T *y, int *yShapeBuffer, T *z, int *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; T prob = extraArguments[1]; __shared__ Nd4jIndex zLength; __shared__ int yEWS; __shared__ int zEWS; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); zLength = shape::length(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jIndex e = tid; e < zLength; e += blockDim.x * gridDim.x) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[(t-1) * yEWS]; } if (randVal < prob) success++; } // we need this, to eliminate excessive code branching in runtime __syncthreads(); // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = (T) success; } __syncthreads(); if (trials > 0) devBuffer->rewind(zLength * trials); } #endif static inline void specialOp(Nd4jPointer state, T *x, int *xShapeBuffer, T *y, int *yShapeBuffer, T *z, int *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; Nd4jIndex zLength = shape::length(zShapeBuffer); int yEWS = shape::elementWiseStride(yShapeBuffer); int zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (zLength / _threads) + 8; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jIndex start = span * tid; Nd4jIndex end = span * (tid + 1); if (end > zLength) end = zLength; T prob = extraArguments[1]; for (Nd4jIndex e = start; e < end; e++) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[(t-1) * yEWS]; } if (randVal < prob) success++; } // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = (T) success; } } // update rng state if (trials > 0) buffer->rewindH(zLength * trials); } }; ////////////////////////////////////////////////////////////////////// /** * This Op produces random values within [0..N], Distribution is binomial */ template<typename T> class BinomialDistributionEx { public: method_XY method_X
method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, int *xShapeBuffer, T *y, int *yShapeBuffer, T *z, int *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; T prob = extraArguments[1]; __shared__ Nd4jIndex zLength; __shared__ int yEWS; __shared__ int zEWS; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); zLength = shape::length(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jIndex e = tid; e < zLength; e += blockDim.x * gridDim.x) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[e * yEWS]; } if (randVal < prob) success++; } // we need this, to eliminate excessive code branching in runtime __syncthreads(); // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = (T) success; } __syncthreads(); if (trials > 0) devBuffer->rewind(zLength * trials); } #endif static inline void specialOp(Nd4jPointer state, T *x, int *xShapeBuffer, T *y, int *yShapeBuffer, T *z, int *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; Nd4jIndex zLength = shape::length(zShapeBuffer); int yEWS = shape::elementWiseStride(yShapeBuffer); int zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (zLength / _threads) + 8; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jIndex start = span * tid; Nd4jIndex end = span * (tid + 1); if (end > zLength) end = zLength; T prob = extraArguments[1]; for (Nd4jIndex e = start; e < end; e++) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[e * yEWS]; } if (randVal < prob) success++; } // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = (T) success; } } // update rng state if (trials > 0) buffer->rewindH(zLength * trials); } }; ////////////////////////////////////////////////////////////////////// // This Op produces random Gaussian values within [mean-2*stddev,mean+2*stddev] template<typename T> class TruncatedNormalDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, int *xShapeBuffer, T *y, int *yShapeBuffer, T *z, int *zShapeBuffer, T *extraArguments) { __shared__ T epsilon; __shared__ T two_pi; __shared__ Nd4jIndex zLength; __shared__ int zEWS; __shared__ int yEWS; __shared__ T mean; 
__shared__ T stddev; __shared__ int step; __shared__ T *tZ; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); tZ = (T *) (shmem + sizeof(nd4j::random::RandomBuffer)); zLength = shape::length(zShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); epsilon = (T) 1e-6f; two_pi = (T) 2.0 * (T) 3.14159265358979323846; mean = extraArguments[0]; stddev = extraArguments[1]; step = (blockDim.x * gridDim.x); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; int middle = zLength % 2 == 0 ? zLength / 2 : zLength / 2 + 1; T result0, result1, u0, u1, z0, z1; T ds = nd4j::math::nd4j_abs<T>(stddev) * (T) 2.0f; for (Nd4jIndex e = tid; e < middle; e += step) { // we need to get random values Nd4jIndex generation0 = 0; T realMean0 = y == z ? mean : y[e * yEWS]; T realMean1 = y == z ? mean : y[(e + middle) * yEWS]; do { T u0 = buffer->relativeT<T>(e + generation0, epsilon, (T) 1.0f); T u1 = buffer->relativeT<T>(e + middle + generation0, epsilon, (T) 1.0f); z0 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(u0)) * nd4j::math::nd4j_cos<T>(two_pi * u1); z1 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(u0)) * nd4j::math::nd4j_sin<T>(two_pi * u1); result0 = z0 * stddev + realMean0; result1 = z1 * stddev + realMean1; generation0 += zLength; } while (nd4j::math::nd4j_abs<T>(realMean0) + nd4j::math::nd4j_abs<T>(result0) > ds || nd4j::math::nd4j_abs<T>(realMean1) + nd4j::math::nd4j_abs<T>(result1) > ds); z[e*zEWS] = result0; if((e+middle) < zLength) z[(e + middle) * zEWS] = result1; } __syncthreads(); devBuffer->rewind(zLength); } #endif static inline void specialOp(Nd4jPointer state, T *x, int *xShapeBuffer, T *y, int *yShapeBuffer, T *z, int *zShapeBuffer, T *extraArguments) { const T two_pi = (T) 2.0 * 3.14159265358979323846; Nd4jIndex zLength = shape::length(zShapeBuffer); int yEWS = shape::elementWiseStride(yShapeBuffer); int zEWS = shape::elementWiseStride(zShapeBuffer); int middle = zLength % 2 == 0 ? zLength / 2 : zLength / 2 + 1; int elementsPerThread = middle / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (middle / _threads) + 8; // we're enforcing even chunks, since it's mandatory for this algorithm span -= span % 2; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); T mean = extraArguments[0]; T stddev = extraArguments[1]; #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jIndex start = span * tid; Nd4jIndex end = span * (tid + 1); if (end > middle) { end = middle; } T z0, z1; T u0, u1; T result0, result1, lnu0; T ds = nd4j::math::nd4j_abs<T>(stddev) * (T) 2.0f; for (Nd4jIndex e = start; e < end; e++) { /* * Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries */ Nd4jIndex generation0 = 0; T realMean0 = y == z ? mean : y[e * yEWS]; T realMean1 = y == z ? 
mean : y[(e + middle) * yEWS]; do { u0 = buffer->relativeT<T>(e + generation0, (T) 1e-6f, (T) 1.0f); u1 = buffer->relativeT<T>((e + middle + generation0), (T) 1e-6f, (T) 1.0f); lnu0 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(u0)); z0 = lnu0 * nd4j::math::nd4j_cos<T>(two_pi * u1); z1 = lnu0 * nd4j::math::nd4j_sin<T>(two_pi * u1); result0 = z0 * stddev + realMean0; result1 = z1 * stddev + realMean1; generation0 += zLength; } while (nd4j::math::nd4j_abs<T>(realMean0) + nd4j::math::nd4j_abs<T>(result0) > ds || nd4j::math::nd4j_abs<T>(realMean1) + nd4j::math::nd4j_abs<T>(result1) > ds); z[e*zEWS] = result0; if((e+middle) < zLength) z[(e + middle) * zEWS] = result1; } } // update rng state buffer->rewindH(zLength); } }; ////////////////////////////////////////////////////////////////////// // This Op produces random Log-normal distribution template<typename T> class LogNormalDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, int *xShapeBuffer, T *y, int *yShapeBuffer, T *z, int *zShapeBuffer, T *extraArguments) { __shared__ T epsilon; __shared__ T two_pi; __shared__ Nd4jIndex zLength; __shared__ int zEWS; __shared__ int yEWS; __shared__ T mean; __shared__ T stddev; __shared__ int step; __shared__ T *tZ; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); tZ = (T *) (shmem + sizeof(nd4j::random::RandomBuffer)); zLength = shape::length(zShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); epsilon = (T) 1e-5; two_pi = (T) 2.0 * (T) 3.14159265358979323846; mean = extraArguments[0]; stddev = extraArguments[1]; step = (blockDim.x * gridDim.x); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jIndex e = tid; e < zLength; e += step) { // we need to get random values tZ[threadIdx.x] = buffer->relativeT<T>(e, epsilon, (T) 1.0f); // fix for "next rng value" if (e + 1 >= zLength && e % 2 == 0) { tZ[threadIdx.x+1] = buffer->relativeT<T>(e+1, epsilon, (T) 1.0f); } T realMean = y == z ? 
mean : y[e * yEWS]; __syncthreads(); if (e % 2 == 0) z[e *zEWS] = nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(tZ[threadIdx.x])) * nd4j::math::nd4j_cos<T>(two_pi * tZ[threadIdx.x+1])) * stddev + realMean); else z[e *zEWS] = nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(tZ[threadIdx.x-1])) * nd4j::math::nd4j_sin<T>(two_pi * tZ[threadIdx.x])) * stddev + realMean); __syncthreads(); } __syncthreads(); devBuffer->rewind(zLength); } #endif static inline void specialOp(Nd4jPointer state, T *x, int *xShapeBuffer, T *y, int *yShapeBuffer, T *z, int *zShapeBuffer, T *extraArguments) { const T two_pi = (T) 2.0 * 3.14159265358979323846; Nd4jIndex zLength = shape::length(zShapeBuffer); int yEWS = shape::elementWiseStride(yShapeBuffer); int zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (zLength / _threads) + 8; // we're enforcing even chunks, since it's mandatory for this algorithm span -= span % 2; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); T mean = extraArguments[0]; T stddev = extraArguments[1]; #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jIndex start = span * tid; Nd4jIndex end = span * (tid + 1); if (end > zLength) end = zLength; T z0, z1; T u0, u1; T lnU0; bool generated = false; for (Nd4jIndex e = start; e < end; e++) { if (!generated) { /* * Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries */ u0 = buffer->relativeT<T>(e, (T) 1e-5f, (T) 1.0f); u1 = buffer->relativeT<T>((e + 1), (T) 1e-5f, (T) 1.0f); lnU0 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(u0)); z0 = lnU0 * nd4j::math::nd4j_cos<T>(two_pi * u1); z1 = lnU0 * nd4j::math::nd4j_sin<T>(two_pi * u1); generated = true; T realMean = y == z ? mean : y[e * yEWS]; z[e * zEWS] = nd4j::math::nd4j_exp<T>(z0 * stddev + realMean); } else { T realMean = y == z ? mean : y[e * yEWS]; z[e * zEWS] = nd4j::math::nd4j_exp<T>(z1 * stddev + realMean); generated = false; } } } // update rng state buffer->rewindH(zLength); } }; } #endif //LIBND4J_SPECIAL_RANDOM_OPS_H
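/*
 * A minimal standalone sketch (not from the original sources) of the
 * Box-Muller math that both specialOp implementations above rely on; the
 * log-normal op is the same transform followed by exp(). Assumes only
 * <cmath>. The ops above obtain u0 and u1 from RandomBuffer::relativeT with
 * an epsilon lower bound so that log(u0) stays finite.
 */
#include <cmath>

// Turn two independent uniforms u0, u1 in (0, 1] into two independent
// N(mean, stddev^2) samples.
static inline void box_muller_pair(double u0, double u1,
                                   double mean, double stddev,
                                   double &n0, double &n1) {
    const double two_pi = 2.0 * 3.14159265358979323846;
    const double r = std::sqrt(-2.0 * std::log(u0));   // radius from u0
    n0 = r * std::cos(two_pi * u1) * stddev + mean;    // first sample
    n1 = r * std::sin(two_pi * u1) * stddev + mean;    // second sample
}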
GB_unop__identity_uint8_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint8_fp64) // op(A') function: GB (_unop_tran__identity_uint8_fp64) // C type: uint8_t // A type: double // cast: uint8_t cij = GB_cast_to_uint8_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint8_fp64) ( uint8_t *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint8_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
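/*
 * A hand-written sketch (not part of the generated file above) of the apply
 * pattern that GB (_unop_apply__identity_uint8_fp64) follows: cast a double
 * array to uint8_t, honoring an optional bitmap mask. GB_cast_to_uint8_t is
 * GraphBLAS-internal, so a plain C cast stands in for it here and the
 * saturation/NaN behavior of the real library is not reproduced.
 */
#include <stdint.h>
#include <stddef.h>

static void apply_identity_uint8_fp64_sketch (uint8_t *Cx, const double *Ax,
    const int8_t *Ab, size_t anz)
{
    for (size_t p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* bitmap: entry not present */
        Cx [p] = (uint8_t) Ax [p] ;             /* typecast, then identity op */
    }
}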
concat_ref.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: jjzeng@openailab.com */ #include <math.h> #include "sys_port.h" #include "module.h" #include "tengine_errno.h" #include "tengine_log.h" #include "tengine_ir.h" #include "../../cpu_node_ops.h" #include "tengine_op.h" #include "concat_param.h" struct shape_dim { int dim[4]; float scale; int zero; }; struct concat_op_param { struct shape_dim* input_shape; int input_counts; int input_dim; struct shape_dim output_shape; int output_dim; int axis; float out_scale; void** input_data; }; static int ref_concat_fp32(const float** in_data, float* out_data, const struct concat_op_param* param, int num_thread) { int axis = param->axis; int concat_dim = 0; for (int ii = 0; ii < param->input_counts; ++ii) { concat_dim += param->input_shape[ii].dim[axis]; } if (concat_dim != param->output_shape.dim[axis]) { fprintf(stderr, "concat dimensions[%d] do not match output[%d]\n", concat_dim, param->output_shape.dim[axis]); return -1; } int out_size, in_size; out_size = 1; for (int ii = 0; ii < axis; ++ii) { out_size *= param->output_shape.dim[ii]; } in_size = 1; for (int ii = axis + 1; ii < param->output_dim; ++ii) { in_size *= param->input_shape[0].dim[ii]; } float* output_ptr = out_data; for (int k = 0; k < out_size; ++k) { // #pragma omp parallel for num_threads(num_thread) for (int j = 0; j < param->input_counts; ++j) { int cp_size = param->input_shape[j].dim[axis] * in_size; memcpy(output_ptr, in_data[j] + k * cp_size, cp_size * sizeof(float)); output_ptr += cp_size; } } return 0; } static int ref_concat_uint8(const uint8_t** in_data, uint8_t* out_data, const struct concat_op_param* param, int num_thread) { int axis = param->axis; int concat_dim = 0; for (int ii = 0; ii < param->input_counts; ++ii) { concat_dim += param->input_shape[ii].dim[axis]; } if (concat_dim != param->output_shape.dim[axis]) { fprintf(stderr, "concat dimensions do not match output: ( %d -- %d )\n", concat_dim, param->output_shape.dim[axis]); return -1; } int outer_size, in_size; outer_size = 1; for (int ii = 0; ii < axis; ++ii) { outer_size *= param->output_shape.dim[ii]; } in_size = 1; for (int ii = axis + 1; ii < param->output_dim; ++ii) { in_size *= param->output_shape.dim[ii]; } int output_size = 1; for (int ii = 0; ii < param->output_dim; ++ii) { output_size *= param->output_shape.dim[ii]; } uint8_t* output_ptr = out_data; float out_scale = param->output_shape.scale; uint8_t out_zero = param->output_shape.zero; for (int k = 0; k < outer_size; ++k) { for (int j = 0; j < param->input_counts; ++j) { int cp_size = param->input_shape[j].dim[axis] * in_size; float scale = param->input_shape[j].scale; uint8_t input_zero = param->input_shape[j].zero; const uint8_t* input_ptr = ( const uint8_t* )(in_data[j] + k * cp_size); if
(scale == out_scale && input_zero == out_zero) { memcpy(output_ptr, input_ptr, cp_size); } else { float t_scale = scale / out_scale; for (int ii = 0; ii < cp_size; ++ii) { output_ptr[ii] = round((input_ptr[ii] - input_zero) * t_scale) + out_zero; } } output_ptr += cp_size; } } return 0; } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct concat_op_param* concat_op_param = ( struct concat_op_param* )sys_malloc(sizeof(struct concat_op_param)); concat_op_param->axis = 0; concat_op_param->input_counts = 1; concat_op_param->input_dim = 1; concat_op_param->input_shape = NULL; concat_op_param->out_scale = 0.1f; concat_op_param->output_dim = 1; exec_node->ops_priv = concat_op_param; return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { sys_free(exec_node->ops_priv); return 0; } static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; struct ir_tensor* output_tensor; output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct concat_op_param* concat_op_param = ( struct concat_op_param* )exec_node->ops_priv; struct concat_param* concat_param = ( struct concat_param* )ir_node->op.param_mem; concat_op_param->axis = concat_param->axis; concat_op_param->input_counts = ir_node->input_num; concat_op_param->input_shape = ( struct shape_dim* )sys_malloc(sizeof(struct shape_dim) * ir_node->input_num); concat_op_param->output_dim = output_tensor->dim_num; for (int ii = 0; ii < output_tensor->dim_num; ii++) { concat_op_param->output_shape.dim[ii] = output_tensor->dims[ii]; concat_op_param->output_shape.scale = output_tensor->scale; concat_op_param->output_shape.zero = output_tensor->zero_point; } concat_op_param->input_data = ( void* )sys_malloc(sizeof(void*) * ir_node->input_num); return 0; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; struct ir_tensor* input_tensor; struct ir_tensor* output_tensor; output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct concat_op_param* concat_op_param = ( struct concat_op_param* )exec_node->ops_priv; void* out_data = output_tensor->data; for (int i = 0; i < ir_node->input_num; i++) { input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[i]); int number = input_tensor->dim_num; for (int j = 0; j < number; j++) { concat_op_param->input_shape[i].dim[j] = input_tensor->dims[j]; concat_op_param->input_shape[i].scale = input_tensor->scale; concat_op_param->input_shape[i].zero = input_tensor->zero_point; } concat_op_param->input_data[i] = input_tensor->data; } if (input_tensor->data_type == TENGINE_DT_FP32) ref_concat_fp32(( const float** )concat_op_param->input_data, out_data, concat_op_param, exec_graph->num_thread); else ref_concat_uint8(( const uint8_t** )concat_op_param->input_data, out_data, concat_op_param, exec_graph->num_thread); return 0; } static int postrun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct concat_op_param* concat_op_param = ( struct concat_op_param* )exec_node->ops_priv; sys_free(concat_op_param->input_shape); sys_free(concat_op_param->input_data); return 0; } static int score(struct node_ops* node_ops, struct exec_graph* 
exec_graph, struct ir_node* exec_node) { return OPS_SCORE_BEST; } static struct node_ops hcl_node_ops = {.prerun = prerun, .run = run, .reshape = NULL, .postrun = postrun, .init_node = init_node, .release_node = release_node, .score = score}; static int reg_concat_hcl_ops(void* arg) { return register_builtin_node_ops(OP_CONCAT, &hcl_node_ops); } static int unreg_concat_hcl_ops(void* arg) { return unregister_builtin_node_ops(OP_CONCAT, &hcl_node_ops); } AUTO_REGISTER_OPS(reg_concat_hcl_ops); AUTO_UNREGISTER_OPS(unreg_concat_hcl_ops);
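/*
 * A fixed-shape sketch (illustrative only, not Tengine API) of the index
 * math in ref_concat_fp32 above: concatenation along `axis` copies, per
 * outer iteration, one contiguous chunk of dim[axis] * inner_size elements
 * from each input. Here two [2][3] inputs are joined along axis 1 into a
 * [2][6] output, so outer_size = 2 and inner_size = 1.
 */
#include <string.h>

static void concat_axis1_sketch(const float a[2][3], const float b[2][3],
                                float out[2][6])
{
    for (int k = 0; k < 2; ++k)
    {
        memcpy(&out[k][0], &a[k][0], 3 * sizeof(float)); /* chunk of input 0 */
        memcpy(&out[k][3], &b[k][0], 3 * sizeof(float)); /* chunk of input 1 */
    }
}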
bigfile.c
#define _POSIX_C_SOURCE 200809L // FIXME: scandir needs this #include <stdio.h> #include <stdlib.h> #include <string.h> #include <alloca.h> #include <stdint.h> #include <stddef.h> #include <limits.h> #include <stdarg.h> #include <errno.h> #include <sys/stat.h> #include <sys/types.h> #include <sys/time.h> #include <unistd.h> #include <dirent.h> #include "bigfile.h" #include "bigfile-internal.h" #define EXT_HEADER "header" #define EXT_ATTR "attr" #define EXT_ATTR_V2 "attr-v2" #define EXT_DATA "%06X" #define FILEID_ATTR -2 #define FILEID_ATTR_V2 -3 #define FILEID_HEADER -1 #if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) #include <stdatomic.h> static char * volatile _Atomic ERRORSTR = NULL; #else static char * volatile ERRORSTR = NULL; #endif static size_t CHUNK_BYTES = 64 * 1024 * 1024; /* Internal AttrSet API */ struct BigAttrSet { /* All members are readonly */ int dirty; char * attrbuf; size_t bufused; size_t bufsize; BigAttr * attrlist; size_t listused; size_t listsize; }; static BigAttrSet * attrset_create(void); static void attrset_free(BigAttrSet * attrset); static int attrset_read_attr_set_v1(BigAttrSet * attrset, const char * basename); static int attrset_read_attr_set_v2(BigAttrSet * attrset, const char * basename); static int attrset_write_attr_set_v2(BigAttrSet * attrset, const char * basename); static BigAttr * attrset_lookup_attr(BigAttrSet * attrset, const char * attrname); static int attrset_remove_attr(BigAttrSet * attrset, const char * attrname); static BigAttr * attrset_list_attrs(BigAttrSet * attrset, size_t * count); static int attrset_set_attr(BigAttrSet * attrset, const char * attrname, const void * data, const char * dtype, int nmemb); static int attrset_get_attr(BigAttrSet * attrset, const char * attrname, void * data, const char * dtype, int nmemb); /* Internal dtype API */ static int dtype_convert_simple(void * dst, const char * dstdtype, const void * src, const char * srcdtype, size_t nmemb); /* Check dtype is valid */ static int dtype_isvalid(const char * dtype); /* postfix detection, 1 if postfix is a postfix of str */ static int endswith(const char * str, const char * postfix) { const char * p = str + strlen(str) - 1; const char * q = postfix + strlen(postfix) - 1; for(; p >= str && q >= postfix; p --, q--) { if(*p != *q) return 0; } return 1; } /* global settings */ int big_file_set_buffer_size(size_t bytes) { CHUNK_BYTES = bytes; return 0; } /* Error handling */ char * big_file_get_error_message() { return ERRORSTR; } void big_file_set_error_message(char * msg) { char * errorstr; if(msg != NULL) msg = _strdup(msg); #if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) errorstr = atomic_exchange(&ERRORSTR, msg); #elif defined(__ATOMIC_SEQ_CST) errorstr = __atomic_exchange_n(&ERRORSTR, msg, __ATOMIC_SEQ_CST); #elif _OPENMP >= 201307 /* OpenMP 4.0 supports atomic swap capture */ #pragma omp atomic capture { errorstr = ERRORSTR; ERRORSTR = msg; } #else errorstr = ERRORSTR; ERRORSTR = msg; /* really should have propagated errors over the stack!
*/ #warning "enable -std=c11 or -std=gnu11, or use gcc for thread-friendly error handling" #endif if(errorstr) free(errorstr); } static char * _path_join(const char * part1, const char * part2) { size_t l1 = part1?strlen(part1):0; size_t l2 = strlen(part2); char * result = malloc(l1 + l2 + 10); if(l1 == 0) { sprintf(result, "%s", part2); } else if(part1[l1 - 1] == '/') { sprintf(result, "%s%s", part1, part2); } else { sprintf(result, "%s/%s", part1, part2); } return result; } static int _big_file_path_is_block(const char * basename) { FILE * fheader = _big_file_open_a_file(basename, FILEID_HEADER, "r", 0); if(fheader == NULL) { return 0; } fclose(fheader); return 1; } void _big_file_raise(const char * msg, const char * file, const int line, ...) { if(!msg) { if(ERRORSTR) { msg = ERRORSTR; } else { msg = "UNKNOWN ERROR"; } } size_t len = strlen(msg) + strlen(file) + 512; char * errorstr = malloc(len); char * fmtstr = alloca(len); sprintf(fmtstr, "%s @(%s:%d)", msg, file, line); va_list va; va_start(va, line); vsprintf(errorstr, fmtstr, va); va_end(va); big_file_set_error_message(errorstr); free(errorstr); } /* BigFile */ int big_file_open(BigFile * bf, const char * basename) { struct stat st; RAISEIF(0 != stat(basename, &st), ex_stat, "Big File `%s' does not exist (%s)", basename, strerror(errno)); bf->basename = _strdup(basename); return 0; ex_stat: return -1; } int big_file_create(BigFile * bf, const char * basename) { bf->basename = _strdup(basename); RAISEIF(0 != _big_file_mksubdir_r(NULL, basename), ex_subdir, NULL); return 0; ex_subdir: return -1; } struct bblist { struct bblist * next; char * blockname; }; static int (filter)(const struct dirent * ent) { if(ent->d_name[0] == '.') return 0; return 1; } /* taken from glibc; alphasort is not always available, e.g. with c99.
*/ static int _alphasort (const struct dirent **a, const struct dirent **b) { return strcoll ((*a)->d_name, (*b)->d_name); } static struct bblist * listbigfile_r(const char * basename, char * blockname, struct bblist * bblist) { struct dirent **namelist; int n; int i; char * current; current = _path_join(basename, blockname); n = scandir(current, &namelist, filter, _alphasort); free(current); if (n < 0) return bblist; for(i = 0; i < n ; i ++) { char * blockname1 = _path_join(blockname, namelist[i]->d_name); char * fullpath1 = _path_join(basename, blockname1); if(_big_file_path_is_block(fullpath1)) { struct bblist * n = malloc(sizeof(struct bblist) + strlen(blockname1) + 1); n->next = bblist; n->blockname = (char*) &n[1]; strcpy(n->blockname, blockname1); bblist = n; } else { bblist = listbigfile_r(basename, blockname1, bblist); } free(fullpath1); free(blockname1); free(namelist[i]); } free(namelist); return bblist; } int big_file_list(BigFile * bf, char *** blocknames, int * Nblocks) { struct bblist * bblist = listbigfile_r(bf->basename, "", NULL); struct bblist * p; int N = 0; int i; for(p = bblist; p; p = p->next) { N ++; } *Nblocks = N; *blocknames = malloc(sizeof(char*) * N); for(i = 0; i < N; i ++) { (*blocknames)[i] = _strdup(bblist->blockname); p = bblist; bblist = bblist->next; free(p); } return 0; } int big_file_open_block(BigFile * bf, BigBlock * block, const char * blockname) { char * basename = _path_join(bf->basename, blockname); int rt = _big_block_open(block, basename); free(basename); return rt; } int big_file_create_block(BigFile * bf, BigBlock * block, const char * blockname, const char * dtype, int nmemb, int Nfile, const size_t fsize[]) { RAISEIF(0 != _big_file_mksubdir_r(bf->basename, blockname), ex_subdir, NULL); char * basename = _path_join(bf->basename, blockname); int rt = _big_block_create(block, basename, dtype, nmemb, Nfile, fsize); free(basename); return rt; ex_subdir: return -1; } int big_file_close(BigFile * bf) { free(bf->basename); bf->basename = NULL; return 0; } static void sysvsum(unsigned int * sum, void * buf, size_t size); /* Bigblock */ int _big_block_open(BigBlock * bb, const char * basename) { memset(bb, 0, sizeof(bb[0])); if(basename == NULL) basename = "/."; bb->basename = _strdup(basename); bb->dirty = 0; bb->attrset = attrset_create(); /* COMPATIBLE WITH V1 ATTR FILES */ RAISEIF(0 != attrset_read_attr_set_v1(bb->attrset, bb->basename), ex_readattr, NULL); RAISEIF(0 != attrset_read_attr_set_v2(bb->attrset, bb->basename), ex_readattr, NULL); if (!endswith(bb->basename, "/.") && 0 != strcmp(bb->basename, ".")) { FILE * fheader = _big_file_open_a_file(bb->basename, FILEID_HEADER, "r", 1); RAISEIF (!fheader, ex_open, NULL); RAISEIF( (1 != fscanf(fheader, " DTYPE: %s", bb->dtype)) || (1 != fscanf(fheader, " NMEMB: %d", &(bb->nmemb))) || (1 != fscanf(fheader, " NFILE: %d", &(bb->Nfile))), ex_fscanf, "Failed to read header of block `%s' (%s)", bb->basename, strerror(errno)); RAISEIF(bb->Nfile < 0 || bb->Nfile >= INT_MAX-1, ex_fscanf, "Unreasonable value for Nfile in header of block `%s' (%d)",bb->basename,bb->Nfile); RAISEIF(bb->nmemb < 0, ex_fscanf, "Unreasonable value for nmemb in header of block `%s' (%d)",bb->basename,bb->nmemb); RAISEIF(!dtype_isvalid(bb->dtype), ex_fscanf, "Unreasonable value for dtype in header of block `%s' (%s)",bb->basename,bb->dtype); bb->fsize = calloc(bb->Nfile + 1, sizeof(size_t)); RAISEIF(!bb->fsize, ex_fsize, "Failed to alloc memory of block `%s'", bb->basename); bb->foffset = calloc(bb->Nfile + 1, sizeof(size_t)); 
RAISEIF(!bb->foffset, ex_foffset, "Failed to alloc memory of block `%s'", bb->basename); bb->fchecksum = calloc(bb->Nfile + 1, sizeof(int)); RAISEIF(!bb->fchecksum, ex_fchecksum, "Failed to alloc memory `%s'", bb->basename); int i; for(i = 0; i < bb->Nfile; i ++) { int fid; size_t size; unsigned int cksum; unsigned int sysv; RAISEIF(4 != fscanf(fheader, " " EXT_DATA ": %td : %u : %u", &fid, &size, &cksum, &sysv), ex_fscanf1, "Failed to readin physical file layout `%s' %d (%s)", bb->basename, fid, strerror(errno)); RAISEIF(fid < 0 || fid >= bb->Nfile, ex_fscanf1, "Non-existent file referenced: `%s' (%d)", bb->basename, fid); bb->fsize[fid] = size; bb->fchecksum[fid] = cksum; } bb->foffset[0] = 0; for(i = 0; i < bb->Nfile; i ++) { bb->foffset[i + 1] = bb->foffset[i] + bb->fsize[i]; } bb->size = bb->foffset[bb->Nfile]; fclose(fheader); return 0; ex_fscanf1: free(bb->fchecksum); ex_fchecksum: free(bb->foffset); ex_foffset: free(bb->fsize); ex_fsize: ex_fscanf: fclose(fheader); ex_open: return -1; } else { /* The meta block of a big file, has on extra files. */ bb->Nfile = 0; strcpy(bb->dtype, "####"); return 0; } ex_readattr: return -1; } int _big_block_grow_internal(BigBlock * bb, int Nfile_grow, const size_t fsize_grow[]) { int Nfile = Nfile_grow + bb->Nfile; size_t * fsize = calloc(Nfile + 1, sizeof(size_t)); size_t * foffset = calloc(Nfile + 1, sizeof(size_t)); unsigned int * fchecksum = calloc(Nfile + 1, sizeof(unsigned int)); int i; for(i = 0; i < bb->Nfile; i ++) { fsize[i] = bb->fsize[i]; fchecksum[i] = bb->fchecksum[i]; } for(i = bb->Nfile; i < Nfile; i ++) { fsize[i] = fsize_grow[i - bb->Nfile]; fchecksum[i] = 0; } foffset[0] = 0; for(i = 0; i < Nfile; i ++) { foffset[i + 1] = foffset[i] + fsize[i]; } free(bb->fsize); free(bb->foffset); free(bb->fchecksum); bb->fsize = fsize; bb->foffset = foffset; bb->fchecksum = fchecksum; bb->Nfile = Nfile; bb->size = bb->foffset[Nfile]; bb->dirty = 1; return 0; } int big_block_grow(BigBlock * bb, int Nfile_grow, const size_t fsize_grow[]) { int oldNfile = bb->Nfile; _big_block_grow_internal(bb, Nfile_grow, fsize_grow); int i; /* now truncate the new files */ for(i = oldNfile; i < bb->Nfile; i ++) { FILE * fp = _big_file_open_a_file(bb->basename, i, "w", 1); RAISEIF(fp == NULL, ex_fileio, NULL); fclose(fp); } return 0; ex_fileio: return -1; } int _big_block_create_internal(BigBlock * bb, const char * basename, const char * dtype, int nmemb, int Nfile, const size_t fsize[]) { memset(bb, 0, sizeof(bb[0])); if(basename == NULL) basename = "/."; RAISEIF ( strchr(basename, ' ') || strchr(basename, '\t') || strchr(basename, '\n'), ex_name, "Column name cannot contain blanks (space, tab or newline)" ); bb->basename = _strdup(basename); bb->attrset = attrset_create(); bb->attrset->dirty = 1; if (!endswith(bb->basename, "/.") && 0 != strcmp(bb->basename, ".")) { if(dtype == NULL) { dtype = "i8"; Nfile = 0; fsize = NULL; } /* always use normalized dtype in files. 
*/ _dtype_normalize(bb->dtype, dtype); bb->Nfile = Nfile; bb->nmemb = nmemb; bb->fsize = calloc(bb->Nfile + 1, sizeof(size_t)); RAISEIF(!bb->fsize, ex_fsize, "No memory"); bb->fchecksum = calloc(bb->Nfile + 1, sizeof(int)); RAISEIF(!bb->fchecksum, ex_fchecksum, "No memory"); bb->foffset = calloc(bb->Nfile + 1, sizeof(size_t)); RAISEIF(!bb->foffset, ex_foffset, "No memory"); int i; bb->foffset[0] = 0; for(i = 0; i < bb->Nfile; i ++) { bb->fsize[i] = fsize[i]; bb->foffset[i + 1] = bb->foffset[i] + bb->fsize[i]; bb->fchecksum[i] = 0; } bb->size = bb->foffset[bb->Nfile]; bb->dirty = 1; RAISEIF(0 != big_block_flush(bb), ex_flush, NULL); bb->dirty = 0; return 0; ex_flush: attrset_free(bb->attrset); free(bb->foffset); ex_foffset: free(bb->fchecksum); ex_fchecksum: free(bb->fsize); ex_fsize: return -1; } else { /* special meta block of a file */ bb->Nfile = 0; /* never flush the header */ bb->dirty = 0; RAISEIF(0 != big_block_flush(bb), ex_flush2, NULL); bb->dirty = 0; return 0; ex_flush2: attrset_free(bb->attrset); return -1; } ex_name: return -1; } int _big_block_create(BigBlock * bb, const char * basename, const char * dtype, int nmemb, int Nfile, const size_t fsize[]) { int rt = _big_block_create_internal(bb, basename, dtype, nmemb, Nfile, fsize); int i; RAISEIF(rt != 0, ex_internal, NULL); /* now truncate all files */ for(i = 0; i < bb->Nfile; i ++) { FILE * fp = _big_file_open_a_file(bb->basename, i, "w", 1); RAISEIF(fp == NULL, ex_fileio, NULL); fclose(fp); } ex_internal: return rt; ex_fileio: _big_block_close_internal(bb); return -1; } void big_block_set_dirty(BigBlock * block, int value) { block->dirty = value; } int big_block_flush(BigBlock * block) { FILE * fheader = NULL; if(block->dirty) { int i; fheader = _big_file_open_a_file(block->basename, FILEID_HEADER, "w+", 1); RAISEIF(fheader == NULL, ex_fileio, NULL); RAISEIF( (0 > fprintf(fheader, "DTYPE: %s\n", block->dtype)) || (0 > fprintf(fheader, "NMEMB: %d\n", block->nmemb)) || (0 > fprintf(fheader, "NFILE: %d\n", block->Nfile)), ex_fprintf, "Writing file header"); for(i = 0; i < block->Nfile; i ++) { unsigned int s = block->fchecksum[i]; unsigned int r = (s & 0xffff) + ((s & 0xffffffff) >> 16); unsigned int checksum = (r & 0xffff) + (r >> 16); RAISEIF(0 > fprintf(fheader, EXT_DATA ": %td : %u : %u\n", i, block->fsize[i], block->fchecksum[i], checksum), ex_fprintf, "Writing file information to header"); } fclose(fheader); block->dirty = 0; } if(block->attrset->dirty) { RAISEIF(0 != attrset_write_attr_set_v2(block->attrset, block->basename), ex_write_attr, NULL); block->attrset->dirty = 0; } return 0; ex_fprintf: fclose(fheader); ex_write_attr: ex_fileio: return -1; } void _big_block_close_internal(BigBlock * block) { attrset_free(block->attrset); free(block->basename); free(block->fchecksum); free(block->fsize); free(block->foffset); memset(block, 0, sizeof(BigBlock)); } int big_block_close(BigBlock * block) { int rt = 0; RAISEIF(0 != big_block_flush(block), ex_flush, NULL); finalize: _big_block_close_internal(block); return rt; ex_flush: rt = -1; goto finalize; } BigAttr * big_block_lookup_attr(BigBlock * block, const char * attrname) { BigAttr * attr = attrset_lookup_attr(block->attrset, attrname); return attr; } int big_block_remove_attr(BigBlock * block, const char * attrname) { return attrset_remove_attr(block->attrset, attrname); } BigAttr * big_block_list_attrs(BigBlock * block, size_t * count) { return attrset_list_attrs(block->attrset, count); } int big_block_set_attr(BigBlock * block, const char * attrname, const void * 
data, const char * dtype, int nmemb) { return attrset_set_attr(block->attrset, attrname, data, dtype, nmemb); } int big_block_get_attr(BigBlock * block, const char * attrname, void * data, const char * dtype, int nmemb) { return attrset_get_attr(block->attrset, attrname, data, dtype, nmemb); } /* * * seek ptr to offset, on bb * * offset: 0 : Start * -1 : End * -2 : End - 1 * * returns 0 * * 0 4 5 10 140 * */ int big_block_seek(BigBlock * bb, BigBlockPtr * ptr, ptrdiff_t offset) { /* handle 0 sized files */ if(bb->size == 0 && offset == 0) { ptr->fileid = 0; ptr->roffset = 0; ptr->aoffset = 0; return 0; } /* handle negatives */ if(offset < 0) offset += bb->foffset[bb->Nfile]; RAISEIF(offset > bb->size, ex_eof, /* over the end of file */ /* note that we allow seeking at the end of file */ "Over the end of file %td of %td", offset, bb->size); { int left = 0; int right = bb->Nfile; while(right > left + 1) { int mid = ((right - left) >> 1) + left; if(bb->foffset[mid] <= offset) { left = mid; } else { right = mid; } } ptr->fileid = left; ptr->roffset = offset - bb->foffset[left]; ptr->aoffset = offset; return 0; } ex_eof: return -1; } int big_block_seek_rel(BigBlock * bb, BigBlockPtr * ptr, ptrdiff_t rel) { ptrdiff_t abs = bb->foffset[ptr->fileid] + ptr->roffset + rel; return big_block_seek(bb, ptr, abs); } int big_block_eof(BigBlock * bb, BigBlockPtr * ptr) { ptrdiff_t abs = bb->foffset[ptr->fileid] + ptr->roffset; return abs >= bb->size; } /* * this function will alloc memory in array and read from offset start * size of rows from the block. * free(array->data) after using it. * * at most size rows are read, array.dims[0] has the number that's read. * * if dtype is NULL use the dtype of the block. * otherwise cast the array to the dtype * */ int big_block_read_simple(BigBlock * bb, ptrdiff_t start, ptrdiff_t size, BigArray * array, const char * dtype) { BigBlockPtr ptr = {0}; if(dtype == NULL) { dtype = bb->dtype; } void * buffer; size_t dims[2]; RAISEIF(0 != big_block_seek(bb, &ptr, start), ex_seek, "failed to seek" ); if(start + size > bb->size){ size = bb->size - start; } RAISEIF(size < 0, ex_seek, "failed to seek"); buffer = malloc(size * big_file_dtype_itemsize(dtype) * bb->nmemb); dims[0] = size; dims[1] = bb->nmemb; big_array_init(array, buffer, dtype, 2, dims, NULL); RAISEIF(0 != big_block_read(bb, &ptr, array), ex_read, "failed to read"); return 0; ex_read: free(buffer); array->data = NULL; ex_seek: return -1; } int big_block_read(BigBlock * bb, BigBlockPtr * ptr, BigArray * array) { char * chunkbuf = malloc(CHUNK_BYTES); int nmemb = bb->nmemb ? bb->nmemb : 1; int felsize = big_file_dtype_itemsize(bb->dtype) * nmemb; size_t CHUNK_SIZE = CHUNK_BYTES / felsize; BigArray chunk_array = {0}; size_t dims[2]; dims[0] = CHUNK_SIZE; dims[1] = bb->nmemb; BigArrayIter chunk_iter; BigArrayIter array_iter; FILE * fp = NULL; ptrdiff_t toread = 0; RAISEIF(chunkbuf == NULL, ex_malloc, "Not enough memory for chunkbuf"); big_array_init(&chunk_array, chunkbuf, bb->dtype, 2, dims, NULL); big_array_iter_init(&array_iter, array); toread = array->size / nmemb; ptrdiff_t abs = bb->foffset[ptr->fileid] + ptr->roffset + toread; RAISEIF(abs > bb->size, ex_eof, "Reading beyond the block `%s` at (%d:%td)", bb->basename, ptr->fileid, ptr->roffset * felsize); while(toread > 0 && ! 
big_block_eof(bb, ptr)) { size_t chunk_size = CHUNK_SIZE; /* remaining items in the file */ if(chunk_size > bb->fsize[ptr->fileid] - ptr->roffset) { chunk_size = bb->fsize[ptr->fileid] - ptr->roffset; } /* remaining items to read */ if(chunk_size > toread) { chunk_size = toread; } RAISEIF(chunk_size == 0, ex_insuf, "Insufficient number of items in file `%s' at (%d:%td)", bb->basename, ptr->fileid, ptr->roffset * felsize); /* read to the beginning of chunk */ big_array_iter_init(&chunk_iter, &chunk_array); fp = _big_file_open_a_file(bb->basename, ptr->fileid, "r", 1); RAISEIF(fp == NULL, ex_open, NULL); RAISEIF(0 > fseek(fp, ptr->roffset * felsize, SEEK_SET), ex_seek, "Failed to seek in block `%s' at (%d:%td) (%s)", bb->basename, ptr->fileid, ptr->roffset * felsize, strerror(errno)); RAISEIF(chunk_size != fread(chunkbuf, felsize, chunk_size, fp), ex_read, "Failed to read in block `%s' at (%d:%td) (%s)", bb->basename, ptr->fileid, ptr->roffset * felsize, strerror(errno)); fclose(fp); fp = NULL; /* now translate the data from chunkbuf to mptr */ RAISEIF(0 != _dtype_convert(&array_iter, &chunk_iter, chunk_size * bb->nmemb), ex_convert, NULL); toread -= chunk_size; RAISEIF(0 != big_block_seek_rel(bb, ptr, chunk_size), ex_blockseek, NULL); } free(chunkbuf); return 0; ex_read: ex_seek: fclose(fp); ex_insuf: ex_convert: ex_blockseek: ex_open: ex_eof: free(chunkbuf); ex_malloc: return -1; } int big_block_write(BigBlock * bb, BigBlockPtr * ptr, BigArray * array) { if(array->size == 0) return 0; /* the file header is modified */ bb->dirty = 1; char * chunkbuf = malloc(CHUNK_BYTES); int nmemb = bb->nmemb ? bb->nmemb : 1; int felsize = big_file_dtype_itemsize(bb->dtype) * nmemb; size_t CHUNK_SIZE = CHUNK_BYTES / felsize; BigArray chunk_array = {0}; size_t dims[2]; dims[0] = CHUNK_SIZE; dims[1] = bb->nmemb; BigArrayIter chunk_iter; BigArrayIter array_iter; ptrdiff_t towrite = 0; FILE * fp; RAISEIF(chunkbuf == NULL, ex_malloc, "not enough memory for chunkbuf of size %d bytes", CHUNK_BYTES); big_array_init(&chunk_array, chunkbuf, bb->dtype, 2, dims, NULL); big_array_iter_init(&array_iter, array); towrite = array->size / nmemb; ptrdiff_t abs = bb->foffset[ptr->fileid] + ptr->roffset + towrite; RAISEIF(abs > bb->size, ex_eof, "Writing beyond the block `%s` at (%d:%td)", bb->basename, ptr->fileid, ptr->roffset * felsize); while(towrite > 0 && ! 
big_block_eof(bb, ptr)) { size_t chunk_size = CHUNK_SIZE; /* remaining items in the file */ if(chunk_size > bb->fsize[ptr->fileid] - ptr->roffset) { chunk_size = bb->fsize[ptr->fileid] - ptr->roffset; } /* remaining items to read */ if(chunk_size > towrite) { chunk_size = towrite; } /* write from the beginning of chunk */ big_array_iter_init(&chunk_iter, &chunk_array); /* now translate the data to format in the file*/ RAISEIF(0 != _dtype_convert(&chunk_iter, &array_iter, chunk_size * bb->nmemb), ex_convert, NULL); sysvsum(&bb->fchecksum[ptr->fileid], chunkbuf, chunk_size * felsize); fp = _big_file_open_a_file(bb->basename, ptr->fileid, "r+", 1); RAISEIF(fp == NULL, ex_open, NULL); RAISEIF(0 > fseek(fp, ptr->roffset * felsize, SEEK_SET), ex_seek, "Failed to seek in block `%s' at (%d:%td) (%s)", bb->basename, ptr->fileid, ptr->roffset * felsize, strerror(errno)); RAISEIF(chunk_size != fwrite(chunkbuf, felsize, chunk_size, fp), ex_write, "Failed to write in block `%s' at (%d:%td) (%s)", bb->basename, ptr->fileid, ptr->roffset * felsize, strerror(errno)); fclose(fp); towrite -= chunk_size; RAISEIF(0 != big_block_seek_rel(bb, ptr, chunk_size), ex_blockseek, NULL); } free(chunkbuf); return 0; ex_write: ex_seek: fclose(fp); ex_convert: ex_open: ex_blockseek: ex_eof: free(chunkbuf); ex_malloc: return -1; } /** * dtype stuff * */ #define MACHINE_ENDIANNESS MACHINE_ENDIAN_F() static char MACHINE_ENDIAN_F(void) { uint32_t i = 0x01234567; return ((*((uint8_t*)(&i))) == 0x67)?'<':'>'; } int _dtype_normalize(char * dst, const char * src) { /* normalize a dtype, so that * dst[0] is the endian-ness * dst[1] is the type kind char * dtype[2:] is the width * */ memset(dst, 0, 8); switch(src[0]) { case '<': case '>': case '|': case '=': strncpy(dst, src, 8); break; default: dst[0] = '='; strncpy(dst + 1, src, 7); } dst[7]='\0'; if(dst[0] == '=') { dst[0] = MACHINE_ENDIANNESS; } if(dst[0] == '|') { dst[0] = MACHINE_ENDIANNESS; } return 0; } /*Check that the passed dtype is valid. 
* Returns 1 if valid, 0 if invalid*/ static int dtype_isvalid(const char * dtype) { if(!dtype) return 0; switch(dtype[0]) { case '<': case '>': case '|': case '=': break; default: return 0; } switch(dtype[1]) { case 'S': case 'b': case 'i': case 'f': case 'u': case 'c': break; default: return 0; } int width = atoi(&dtype[2]); if(width > 16 || width <= 0) return 0; return 1; } int big_file_dtype_itemsize(const char * dtype) { char ndtype[8]; _dtype_normalize(ndtype, dtype); return atoi(&ndtype[2]); } int big_file_dtype_kind(const char * dtype) { char ndtype[8]; _dtype_normalize(ndtype, dtype); return ndtype[1]; } int big_array_init(BigArray * array, void * buf, const char * dtype, int ndim, const size_t dims[], const ptrdiff_t strides[]) { memset(array, 0, sizeof(array[0])); _dtype_normalize(array->dtype, dtype); array->data = buf; array->ndim = ndim; int i; memset(array->dims, 0, sizeof(ptrdiff_t) * 32); memset(array->strides, 0, sizeof(ptrdiff_t) * 32); array->size = 1; for(i = 0; i < ndim; i ++) { array->dims[i] = dims[i]; array->size *= dims[i]; } if(strides != NULL) { for(i = 0; i < ndim; i ++) { array->strides[i] = strides[i]; } } else { array->strides[ndim - 1] = big_file_dtype_itemsize(dtype); for(i = ndim - 2; i >= 0; i --) { array->strides[i] = array->strides[i + 1] * array->dims[i + 1]; } } return 0; } int big_array_iter_init(BigArrayIter * iter, BigArray * array) { memset(iter, 0, sizeof(iter[0])); iter->array = array; memset(iter->pos, 0, sizeof(ptrdiff_t) * 32); iter->dataptr = array->data; /* see if the iter is contiguous */ size_t elsize = big_file_dtype_itemsize(array->dtype); int i = 0; ptrdiff_t stride_contiguous = elsize; iter->contiguous = 1; for(i = array->ndim - 1; i >= 0; i --) { if(array->strides[i] != stride_contiguous) { iter->contiguous = 0; break; } stride_contiguous *= array->dims[i]; } return 0; } void big_array_iter_advance(BigArrayIter * iter) { BigArray * array = iter->array; if(iter->contiguous) { iter->dataptr = (char*) iter->dataptr + array->strides[array->ndim - 1]; return; } int k; iter->pos[array->ndim - 1] ++; iter->dataptr = ((char*) iter->dataptr) + array->strides[array->ndim - 1]; for(k = array->ndim - 1; k >= 0; k --) { if(iter->pos[k] == array->dims[k]) { iter->dataptr = ((char*) iter->dataptr) - array->strides[k] * iter->pos[k]; iter->pos[k] = 0; if(k > 0) { iter->pos[k - 1] ++; iter->dataptr = ((char*) iter->dataptr) + array->strides[k - 1]; } } else { break; } } } typedef struct {double r; double i;} cplx128_t; typedef struct {float r; float i;} cplx64_t; typedef union { char *a1; char *b1; int64_t *i8; uint64_t *u8; double *f8; int32_t *i4; uint32_t *u4; float *f4; cplx128_t * c16; cplx64_t * c8; void * v; } variant_t; /* format data in dtype to a string in buffer */ void big_file_dtype_format(char * buffer, const char * dtype, const void * data, const char * fmt) { char ndtype[8]; char ndtype2[8]; variant_t p; /* handle the endianness stuff in case it is not machine */ char converted[128]; _dtype_normalize(ndtype2, dtype); ndtype2[0] = '='; _dtype_normalize(ndtype, ndtype2); dtype_convert_simple(converted, ndtype, data, dtype, 1); p.v = converted; #define FORMAT1(dtype, defaultfmt) \ if(0 == strcmp(ndtype + 1, # dtype)) { \ if(fmt == NULL) fmt = defaultfmt; \ sprintf(buffer, fmt, *p.dtype); \ } else #define FORMAT2(dtype, defaultfmt) \ if(0 == strcmp(ndtype + 1, # dtype)) { \ if(fmt == NULL) fmt = defaultfmt; \ sprintf(buffer, fmt, p.dtype->r, p.dtype->i); \ } else FORMAT1(a1, "%c") FORMAT1(b1, "%d") FORMAT1(i8, "%ld") FORMAT1(i4, "%d") 
FORMAT1(u8, "%lu") FORMAT1(u4, "%u") FORMAT1(f8, "%g") FORMAT1(f4, "%g") FORMAT2(c8, "%g+%gI") FORMAT2(c16, "%g+%gI") { sprintf(buffer, "<%s>", ndtype); } } /* parse data in dtype to a string in buffer */ int big_file_dtype_parse(const char * buffer, const char * dtype, void * data, const char * fmt) { char ndtype[8]; char ndtype2[8]; variant_t p; /* handle the endianness stuff in case it is not machine */ char converted[128]; _dtype_normalize(ndtype2, dtype); ndtype2[0] = '='; _dtype_normalize(ndtype, ndtype2); p.v = converted; #define PARSE1(dtype, defaultfmt) \ if(0 == strcmp(ndtype + 1, # dtype)) { \ if(fmt == NULL) fmt = defaultfmt; \ sscanf(buffer, fmt, p.dtype); \ } #define PARSE2(dtype, defaultfmt) \ if(0 == strcmp(ndtype + 1, # dtype)) { \ if(fmt == NULL) fmt = defaultfmt; \ sscanf(buffer, fmt, &p.dtype->r, &p.dtype->i); \ } PARSE1(a1, "%c") else PARSE1(i8, "%ld") else PARSE1(i4, "%d") else PARSE1(u8, "%lu") else PARSE1(u4, "%u") else PARSE1(f8, "%lf") else PARSE1(f4, "%f") else PARSE2(c8, "%f + %f I") else PARSE2(c16, "%lf + %lf I") else return -1; dtype_convert_simple(data, dtype, converted, ndtype, 1); return 0; } static int dtype_convert_simple(void * dst, const char * dstdtype, const void * src, const char * srcdtype, size_t nmemb) { BigArray dst_array, src_array; BigArrayIter dst_iter, src_iter; big_array_init(&dst_array, dst, dstdtype, 1, &nmemb, NULL); big_array_init(&src_array, (void*) src, srcdtype, 1, &nmemb, NULL); big_array_iter_init(&dst_iter, &dst_array); big_array_iter_init(&src_iter, &src_array); return _dtype_convert(&dst_iter, &src_iter, nmemb); } static int cast(BigArrayIter * dst, BigArrayIter * src, size_t nmemb); static void byte_swap(BigArrayIter * array, size_t nmemb); int _dtype_convert(BigArrayIter * dst, BigArrayIter * src, size_t nmemb) { /* cast buf2 of dtype2 into buf1 of dtype1 */ /* match src to machine endianness */ if(src->array->dtype[0] != MACHINE_ENDIANNESS) { BigArrayIter iter = *src; byte_swap(&iter, nmemb); } BigArrayIter iter1 = *dst; BigArrayIter iter2 = *src; if(0 != cast(&iter1, &iter2, nmemb)) { /* cast is not supported */ return -1; } /* match dst to machine endianness */ if(dst->array->dtype[0] != MACHINE_ENDIANNESS) { BigArrayIter iter = *dst; byte_swap(&iter, nmemb); } *dst = iter1; *src = iter2; return 0; } static void byte_swap(BigArrayIter * iter, size_t nmemb) { /* swap a buffer in-place */ int elsize = big_file_dtype_itemsize(iter->array->dtype); if(elsize == 1) return; /* need byte swap; do it now on buf2 */ /* XXX: this may still be wrong. 
*/ ptrdiff_t i; int half = elsize >> 1; for(i = 0; i < nmemb; i ++) { int j; char * ptr = iter->dataptr; for(j = 0; j < half; j ++) { char tmp = ptr[j]; ptr[j] = ptr[elsize - j - 1]; ptr[elsize - j - 1] = tmp; } big_array_iter_advance(iter); } } #define CAST(d1, t1, d2, t2) \ if((0 == strcmp(d1, dst->array->dtype + 1)) && (0 == strcmp(d2, src->array->dtype + 1))) { \ for(i = 0; i < nmemb; i ++) { \ t1 * p1 = dst->dataptr; t2 * p2 = src->dataptr; \ * p1 = * p2; \ big_array_iter_advance(dst); big_array_iter_advance(src); \ } \ return 0; \ } #define CAST2(d1, t1, d2, t2) \ if((0 == strcmp(d1, dst->array->dtype + 1)) && (0 == strcmp(d2, src->array->dtype + 1))) { \ for(i = 0; i < nmemb; i ++) { \ t1 * p1 = dst->dataptr; t2 * p2 = src->dataptr; \ p1->r = p2->r; p1->i = p2->i; \ big_array_iter_advance(dst); big_array_iter_advance(src); \ } \ return 0; \ } static int cast(BigArrayIter * dst, BigArrayIter * src, size_t nmemb) { /* doing cast assuming native byte order */ /* convert buf2 to buf1, both are native; * dtype has no endian-ness prefix * */ ptrdiff_t i; /* same type, no need for casting. */ if (0 == strcmp(dst->array->dtype + 1, src->array->dtype + 1)) { if(dst->contiguous && src->contiguous) { /* directly copy of memory chunks; FIXME: use memmove? */ memcpy(dst->dataptr, src->dataptr, nmemb * dst->array->strides[dst->array->ndim-1]); dst->dataptr = (char*) dst->dataptr + nmemb * dst->array->strides[dst->array->ndim - 1]; src->dataptr = (char*) src->dataptr + nmemb * src->array->strides[src->array->ndim - 1]; return 0; } else { /* copy one by one, discontinuous */ size_t elsize = big_file_dtype_itemsize(dst->array->dtype); for(i = 0; i < nmemb; i ++) { void * p1 = dst->dataptr; void * p2 = src->dataptr; memcpy(p1, p2, elsize); big_array_iter_advance(dst); big_array_iter_advance(src); } return 0; } } if(0 == strcmp(dst->array->dtype + 1, "i8")) { CAST("i8", int64_t, "i4", int32_t); CAST("i8", int64_t, "u4", uint32_t); CAST("i8", int64_t, "u8", uint64_t); CAST("i8", int64_t, "f8", double); CAST("i8", int64_t, "f4", float); CAST("i8", int64_t, "b1", char); } else if(0 == strcmp(dst->array->dtype + 1, "u8")) { CAST("u8", uint64_t, "u4", uint32_t); CAST("u8", uint64_t, "i4", int32_t); CAST("u8", uint64_t, "i8", int64_t); CAST("u8", uint64_t, "f8", double); CAST("u8", uint64_t, "f4", float); CAST("u8", uint64_t, "b1", char); } else if(0 == strcmp(dst->array->dtype + 1, "f8")) { CAST("f8", double, "f4", float); CAST("f8", double, "i4", int32_t); CAST("f8", double, "i8", int64_t); CAST("f8", double, "u4", uint32_t); CAST("f8", double, "u8", uint64_t); CAST("f8", double, "b1", char); } else if(0 == strcmp(dst->array->dtype + 1, "i4")) { CAST("i4", int32_t, "i8", int64_t); CAST("i4", int32_t, "u4", uint32_t); CAST("i4", int32_t, "u8", uint64_t); CAST("i4", int32_t, "f8", double); CAST("i4", int32_t, "f4", float); CAST("i4", int32_t, "b1", char); } else if(0 == strcmp(dst->array->dtype + 1, "u4")) { CAST("u4", uint32_t, "u8", uint64_t); CAST("u4", uint32_t, "i4", int32_t); CAST("u4", uint32_t, "i8", int64_t); CAST("u4", uint32_t, "f8", double); CAST("u4", uint32_t, "f4", float); CAST("u4", uint32_t, "b1", char); } else if(0 == strcmp(dst->array->dtype + 1, "f4")) { CAST("f4", float, "f8", double); CAST("f4", float, "i4", int32_t); CAST("f4", float, "i8", int64_t); CAST("f4", float, "u4", uint32_t); CAST("f4", float, "u8", uint64_t); CAST("f4", float, "b1", char); } else if(0 == strcmp(dst->array->dtype + 1, "c8")) { CAST2("c8", cplx64_t, "c16", cplx128_t); } else if(0 == strcmp(dst->array->dtype + 
1, "c16")) { CAST2("c16", cplx128_t, "c8", cplx64_t); } RAISE(ex, "Unsupported conversion from %s to %s. ", src->array->dtype, dst->array->dtype); ex: return -1; } static void sysvsum(unsigned int * sum, void * buf, size_t size) { unsigned int thisrun = *sum; unsigned char * cp = buf; while(size --) thisrun += *(cp++); *sum = thisrun; } /* * Internal API for AttrSet objects; * */ static int attrset_read_attr_set_v1(BigAttrSet * attrset, const char * basename) { attrset->dirty = 0; FILE * fattr = _big_file_open_a_file(basename, FILEID_ATTR, "r", 0); if(fattr == NULL) { return 0; } int nmemb; int lname; char dtype[9]={0}; char * data; char * name; while(!feof(fattr)) { if(1 != fread(&nmemb, sizeof(int), 1, fattr)) break; RAISEIF( (1 != fread(&lname, sizeof(int), 1, fattr)) || (1 != fread(&dtype, 8, 1, fattr)) || (!dtype_isvalid(dtype)), ex_fread, "Failed to read from file" ) int ldata = big_file_dtype_itemsize(dtype) * nmemb; data = alloca(ldata); name = alloca(lname + 1); RAISEIF( (1 != fread(name, lname, 1, fattr)) || (1 != fread(data, ldata, 1, fattr)), ex_fread, "Failed to read from file"); name[lname] = 0; RAISEIF(0 != attrset_set_attr(attrset, name, data, dtype, nmemb), ex_set_attr, NULL); } attrset->dirty = 0; fclose(fattr); return 0; ex_set_attr: ex_fread: attrset->dirty = 0; fclose(fattr); return -1; } static int _isblank(int ch) { return ch == ' ' || ch == '\t'; } static int attrset_read_attr_set_v2(BigAttrSet * attrset, const char * basename) { attrset->dirty = 0; FILE * fattr = _big_file_open_a_file(basename, FILEID_ATTR_V2, "r", 0); if(fattr == NULL) { return 0; } fseek(fattr, 0, SEEK_END); long size = ftell(fattr); /*ftell may fail*/ RAISEIF(size < 0, ex_init, "ftell error: %s",strerror(errno)); char * buffer = (char*) malloc(size + 1); RAISEIF(!buffer, ex_init, "Could not allocate memory for buffer: %ld bytes",size+1); unsigned char * data = (unsigned char * ) malloc(size + 1); RAISEIF(!data, ex_data, "Could not allocate memory for data: %ld bytes",size+1); fseek(fattr, 0, SEEK_SET); RAISEIF(size != fread(buffer, 1, size, fattr), ex_read_file, "Failed to read attribute file\n"); buffer[size] = 0; /* now parse the v2 attr file.*/ long i = 0; #define ATTRV2_EXPECT(variable) while(_isblank(buffer[i])) i++; \ char * variable = buffer + i; \ while(!_isblank(buffer[i])) i++; buffer[i] = 0; i++; while(buffer[i]) { ATTRV2_EXPECT(name); ATTRV2_EXPECT(dtype); ATTRV2_EXPECT(rawlength); ATTRV2_EXPECT(rawdata); /* skip the reset of the line */ while(buffer[i] != '\n' && buffer[i]) i ++; if(buffer[i] == '\n') i++; int nmemb = atoi(rawlength); int itemsize = big_file_dtype_itemsize(dtype); RAISEIF(nmemb * itemsize * 2!= strlen(rawdata), ex_parse_attr, "NMEMB and data mismiatch: %d x %d (%s) * 2 != %d", nmemb, itemsize, dtype, strlen(rawdata)); int j, k; for(k = 0, j = 0; k < nmemb * itemsize; k ++, j += 2) { char buf[3]; buf[0] = rawdata[j]; buf[1] = rawdata[j+1]; buf[2] = 0; unsigned int byte = strtoll(buf, NULL, 16); data[k] = byte; } RAISEIF(0 != attrset_set_attr(attrset, name, data, dtype, nmemb), ex_set_attr, NULL); } fclose(fattr); free(data); free(buffer); attrset->dirty = 0; return 0; ex_read_file: ex_parse_attr: ex_set_attr: attrset->dirty = 0; free(data); ex_data: free(buffer); ex_init: fclose(fattr); return -1; } static int attrset_write_attr_set_v2(BigAttrSet * attrset, const char * basename) { static char conv[] = "0123456789ABCDEF"; attrset->dirty = 0; FILE * fattr = _big_file_open_a_file(basename, FILEID_ATTR_V2, "w", 1); RAISEIF(fattr == NULL, ex_open, NULL); ptrdiff_t i; 
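/* Each attribute becomes one text line of the form
 *   <name> <dtype> <nmemb> <hex-encoded raw bytes> #HUMANE [ <textual preview> ]
 * attrset_read_attr_set_v2() above parses back the first four fields; the
 * trailing #HUMANE part is only a human-readable preview and is skipped. */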
for(i = 0; i < attrset->listused; i ++) { BigAttr * a = & attrset->attrlist[i]; int itemsize = big_file_dtype_itemsize(a->dtype); int ldata = itemsize * a->nmemb; char * rawdata = malloc(ldata * 2 + 1); unsigned char * adata = (unsigned char*) a->data; int j, k; for(j = 0, k = 0; k < ldata; k ++, j+=2) { rawdata[j] = conv[adata[k] / 16]; rawdata[j + 1] = conv[adata[k] % 16]; } rawdata[j] = 0; char * textual; /* skip textual representation for very long columns */ if(ldata > 128) { textual = _strdup("... (Too Long) "); } else { textual = malloc(a->nmemb * 32 + 1); textual[0] = 0; for(j = 0; j < a->nmemb; j ++) { if(a->dtype[1] != 'a' && !(a->dtype[1] == 'S' && big_file_dtype_itemsize(a->dtype) == 1)) { char buf[128]; big_file_dtype_format(buf, a->dtype, &adata[j * itemsize], NULL); strcat(textual, buf); if(j != a->nmemb - 1) { strcat(textual, " "); } } else { /* pretty print string encoded as a1 or S1. */ char buf[] = {adata[j], 0}; if(buf[0] == '\n') { strcat(textual, "..."); break; } if(buf[0] == 0) { break; } strcat(textual, buf); } } } int rt = fprintf(fattr, "%s %s %d %s #HUMANE [ %s ]\n", a->name, a->dtype, a->nmemb, rawdata, textual ); free(rawdata); free(textual); RAISEIF(rt <= 0, ex_write, "Failed to write to file"); } fclose(fattr); return 0; ex_write: fclose(fattr); ex_open: return -1; } static int attr_cmp(const void * p1, const void * p2) { const BigAttr * c1 = p1; const BigAttr * c2 = p2; return strcmp(c1->name, c2->name); } static BigAttr * attrset_append_attr(BigAttrSet * attrset) { while(attrset->listsize - attrset->listused < 1) { attrset->attrlist = realloc(attrset->attrlist, attrset->listsize * 2 * sizeof(BigAttr)); attrset->listsize *= 2; } BigAttr * a = & (attrset->attrlist[attrset->listused++]); memset(a, 0, sizeof(BigAttr)); return a; } static int attrset_add_attr(BigAttrSet * attrset, const char * attrname, const char * dtype, int nmemb) { size_t size = big_file_dtype_itemsize(dtype) * nmemb + strlen(attrname) + 1; while(attrset->bufsize - attrset->bufused < size) { int i; for(i = 0; i < attrset->listused; i ++) { attrset->attrlist[i].data -= (ptrdiff_t) attrset->attrbuf; attrset->attrlist[i].name -= (ptrdiff_t) attrset->attrbuf; } attrset->attrbuf = realloc(attrset->attrbuf, attrset->bufsize * 2); attrset->bufsize *= 2; for(i = 0; i < attrset->listused; i ++) { attrset->attrlist[i].data += (ptrdiff_t) attrset->attrbuf; attrset->attrlist[i].name += (ptrdiff_t) attrset->attrbuf; } } char * free = attrset->attrbuf + attrset->bufused; attrset->bufused += size; BigAttr * n = attrset_append_attr(attrset); n->nmemb = nmemb; memset(n->dtype, 0, 8); _dtype_normalize(n->dtype, dtype); n->name = free; strcpy(free, attrname); free += strlen(attrname) + 1; n->data = free; qsort(attrset->attrlist, attrset->listused, sizeof(BigAttr), attr_cmp); return 0; } static BigAttr * attrset_lookup_attr(BigAttrSet * attrset, const char * attrname) { BigAttr lookup = {0}; lookup.name = (char*) attrname; BigAttr * found = bsearch(&lookup, attrset->attrlist, attrset->listused, sizeof(BigAttr), attr_cmp); return found; } static int attrset_remove_attr(BigAttrSet * attrset, const char * attrname) { BigAttr *attr = attrset_lookup_attr(attrset, attrname); RAISEIF(attr == NULL, ex_notfound, "Attribute name '%s' is not found.", attrname ) ptrdiff_t ind = attr - attrset->attrlist; memmove(&attrset->attrlist[ind], &attrset->attrlist[ind + 1], (attrset->listused - ind - 1) * sizeof(BigAttr)); attrset->listused -= 1; return 0; ex_notfound: return -1; } static BigAttr * attrset_list_attrs(BigAttrSet * 
attrset, size_t * count) { *count = attrset->listused; return attrset->attrlist; } static int attrset_set_attr(BigAttrSet * attrset, const char * attrname, const void * data, const char * dtype, int nmemb) { BigAttr * attr; attrset->dirty = 1; RAISEIF ( strchr(attrname, ' ') || strchr(attrname, '\t') || strchr(attrname, '\n'), ex_name, "Attribute name cannot contain blanks (space, tab or newline)" ); /* Remove it if it exists*/ attr = attrset_lookup_attr(attrset, attrname); if(attr) attrset_remove_attr(attrset, attrname); /* add ensures the dtype has been normalized! */ RAISEIF(0 != attrset_add_attr(attrset, attrname, dtype, nmemb), ex_add, "Failed to add attr"); attr = attrset_lookup_attr(attrset, attrname); RAISEIF(attr->nmemb != nmemb, ex_mismatch, "attr nmemb mismatch"); return dtype_convert_simple(attr->data, attr->dtype, data, dtype, attr->nmemb); ex_name: ex_mismatch: ex_add: return -1; } static int attrset_get_attr(BigAttrSet * attrset, const char * attrname, void * data, const char * dtype, int nmemb) { BigAttr * found = attrset_lookup_attr(attrset, attrname); RAISEIF(!found, ex_notfound, "attr not found"); RAISEIF(found->nmemb != nmemb, ex_mismatch, "attr nmemb mismatch"); return dtype_convert_simple(data, dtype, found->data, found->dtype, found->nmemb); ex_mismatch: ex_notfound: return -1; } static BigAttrSet * attrset_create(void) { BigAttrSet * attrset = calloc(1, sizeof(BigAttrSet)); attrset->attrbuf = malloc(128); attrset->bufsize = 128; attrset->bufused = 0; attrset->attrlist = malloc(sizeof(BigAttr) * 16); attrset->listsize = 16; attrset->listused = 0; return attrset; } static void attrset_free(BigAttrSet * attrset) { free(attrset->attrbuf); free(attrset->attrlist); free(attrset); } void big_attrset_set_dirty(BigAttrSet * attrset, int dirty) { attrset->dirty = dirty; } static void * _big_attrset_pack(BigAttrSet * attrset, size_t * bytes) { size_t n = 0; n += sizeof(BigAttrSet); n += attrset->bufused; n += sizeof(BigAttr) * attrset->listused; char * buf = calloc(n, 1); char * p = buf; char * attrbuf = (char*) (p + sizeof(BigAttrSet)); BigAttr * attrlist = (BigAttr *) (attrbuf + attrset->bufused); memcpy(p, attrset, sizeof(BigAttrSet)); memcpy(attrbuf, attrset->attrbuf, attrset->bufused); memcpy(attrlist, attrset->attrlist, attrset->listused * sizeof(BigAttr)); int i = 0; for(i = 0; i < attrset->listused; i ++) { attrlist[i].data -= (ptrdiff_t) attrset->attrbuf; attrlist[i].name -= (ptrdiff_t) attrset->attrbuf; } *bytes = n; return (void*) p; } static BigAttrSet * _big_attrset_unpack(void * p) { BigAttrSet * attrset = calloc(1, sizeof(attrset[0])); memcpy(attrset, p, sizeof(BigAttrSet)); p += sizeof(BigAttrSet); attrset->attrbuf = malloc(attrset->bufsize); attrset->attrlist = malloc(attrset->listsize * sizeof(BigAttr)); memcpy(attrset->attrbuf, p, attrset->bufused); p += attrset->bufused; memcpy(attrset->attrlist, p, attrset->listused * sizeof(BigAttr)); int i = 0; for(i = 0; i < attrset->listused; i ++) { attrset->attrlist[i].data += (ptrdiff_t) attrset->attrbuf; attrset->attrlist[i].name += (ptrdiff_t) attrset->attrbuf; } return attrset; } void * _big_block_pack(BigBlock * block, size_t * bytes) { size_t attrsize = 0; void * attrset = _big_attrset_pack(block->attrset, &attrsize); int Nfile = block->Nfile; * bytes = sizeof(block[0]) + strlen(block->basename) + 1 + (Nfile + 1) * sizeof(block->fsize[0]) + (Nfile + 1) * sizeof(block->foffset[0]) + (Nfile + 1) * sizeof(block->fchecksum[0]) + attrsize; void * buf = malloc(*bytes); char * ptr = (char *) buf; memcpy(ptr, block, 
sizeof(block[0])); ptr += sizeof(block[0]); memcpy(ptr, block->basename, strlen(block->basename) + 1); ptr += strlen(block->basename) + 1; if(block->fsize) memcpy(ptr, block->fsize, (Nfile + 1) * sizeof(block->fsize[0])); ptr += (Nfile + 1) * sizeof(block->fsize[0]); if(block->foffset) memcpy(ptr, block->foffset, (Nfile + 1) * sizeof(block->foffset[0])); ptr += (Nfile + 1) * sizeof(block->foffset[0]); if(block->fchecksum) memcpy(ptr, block->fchecksum, (Nfile + 1) * sizeof(block->fchecksum[0])); ptr += (Nfile + 1) * sizeof(block->fchecksum[0]); memcpy(ptr, attrset, attrsize); free(attrset); ptr += attrsize; return buf; } void _big_block_unpack(BigBlock * block, void * buf) { char * ptr = (char*)buf; memcpy(block, ptr, sizeof(block[0])); ptr += sizeof(block[0]); int Nfile = block->Nfile; block->fsize = calloc(Nfile + 1, sizeof(size_t)); block->foffset = calloc(Nfile + 1, sizeof(size_t)); block->fchecksum = calloc(Nfile + 1, sizeof(int)); block->basename = _strdup(ptr); ptr += strlen(ptr) + 1; if(block->fsize) memcpy(block->fsize, ptr, (Nfile + 1) * sizeof(block->fsize[0])); ptr += (Nfile + 1) * sizeof(block->fsize[0]); if(block->foffset) memcpy(block->foffset, ptr, (Nfile + 1) * sizeof(block->foffset[0])); ptr += (Nfile + 1) * sizeof(block->foffset[0]); if(block->fchecksum) memcpy(block->fchecksum, ptr, (Nfile + 1) * sizeof(block->fchecksum[0])); ptr += (Nfile + 1) * sizeof(block->fchecksum[0]); block->attrset = _big_attrset_unpack(ptr); } /* File Path */ FILE * _big_file_open_a_file(const char * basename, int fileid, char * mode, int raise) { char * filename; int unbuffered = 0; if(fileid == FILEID_HEADER) { filename = _path_join(basename, EXT_HEADER); } else if(fileid == FILEID_ATTR) { filename = _path_join(basename, EXT_ATTR); } else if(fileid == FILEID_ATTR_V2) { filename = _path_join(basename, EXT_ATTR_V2); } else { char d[128]; sprintf(d, EXT_DATA, fileid); filename = _path_join(basename, d); unbuffered = 1; } FILE * fp = fopen(filename, mode); if(!raise && fp == NULL) { goto ex_fopen; } RAISEIF(fp == NULL, ex_fopen, "Failed to open physical file `%s' with mode `%s' (%s)", filename, mode, strerror(errno)); if(unbuffered) { setbuf(fp, NULL); } ex_fopen: free(filename); return fp; } static int _big_file_mkdir(const char * dirname) { struct stat buf; int mkdirret; mkdirret = mkdir(dirname, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); /* stat is to make sure the dir doesn't exist; it could be owned by others and stopped * by a permission error, but we are still OK, as the dir is created*/ /* already exists; only check stat after mkdir fails with EACCES, avoid meta data calls. */ RAISEIF((mkdirret !=0 && errno != EEXIST && stat(dirname, &buf)), ex_mkdir, "Failed to create directory structure at `%s' (%s)", dirname, strerror(errno) ); /* Attempt to update the time stamp */ utimes(dirname, NULL); return 0; ex_mkdir: return -1; } /* make subdir rel to pathname, recursively making parents */ int _big_file_mksubdir_r(const char * pathname, const char * subdir) { char * subdirname = _strdup(subdir); char * p; char * mydirname; for(p = subdirname; *p; p ++) { if(*p != '/') continue; *p = 0; mydirname = _path_join(pathname, subdirname); if(strlen(mydirname) != 0) { RAISEIF(0 != _big_file_mkdir(mydirname), ex_mkdir, NULL); } free(mydirname); *p = '/'; } mydirname = _path_join(pathname, subdirname); RAISEIF(0 != _big_file_mkdir(mydirname), ex_mkdir, NULL); free(subdirname); free(mydirname); return 0; ex_mkdir: free(subdirname); free(mydirname); return -1; }
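/*
 * A minimal usage sketch (not part of the original file) of the API
 * implemented above: create a big file, write one "f8" column of 10 rows
 * into a single physical file, and close everything. Error handling is
 * abbreviated; all signatures follow the functions defined in this file.
 */
#include "bigfile.h"

static int bigfile_write_sketch(void)
{
    BigFile bf;
    BigBlock bb;
    BigBlockPtr ptr;
    BigArray array;
    double data[10] = {0};
    size_t fsize[1] = {10};    /* one physical file holding all 10 rows */
    size_t dims[2] = {10, 1};  /* 10 rows, nmemb == 1 member per row */

    if(0 != big_file_create(&bf, "example.big")) return -1;
    if(0 != big_file_create_block(&bf, &bb, "Column", "f8", 1, 1, fsize)) return -1;
    big_array_init(&array, data, "f8", 2, dims, NULL);
    big_block_seek(&bb, &ptr, 0);          /* position at row 0 */
    big_block_write(&bb, &ptr, &array);    /* converts dtype and checksums */
    big_block_close(&bb);                  /* flushes header and attr files */
    return big_file_close(&bf);
}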
ConverterOSG.h
/* -*-c++-*- IfcQuery www.ifcquery.com * MIT License Copyright (c) 2017 Fabian Gerold Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #pragma once #include <osg/CullFace> #include <osg/Geode> #include <osg/Hint> #include <osg/LineWidth> #include <osg/Material> #include <osg/Point> #include <osgUtil/Tessellator> #include <ifcpp/model/BasicTypes.h> #include <ifcpp/model/StatusCallback.h> #include <ifcpp/IFC4/include/IfcCurtainWall.h> #include <ifcpp/IFC4/include/IfcFeatureElementSubtraction.h> #include <ifcpp/IFC4/include/IfcProject.h> #include <ifcpp/IFC4/include/IfcSpace.h> #include <ifcpp/IFC4/include/IfcWindow.h> #include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h> #include <ifcpp/geometry/GeometrySettings.h> #include <ifcpp/geometry/SceneGraphUtils.h> #include <ifcpp/geometry/AppearanceData.h> #include "GeometryInputData.h" #include "IncludeCarveHeaders.h" #include "CSG_Adapter.h" class ConverterOSG : public StatusCallback { protected: shared_ptr<GeometrySettings> m_geom_settings; std::map<int, osg::ref_ptr<osg::Switch> > m_map_entity_id_to_switch; std::map<int, osg::ref_ptr<osg::Switch> > m_map_representation_id_to_switch; double m_recent_progress; osg::ref_ptr<osg::CullFace> m_cull_back_off; osg::ref_ptr<osg::StateSet> m_glass_stateset; //\brief StateSet caching and re-use std::vector<osg::ref_ptr<osg::StateSet> > m_vec_existing_statesets; bool m_enable_stateset_caching = false; #ifdef ENABLE_OPENMP Mutex m_writelock_appearance_cache; #endif public: ConverterOSG( shared_ptr<GeometrySettings>& geom_settings ) : m_geom_settings(geom_settings) { m_cull_back_off = new osg::CullFace( osg::CullFace::BACK ); m_glass_stateset = new osg::StateSet(); m_glass_stateset->setMode( GL_BLEND, osg::StateAttribute::ON ); m_glass_stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN ); } virtual ~ConverterOSG() {} // Map: IfcProduct ID -> scenegraph switch std::map<int, osg::ref_ptr<osg::Switch> >& getMapEntityIdToSwitch() { return m_map_entity_id_to_switch; } // Map: Representation Identifier -> scenegraph switch std::map<int, osg::ref_ptr<osg::Switch> >& getMapRepresentationToSwitch() { return m_map_representation_id_to_switch; } void clearInputCache() { m_map_entity_id_to_switch.clear(); m_map_representation_id_to_switch.clear(); m_vec_existing_statesets.clear(); } static void drawBoundingBox( const carve::geom::aabb<3>& aabb, osg::Geometry* geom ) { osg::ref_ptr<osg::Vec3Array> vertices = dynamic_cast<osg::Vec3Array*>( geom->getVertexArray() ); if( !vertices ) { vertices = new osg::Vec3Array(); geom->setVertexArray( vertices ); } 
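		// Append the eight box corners to the (possibly shared) vertex array, then
		// connect them with a single GL_LINE_STRIP that covers all twelve edges
		// (a few edges are traversed twice, since a strip must stay connected).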
const carve::geom::vector<3>& aabb_pos = aabb.pos; const carve::geom::vector<3>& extent = aabb.extent; const double dex = extent.x; const double dey = extent.y; const double dez = extent.z; const int vert_id_offset = vertices->size(); vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y - dey, aabb_pos.z - dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y - dey, aabb_pos.z - dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y + dey, aabb_pos.z - dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y + dey, aabb_pos.z - dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y - dey, aabb_pos.z + dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y - dey, aabb_pos.z + dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y + dey, aabb_pos.z + dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y + dey, aabb_pos.z + dez ) ); osg::ref_ptr<osg::DrawElementsUInt> box_lines = new osg::DrawElementsUInt( GL_LINE_STRIP, 0 ); box_lines->push_back( vert_id_offset + 0 ); box_lines->push_back( vert_id_offset + 1 ); box_lines->push_back( vert_id_offset + 2 ); box_lines->push_back( vert_id_offset + 3 ); box_lines->push_back( vert_id_offset + 0 ); box_lines->push_back( vert_id_offset + 4 ); box_lines->push_back( vert_id_offset + 5 ); box_lines->push_back( vert_id_offset + 1 ); box_lines->push_back( vert_id_offset + 5 ); box_lines->push_back( vert_id_offset + 6 ); box_lines->push_back( vert_id_offset + 2 ); box_lines->push_back( vert_id_offset + 6 ); box_lines->push_back( vert_id_offset + 7 ); box_lines->push_back( vert_id_offset + 3 ); box_lines->push_back( vert_id_offset + 7 ); box_lines->push_back( vert_id_offset + 4 ); geom->addPrimitiveSet( box_lines ); osg::ref_ptr<osg::Material> mat = new osg::Material(); if( !mat ) { throw OutOfMemoryException(); } osg::Vec4f ambientColor( 1.f, 0.2f, 0.1f, 1.f ); mat->setAmbient( osg::Material::FRONT_AND_BACK, ambientColor ); mat->setDiffuse( osg::Material::FRONT_AND_BACK, ambientColor ); mat->setSpecular( osg::Material::FRONT_AND_BACK, ambientColor ); //mat->setShininess( osg::Material::FRONT_AND_BACK, shininess ); //mat->setColorMode( osg::Material::SPECULAR ); osg::StateSet* stateset = geom->getOrCreateStateSet(); if( !stateset ) { throw OutOfMemoryException(); } stateset->setAttribute( mat, osg::StateAttribute::ON ); stateset->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); } static void drawFace( const carve::mesh::Face<3>* face, osg::Geode* geode, bool add_color_array = false ) { #ifdef _DEBUG std::cout << "not triangulated" << std::endl; #endif std::vector<vec3> face_vertices; face_vertices.resize( face->nVertices() ); carve::mesh::Edge<3> *e = face->edge; const size_t num_vertices = face->nVertices(); for( size_t i = 0; i < num_vertices; ++i ) { face_vertices[i] = e->v1()->v; e = e->next; } if( num_vertices < 4 ) { std::cout << "drawFace is meant only for num vertices > 4" << std::endl; } vec3* vertex_vec; osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array( num_vertices ); if( !vertices ) { throw OutOfMemoryException(); } osg::ref_ptr<osg::DrawElementsUInt> triangles = new osg::DrawElementsUInt( osg::PrimitiveSet::POLYGON, num_vertices ); if( !triangles ) { throw OutOfMemoryException(); } for( size_t i = 0; i < num_vertices; ++i ) { vertex_vec = &face_vertices[num_vertices - i - 1]; ( *vertices )[i].set( vertex_vec->x, vertex_vec->y, vertex_vec->z ); ( *triangles )[i] = i; } osg::Vec3f poly_normal = 
SceneGraphUtils::computePolygonNormal( vertices ); osg::ref_ptr<osg::Vec3Array> normals = new osg::Vec3Array(); normals->resize( num_vertices, poly_normal ); osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices ); geometry->setNormalArray( normals ); normals->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::POLYGON, 0, vertices->size() ) ); if( add_color_array ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); colors->resize( vertices->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) ); colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); } if( num_vertices > 4 ) { // TODO: check if polygon is convex with Gift wrapping algorithm osg::ref_ptr<osgUtil::Tessellator> tesselator = new osgUtil::Tessellator(); tesselator->setTessellationType( osgUtil::Tessellator::TESS_TYPE_POLYGONS ); //tesselator->setWindingType( osgUtil::Tessellator::TESS_WINDING_ODD ); tesselator->retessellatePolygons( *geometry ); } geode->addDrawable( geometry ); #ifdef DEBUG_DRAW_NORMALS osg::ref_ptr<osg::Vec3Array> vertices_normals = new osg::Vec3Array(); for( size_t i = 0; i < num_vertices; ++i ) { vertex_vec = &face_vertices[num_vertices - i - 1]; vertices_normals->push_back( osg::Vec3f( vertex_vec->x, vertex_vec->y, vertex_vec->z ) ); vertices_normals->push_back( osg::Vec3f( vertex_vec->x, vertex_vec->y, vertex_vec->z ) + poly_normal ); } osg::ref_ptr<osg::Vec4Array> colors_normals = new osg::Vec4Array(); colors_normals->resize( num_vertices * 2, osg::Vec4f( 0.4f, 0.7f, 0.4f, 1.f ) ); osg::ref_ptr<osg::Geometry> geometry_normals = new osg::Geometry(); geometry_normals->setVertexArray( vertices_normals ); geometry_normals->setColorArray( colors_normals ); geometry_normals->setColorBinding( osg::Geometry::BIND_PER_VERTEX ); geometry_normals->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geometry_normals->setNormalBinding( osg::Geometry::BIND_OFF ); geometry_normals->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_normals->size() ) ); geode->addDrawable( geometry_normals ); #endif } //#define DEBUG_DRAW_NORMALS static void drawMeshSet( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, osg::Geode* geode, double crease_angle = M_PI*0.05, bool add_color_array = false ) { if( !meshset ) { return; } osg::ref_ptr<osg::Vec3Array> vertices_tri = new osg::Vec3Array(); if( !vertices_tri ) { throw OutOfMemoryException(); } osg::ref_ptr<osg::Vec3Array> normals_tri = new osg::Vec3Array(); if( !normals_tri ) { throw OutOfMemoryException(); } osg::ref_ptr<osg::Vec3Array> vertices_quad; osg::ref_ptr<osg::Vec3Array> normals_quad; const size_t max_num_faces_per_vertex = 10000; std::map<carve::mesh::Face<3>*, double> map_face_area; std::map<carve::mesh::Face<3>*, double>::iterator it_face_area; if( crease_angle > 0 ) { for( size_t i_mesh = 0; i_mesh < meshset->meshes.size(); ++i_mesh ) { const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh]; const size_t num_faces = mesh->faces.size(); for( size_t i_face = 0; i_face != num_faces; ++i_face ) { carve::mesh::Face<3>* face = mesh->faces[i_face]; // compute area of projected face: std::vector<vec2> projected; face->getProjectedVertices( projected ); double face_area = carve::geom2d::signedArea( projected ); map_face_area[face] = abs( face_area ); } } } for( size_t i_mesh = 0; i_mesh < meshset->meshes.size(); ++i_mesh ) { const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh]; const 
size_t num_faces = mesh->faces.size(); for( size_t i_face = 0; i_face != num_faces; ++i_face ) { carve::mesh::Face<3>* face = mesh->faces[i_face]; const size_t n_vertices = face->nVertices(); if( n_vertices > 4 ) { drawFace( face, geode ); continue; } const vec3 face_normal = face->plane.N; if( crease_angle > 0 ) { carve::mesh::Edge<3>* e = face->edge; for( size_t jj = 0; jj < n_vertices; ++jj ) { carve::mesh::Vertex<3>* vertex = e->vert; vec3 intermediate_normal; // collect all faces at vertex // | ^ // | | // f1 e->rev | | e face // v | // <---e1------- <--------------- //-------------> ---------------> // | ^ // | | // v | carve::mesh::Edge<3>* e1 = e;// ->rev->next; carve::mesh::Face<3>* f1 = e1->face; #ifdef _DEBUG if( f1 != face ) { std::cout << "f1 != face" << std::endl; } #endif for( size_t i3 = 0; i3 < max_num_faces_per_vertex; ++i3 ) { if( !e1->rev ) { break; } if( !e1->rev->next ) { break; } vec3 f1_normal = f1->plane.N; const double cos_angle = dot( f1_normal, face_normal ); if( cos_angle > 0 ) { const double deviation = std::abs( cos_angle - 1.0 ); if( deviation < crease_angle ) { double weight = 0.0; it_face_area = map_face_area.find( f1 ); if( it_face_area != map_face_area.end() ) { weight = it_face_area->second; } intermediate_normal += weight*f1_normal; } } if( !e1->rev ) { // it's an open mesh break; } e1 = e1->rev->next; if( !e1 ) { break; } f1 = e1->face; #ifdef _DEBUG if( e1->vert != vertex ) { std::cout << "e1->vert != vertex" << std::endl; } #endif if( f1 == face ) { break; } } const double intermediate_normal_length = intermediate_normal.length(); if( intermediate_normal_length < 0.0000000001 ) { intermediate_normal = face_normal; } else { // normalize: intermediate_normal *= 1.0 / intermediate_normal_length; } const vec3& vertex_v = vertex->v; if( face->n_edges == 3 ) { vertices_tri->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); normals_tri->push_back( osg::Vec3( intermediate_normal.x, intermediate_normal.y, intermediate_normal.z ) ); } else if( face->n_edges == 4 ) { if( !vertices_quad ) vertices_quad = new osg::Vec3Array(); vertices_quad->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); if( !normals_quad ) normals_quad = new osg::Vec3Array(); normals_quad->push_back( osg::Vec3( intermediate_normal.x, intermediate_normal.y, intermediate_normal.z ) ); } e = e->next; } } else { carve::mesh::Edge<3>* e = face->edge; for( size_t jj = 0; jj < n_vertices; ++jj ) { carve::mesh::Vertex<3>* vertex = e->vert; const vec3& vertex_v = vertex->v; if( face->n_edges == 3 ) { vertices_tri->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); normals_tri->push_back( osg::Vec3( face_normal.x, face_normal.y, face_normal.z ) ); } else if( face->n_edges == 4 ) { if( !vertices_quad ) vertices_quad = new osg::Vec3Array(); vertices_quad->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); if( !normals_quad ) normals_quad = new osg::Vec3Array(); normals_quad->push_back( osg::Vec3( face_normal.x, face_normal.y, face_normal.z ) ); } e = e->next; } } } } if( vertices_tri->size() > 0 ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); if( !geometry ) { throw OutOfMemoryException(); } geometry->setVertexArray( vertices_tri ); geometry->setNormalArray( normals_tri ); normals_tri->setBinding( osg::Array::BIND_PER_VERTEX ); if( add_color_array ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); if( !colors ) { throw OutOfMemoryException(); } colors->resize( vertices_tri->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) ); 
colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::TRIANGLES, 0, vertices_tri->size() ) ); if( !geometry ) { throw OutOfMemoryException(); } geode->addDrawable( geometry ); #ifdef DEBUG_DRAW_NORMALS osg::ref_ptr<osg::Vec3Array> vertices_normals = new osg::Vec3Array(); for( size_t i = 0; i < vertices_tri->size(); ++i ) { osg::Vec3f& vertex_vec = vertices_tri->at( i );// [i]; osg::Vec3f& normal_vec = normals_tri->at( i ); vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) ); vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) + normal_vec ); } osg::ref_ptr<osg::Vec4Array> colors_normals = new osg::Vec4Array(); colors_normals->resize( vertices_normals->size(), osg::Vec4f( 0.4f, 0.7f, 0.4f, 1.f ) ); osg::ref_ptr<osg::Geometry> geometry_normals = new osg::Geometry(); geometry_normals->setVertexArray( vertices_normals ); geometry_normals->setColorArray( colors_normals ); geometry_normals->setColorBinding( osg::Geometry::BIND_PER_VERTEX ); geometry_normals->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geometry_normals->setNormalBinding( osg::Geometry::BIND_OFF ); geometry_normals->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_normals->size() ) ); geode->addDrawable( geometry_normals ); #endif } if( vertices_quad ) { if( vertices_quad->size() > 0 ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); if( !geometry ) { throw OutOfMemoryException(); } geometry->setVertexArray( vertices_quad ); if( normals_quad ) { normals_quad->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setNormalArray( normals_quad ); } if( add_color_array ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); if( !colors ) { throw OutOfMemoryException(); } colors->resize( vertices_quad->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) ); colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::QUADS, 0, vertices_quad->size() ) ); if( !geometry ) { throw OutOfMemoryException(); } geode->addDrawable( geometry ); } } } static void drawPolyline( const carve::input::PolylineSetData* polyline_data, osg::Geode* geode, bool add_color_array = false ) { osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array(); if( !vertices ) { throw OutOfMemoryException(); } carve::line::PolylineSet* polyline_set = polyline_data->create( carve::input::opts() ); if( polyline_set->vertices.size() < 2 ) { #ifdef _DEBUG std::cout << __FUNC__ << ": polyline_set->vertices.size() < 2" << std::endl; #endif return; } for( auto it = polyline_set->lines.begin(); it != polyline_set->lines.end(); ++it ) { const carve::line::Polyline* pline = *it; size_t vertex_count = pline->vertexCount(); for( size_t vertex_i = 0; vertex_i < vertex_count; ++vertex_i ) { if( vertex_i >= polyline_set->vertices.size() ) { #ifdef _DEBUG std::cout << __FUNC__ << ": vertex_i >= polyline_set->vertices.size()" << std::endl; #endif continue; } const carve::line::Vertex* v = pline->vertex( vertex_i ); vertices->push_back( osg::Vec3d( v->v[0], v->v[1], v->v[2] ) ); } } osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); if( !geometry ) { throw OutOfMemoryException(); } geometry->setVertexArray( vertices ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINE_STRIP, 0, vertices->size() ) ); if( add_color_array ) 
{ osg::Vec4f color( 0.6f, 0.6f, 0.6f, 0.1f ); osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array( vertices->size(), &color ); if( !colors ) { throw OutOfMemoryException(); } colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); } geode->addDrawable( geometry ); } void computeCreaseEdgesFromMeshset( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, std::vector<carve::mesh::Edge<3>* >& vec_edges_out, const double crease_angle ) { if( !meshset ) { return; } for( size_t i_mesh = 0; i_mesh < meshset->meshes.size(); ++i_mesh ) { const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh]; const std::vector<carve::mesh::Edge<3>* >& vec_closed_edges = mesh->closed_edges; for( size_t i_edge = 0; i_edge < vec_closed_edges.size(); ++i_edge ) { carve::mesh::Edge<3>* edge = vec_closed_edges[i_edge]; if( !edge ) { continue; } carve::mesh::Edge<3>* edge_reverse = edge->rev; if( !edge_reverse ) { continue; } carve::mesh::Face<3>* face = edge->face; carve::mesh::Face<3>* face_reverse = edge_reverse->face; const carve::geom::vector<3>& f1_normal = face->plane.N; const carve::geom::vector<3>& f2_normal = face_reverse->plane.N; const double cos_angle = dot( f1_normal, f2_normal ); if( cos_angle > 0 ) { const double deviation = std::abs( cos_angle - 1.0 ); if( deviation < crease_angle ) { continue; } } // TODO: if area of face and face_reverse is equal, skip the crease edge. It could be the inside or outside of a cylinder. Check also if > 2 faces in a row have same normal angle differences vec_edges_out.push_back( edge ); } } } void renderMeshsetCreaseEdges( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, osg::Geode* target_geode, const double crease_angle ) { if( !meshset ) { return; } if( !target_geode ) { return; } std::vector<carve::mesh::Edge<3>* > vec_crease_edges; computeCreaseEdgesFromMeshset( meshset, vec_crease_edges, crease_angle ); if( vec_crease_edges.size() > 0 ) { osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array(); for( size_t i_edge = 0; i_edge < vec_crease_edges.size(); ++i_edge ) { const carve::mesh::Edge<3>* edge = vec_crease_edges[i_edge]; const carve::geom::vector<3>& vertex1 = edge->v1()->v; const carve::geom::vector<3>& vertex2 = edge->v2()->v; vertices->push_back( osg::Vec3d( vertex1.x, vertex1.y, vertex1.z ) ); vertices->push_back( osg::Vec3d( vertex2.x, vertex2.y, vertex2.z ) ); } osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices->size() ) ); geometry->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geometry->getOrCreateStateSet()->setMode( GL_BLEND, osg::StateAttribute::ON ); geometry->getOrCreateStateSet()->setAttributeAndModes( new osg::LineWidth( 3.0f ), osg::StateAttribute::ON ); geometry->getOrCreateStateSet()->setMode( GL_LINE_SMOOTH, osg::StateAttribute::ON ); geometry->getOrCreateStateSet()->setAttributeAndModes( new osg::Hint( GL_LINE_SMOOTH_HINT, GL_NICEST ), osg::StateAttribute::ON ); geometry->getOrCreateStateSet()->setRenderBinDetails( 10, "RenderBin"); target_geode->addDrawable( geometry ); } } void applyAppearancesToGroup( const std::vector<shared_ptr<AppearanceData> >& vec_product_appearances, osg::Group* grp ) { for( size_t ii = 0; ii < vec_product_appearances.size(); ++ii ) { const shared_ptr<AppearanceData>& appearance = vec_product_appearances[ii]; if( !appearance ) { continue; } if( appearance->m_apply_to_geometry_type == 
AppearanceData::GEOM_TYPE_SURFACE || appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_ANY ) { osg::ref_ptr<osg::StateSet> item_stateset; convertToOSGStateSet( appearance, item_stateset ); if( item_stateset ) { osg::StateSet* existing_item_stateset = grp->getStateSet(); if( existing_item_stateset ) { if( existing_item_stateset != item_stateset ) { existing_item_stateset->merge( *item_stateset ); } } else { grp->setStateSet( item_stateset ); } } } else if( appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_CURVE ) { } } } osg::Matrixd convertMatrixToOSG( const carve::math::Matrix& mat_in ) { return osg::Matrixd( mat_in.m[0][0], mat_in.m[0][1], mat_in.m[0][2], mat_in.m[0][3], mat_in.m[1][0], mat_in.m[1][1], mat_in.m[1][2], mat_in.m[1][3], mat_in.m[2][0], mat_in.m[2][1], mat_in.m[2][2], mat_in.m[2][3], mat_in.m[3][0], mat_in.m[3][1], mat_in.m[3][2], mat_in.m[3][3] ); } //\brief method convertProductShapeToOSG: creates geometry objects from an IfcProduct object // caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock void convertProductShapeToOSG( shared_ptr<ProductShapeData>& product_shape, std::map<int, osg::ref_ptr<osg::Switch> >& map_representation_switches ) { if( product_shape->m_ifc_object_definition.expired() ) { return; } shared_ptr<IfcObjectDefinition> ifc_object_def( product_shape->m_ifc_object_definition ); shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>( ifc_object_def ); if( !ifc_product ) { return; } const int product_id = ifc_product->m_entity_id; std::stringstream strs_product_switch_name; strs_product_switch_name << "#" << product_id << "=" << ifc_product->className() << " group"; bool draw_bounding_box = false; // create OSG objects std::vector<shared_ptr<RepresentationData> >& vec_product_representations = product_shape->m_vec_representations; for( size_t ii_representation = 0; ii_representation < vec_product_representations.size(); ++ii_representation ) { const shared_ptr<RepresentationData>& product_representation_data = vec_product_representations[ii_representation]; if( product_representation_data->m_ifc_representation.expired() ) { continue; } shared_ptr<IfcRepresentation> ifc_representation( product_representation_data->m_ifc_representation ); const int representation_id = ifc_representation->m_entity_id; osg::ref_ptr<osg::Switch> representation_switch = new osg::Switch(); #ifdef _DEBUG std::stringstream strs_representation_name; strs_representation_name << strs_product_switch_name.str().c_str() << ", representation " << ii_representation; representation_switch->setName( strs_representation_name.str().c_str() ); #endif const std::vector<shared_ptr<ItemShapeData> >& product_items = product_representation_data->m_vec_item_data; for( size_t i_item = 0; i_item < product_items.size(); ++i_item ) { const shared_ptr<ItemShapeData>& item_shape = product_items[i_item]; osg::ref_ptr<osg::MatrixTransform> item_group = new osg::MatrixTransform(); if( !item_group ) { throw OutOfMemoryException( __FUNC__ ); } #ifdef _DEBUG std::stringstream strs_item_name; strs_item_name << strs_representation_name.str().c_str() << ", item " << i_item; item_group->setName( strs_item_name.str().c_str() ); #endif // create shape for open shells for( size_t ii = 0; ii < item_shape->m_meshsets_open.size(); ++ii ) { shared_ptr<carve::mesh::MeshSet<3> >& item_meshset = item_shape->m_meshsets_open[ii]; CSG_Adapter::retriangulateMeshSet( item_meshset ); osg::ref_ptr<osg::Geode> 
geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } drawMeshSet( item_meshset, geode, m_geom_settings->getCoplanarFacesMaxDeltaAngle() ); if( m_geom_settings->getRenderCreaseEdges() ) { renderMeshsetCreaseEdges( item_meshset, geode, m_geom_settings->getCreaseEdgesMaxDeltaAngle() ); } // disable back face culling for open meshes geode->getOrCreateStateSet()->setAttributeAndModes( m_cull_back_off.get(), osg::StateAttribute::OFF ); item_group->addChild( geode ); if( draw_bounding_box ) { carve::geom::aabb<3> bbox = item_meshset->getAABB(); osg::ref_ptr<osg::Geometry> bbox_geom = new osg::Geometry(); drawBoundingBox( bbox, bbox_geom ); geode->addDrawable( bbox_geom ); } #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", open meshset " << ii; geode->setName( strs_item_meshset_name.str().c_str() ); #endif } // create shape for meshsets for( size_t ii = 0; ii < item_shape->m_meshsets.size(); ++ii ) { shared_ptr<carve::mesh::MeshSet<3> >& item_meshset = item_shape->m_meshsets[ii]; CSG_Adapter::retriangulateMeshSet( item_meshset ); osg::ref_ptr<osg::Geode> geode_meshset = new osg::Geode(); if( !geode_meshset ) { throw OutOfMemoryException( __FUNC__ ); } drawMeshSet( item_meshset, geode_meshset, m_geom_settings->getCoplanarFacesMaxDeltaAngle() ); item_group->addChild( geode_meshset ); if( m_geom_settings->getRenderCreaseEdges() ) { renderMeshsetCreaseEdges( item_meshset, geode_meshset, m_geom_settings->getCreaseEdgesMaxDeltaAngle() ); } if( draw_bounding_box ) { carve::geom::aabb<3> bbox = item_meshset->getAABB(); osg::ref_ptr<osg::Geometry> bbox_geom = new osg::Geometry(); drawBoundingBox( bbox, bbox_geom ); geode_meshset->addDrawable( bbox_geom ); } #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", meshset " << ii; geode_meshset->setName( strs_item_meshset_name.str().c_str() ); #endif } // create shape for points const std::vector<shared_ptr<carve::input::VertexData> >& vertex_points = item_shape->getVertexPoints(); for( size_t ii = 0; ii < vertex_points.size(); ++ii ) { const shared_ptr<carve::input::VertexData>& pointset_data = vertex_points[ii]; if( pointset_data ) { if( pointset_data->points.size() > 0 ) { osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array(); for( size_t i_pointset_point = 0; i_pointset_point < pointset_data->points.size(); ++i_pointset_point ) { vec3& carve_point = pointset_data->points[i_pointset_point]; vertices->push_back( osg::Vec3d( carve_point.x, carve_point.y, carve_point.z ) ); } osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::POINTS, 0, vertices->size() ) ); geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geode->getOrCreateStateSet()->setAttribute( new osg::Point( 3.0f ), osg::StateAttribute::ON ); geode->addDrawable( geometry ); geode->setCullingActive( false ); item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", vertex_point " << ii; geode->setName( strs_item_meshset_name.str().c_str() ); #endif } } } // create shape for polylines for( size_t ii = 0; ii < item_shape->m_polylines.size(); ++ii ) { shared_ptr<carve::input::PolylineSetData>& 
polyline_data = item_shape->m_polylines[ii]; osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); drawPolyline( polyline_data.get(), geode ); item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", polylines " << ii; geode->setName( strs_item_meshset_name.str().c_str() ); #endif } if( m_geom_settings->isShowTextLiterals() ) { for( size_t ii = 0; ii < item_shape->m_vec_text_literals.size(); ++ii ) { shared_ptr<TextItemData>& text_data = item_shape->m_vec_text_literals[ii]; if( !text_data ) { continue; } carve::math::Matrix& text_pos = text_data->m_text_position; // TODO: handle rotation std::string text_str; text_str.assign( text_data->m_text.begin(), text_data->m_text.end() ); osg::Vec3 pos2( text_pos._41, text_pos._42, text_pos._43 ); osg::ref_ptr<osgText::Text> txt = new osgText::Text(); if( !txt ) { throw OutOfMemoryException( __FUNC__ ); } txt->setFont( "fonts/arial.ttf" ); txt->setColor( osg::Vec4f( 0, 0, 0, 1 ) ); txt->setCharacterSize( 0.1f ); txt->setAutoRotateToScreen( true ); txt->setPosition( pos2 ); txt->setText( text_str.c_str() ); txt->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ){ throw OutOfMemoryException( __FUNC__ ); } geode->addDrawable( txt ); item_group->addChild( geode ); } } // apply statesets if there are any if( item_shape->m_vec_item_appearances.size() > 0 ) { applyAppearancesToGroup( item_shape->m_vec_item_appearances, item_group ); } // If anything has been created, add it to the representation group if( item_group->getNumChildren() > 0 ) { #ifdef _DEBUG if( item_group->getNumParents() > 0 ) { std::cout << __FUNC__ << ": item_group->getNumParents() > 0" << std::endl; } #endif representation_switch->addChild( item_group ); } } // apply statesets if there are any if( product_representation_data->m_vec_representation_appearances.size() > 0 ) { applyAppearancesToGroup( product_representation_data->m_vec_representation_appearances, representation_switch ); } // If anything has been created, add it to the product group if( representation_switch->getNumChildren() > 0 ) { #ifdef _DEBUG if( representation_switch->getNumParents() > 0 ) { std::cout << __FUNC__ << ": product_representation_switch->getNumParents() > 0" << std::endl; } #endif // enable transparency for certain objects if( dynamic_pointer_cast<IfcSpace>(ifc_product) ) { representation_switch->setStateSet( m_glass_stateset ); } else if( dynamic_pointer_cast<IfcCurtainWall>(ifc_product) || dynamic_pointer_cast<IfcWindow>(ifc_product) ) { representation_switch->setStateSet( m_glass_stateset ); SceneGraphUtils::setMaterialAlpha( representation_switch, 0.6f ); } map_representation_switches.insert( std::make_pair( representation_id, representation_switch ) ); } } // TODO: if no color or material is given, set color 231/219/169 for walls, 140/140/140 for slabs } /*\brief method convertToOSG: Creates geometry for OpenSceneGraph from given ProductShapeData. \param[out] parent_group Group to append the geometry. **/ void convertToOSG( const std::map<int, shared_ptr<ProductShapeData> >& map_shape_data, osg::ref_ptr<osg::Switch> parent_group ) { progressTextCallback( L"Converting geometry to OpenGL format ..." 
); progressValueCallback( 0, "scenegraph" ); m_map_entity_id_to_switch.clear(); m_map_representation_id_to_switch.clear(); m_vec_existing_statesets.clear(); shared_ptr<ProductShapeData> ifc_project_data; std::vector<shared_ptr<ProductShapeData> > vec_products; for( auto it = map_shape_data.begin(); it != map_shape_data.end(); ++it ) { shared_ptr<ProductShapeData> shape_data = it->second; if( shape_data ) { vec_products.push_back( shape_data ); } } // create geometry for each IfcProduct independently, spatial structure will be resolved later std::map<int, osg::ref_ptr<osg::Switch> >* map_entity_id = &m_map_entity_id_to_switch; std::map<int, osg::ref_ptr<osg::Switch> >* map_representations = &m_map_representation_id_to_switch; const int num_products = (int)vec_products.size(); #ifdef ENABLE_OPENMP Mutex writelock_map; Mutex writelock_message_callback; Mutex writelock_ifc_project; #pragma omp parallel firstprivate(num_products) shared(map_entity_id, map_representations) { // time per product may vary significantly, so use dynamic scheduling with moderate chunks #pragma omp for schedule(dynamic,40) #endif for( int i = 0; i < num_products; ++i ) { shared_ptr<ProductShapeData>& shape_data = vec_products[i]; weak_ptr<IfcObjectDefinition>& ifc_object_def_weak = shape_data->m_ifc_object_definition; if( ifc_object_def_weak.expired() ) { continue; } shared_ptr<IfcObjectDefinition> ifc_object_def( ifc_object_def_weak ); std::stringstream thread_err; if( dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_object_def) ) { // geometry will be created in method subtractOpenings continue; } else if( dynamic_pointer_cast<IfcProject>(ifc_object_def) ) { #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_ifc_project ); #endif ifc_project_data = shape_data; } shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def); if( !ifc_product ) { continue; } if( !ifc_product->m_Representation ) { continue; } const int product_id = ifc_product->m_entity_id; std::map<int, osg::ref_ptr<osg::Switch> > map_representation_switches; try { convertProductShapeToOSG( shape_data, map_representation_switches ); } catch( OutOfMemoryException& e ) { throw e; } catch( BuildingException& e ) { thread_err << e.what(); } catch( carve::exception& e ) { thread_err << e.str(); } catch( std::exception& e ) { thread_err << e.what(); } catch( ...
) { thread_err << "undefined error, product id " << product_id; } if( map_representation_switches.size() > 0 ) { osg::ref_ptr<osg::Switch> product_switch = new osg::Switch(); osg::ref_ptr<osg::MatrixTransform> product_transform = new osg::MatrixTransform(); product_transform->setMatrix( convertMatrixToOSG( shape_data->getTransform() ) ); product_switch->addChild( product_transform ); std::stringstream strs_product_switch_name; strs_product_switch_name << "#" << product_id << "=" << ifc_product->className() << " group"; product_switch->setName( strs_product_switch_name.str().c_str() ); for( auto it_map = map_representation_switches.begin(); it_map != map_representation_switches.end(); ++it_map ) { osg::ref_ptr<osg::Switch>& repres_switch = it_map->second; product_transform->addChild( repres_switch ); } // apply statesets if there are any const std::vector<shared_ptr<AppearanceData> >& vec_product_appearances = shape_data->getAppearances(); if( vec_product_appearances.size() > 0 ) { applyAppearancesToGroup( vec_product_appearances, product_switch ); } #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_map ); #endif map_entity_id->insert( std::make_pair( product_id, product_switch ) ); map_representations->insert( map_representation_switches.begin(), map_representation_switches.end() ); } if( thread_err.tellp() > 0 ) { #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_message_callback ); #endif messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ ); } // progress callback double progress = (double)i / (double)num_products; if( progress - m_recent_progress > 0.02 ) { #ifdef ENABLE_OPENMP if( omp_get_thread_num() == 0 ) #endif { // leave 10% of progress to openscenegraph internals progressValueCallback( progress*0.9, "scenegraph" ); m_recent_progress = progress; } } } #ifdef ENABLE_OPENMP } // implicit barrier #endif try { // now resolve spatial structure if( ifc_project_data ) { resolveProjectStructure( ifc_project_data, parent_group ); } } catch( OutOfMemoryException& e ) { throw e; } catch( BuildingException& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( std::exception& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( ... 
) { messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ ); } progressValueCallback( 0.9, "scenegraph" ); } void addNodes( const std::map<int, shared_ptr<BuildingObject> >& map_shape_data, osg::ref_ptr<osg::Switch>& target_group ) { // check if there are entities that are not in spatial structure if( !target_group ) { target_group = new osg::Switch(); } for( auto it_product_shapes = map_shape_data.begin(); it_product_shapes != map_shape_data.end(); ++it_product_shapes ) { int product_id = it_product_shapes->first; auto it_find = m_map_entity_id_to_switch.find( product_id ); if( it_find != m_map_entity_id_to_switch.end() ) { osg::ref_ptr<osg::Switch>& sw = it_find->second; if( sw ) { target_group->addChild( sw ); } } } } void resolveProjectStructure( const shared_ptr<ProductShapeData>& product_data, osg::ref_ptr<osg::Switch> group ) { if( !product_data ) { return; } if( product_data->m_ifc_object_definition.expired() ) { return; } shared_ptr<IfcObjectDefinition> object_def( product_data->m_ifc_object_definition ); const int entity_id = object_def->m_entity_id; if( SceneGraphUtils::inParentList( entity_id, group ) ) { messageCallback( "Cycle in project structure detected", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__, object_def.get() ); return; } const std::vector<shared_ptr<ProductShapeData> >& vec_children = product_data->getChildren(); for( size_t ii = 0; ii < vec_children.size(); ++ii ) { const shared_ptr<ProductShapeData>& child_product_data = vec_children[ii]; if( !child_product_data ) { continue; } osg::ref_ptr<osg::Switch> group_subparts = new osg::Switch(); if( !child_product_data->m_ifc_object_definition.expired() ) { shared_ptr<IfcObjectDefinition> child_obj_def( child_product_data->m_ifc_object_definition ); std::stringstream group_subparts_name; group_subparts_name << "#" << child_obj_def->m_entity_id << "="; group_subparts_name << child_obj_def->className(); group_subparts->setName( group_subparts_name.str().c_str() ); } group->addChild( group_subparts ); resolveProjectStructure( child_product_data, group_subparts ); } auto it_product_map = m_map_entity_id_to_switch.find( entity_id ); if( it_product_map != m_map_entity_id_to_switch.end() ) { const osg::ref_ptr<osg::Switch>& product_switch = it_product_map->second; if( product_switch ) { group->addChild( product_switch ); } } else { if( group->getNumChildren() == 0 ) { osg::ref_ptr<osg::Switch> product_switch = new osg::Switch(); group->addChild( product_switch ); std::stringstream switch_name; switch_name << "#" << entity_id << "=" << object_def->className(); product_switch->setName( switch_name.str().c_str() ); } } } void clearAppearanceCache() { #ifdef ENABLE_OPENMP ScopedLock lock( m_writelock_appearance_cache ); #endif m_vec_existing_statesets.clear(); } void convertToOSGStateSet( const shared_ptr<AppearanceData>& appearence, osg::ref_ptr<osg::StateSet>& target_stateset ) { if( !appearence ) { return; } const float shininess = appearence->m_shininess; const float transparency = appearence->m_transparency; const bool set_transparent = appearence->m_set_transparent; const float color_ambient_r = appearence->m_color_ambient.r(); const float color_ambient_g = appearence->m_color_ambient.g(); const float color_ambient_b = appearence->m_color_ambient.b(); const float color_ambient_a = appearence->m_color_ambient.a(); const float color_diffuse_r = appearence->m_color_diffuse.r(); const float color_diffuse_g = appearence->m_color_diffuse.g(); const float color_diffuse_b = 
appearence->m_color_diffuse.b(); const float color_diffuse_a = appearence->m_color_diffuse.a(); const float color_specular_r = appearence->m_color_specular.r(); const float color_specular_g = appearence->m_color_specular.g(); const float color_specular_b = appearence->m_color_specular.b(); const float color_specular_a = appearence->m_color_specular.a(); if( m_enable_stateset_caching ) { #ifdef ENABLE_OPENMP ScopedLock lock( m_writelock_appearance_cache ); #endif for( size_t i = 0; i<m_vec_existing_statesets.size(); ++i ) { const osg::ref_ptr<osg::StateSet> stateset_existing = m_vec_existing_statesets[i]; if( !stateset_existing.valid() ) { continue; } osg::ref_ptr<osg::Material> mat_existing = (osg::Material*)stateset_existing->getAttribute( osg::StateAttribute::MATERIAL ); if( !mat_existing ) { continue; } // compare osg::Vec4f color_ambient_existing = mat_existing->getAmbient( osg::Material::FRONT_AND_BACK ); if( fabs( color_ambient_existing.r() - color_ambient_r ) > 0.03 ) break; if( fabs( color_ambient_existing.g() - color_ambient_g ) > 0.03 ) break; if( fabs( color_ambient_existing.b() - color_ambient_b ) > 0.03 ) break; if( fabs( color_ambient_existing.a() - color_ambient_a ) > 0.03 ) break; osg::Vec4f color_diffuse_existing = mat_existing->getDiffuse( osg::Material::FRONT_AND_BACK ); if( fabs( color_diffuse_existing.r() - color_diffuse_r ) > 0.03 ) break; if( fabs( color_diffuse_existing.g() - color_diffuse_g ) > 0.03 ) break; if( fabs( color_diffuse_existing.b() - color_diffuse_b ) > 0.03 ) break; if( fabs( color_diffuse_existing.a() - color_diffuse_a ) > 0.03 ) break; osg::Vec4f color_specular_existing = mat_existing->getSpecular( osg::Material::FRONT_AND_BACK ); if( fabs( color_specular_existing.r() - color_specular_r ) > 0.03 ) break; if( fabs( color_specular_existing.g() - color_specular_g ) > 0.03 ) break; if( fabs( color_specular_existing.b() - color_specular_b ) > 0.03 ) break; if( fabs( color_specular_existing.a() - color_specular_a ) > 0.03 ) break; float shininess_existing = mat_existing->getShininess( osg::Material::FRONT_AND_BACK ); if( fabs( shininess_existing - shininess ) > 0.03 ) break; bool blend_on_existing = stateset_existing->getMode( GL_BLEND ) == osg::StateAttribute::ON; if( blend_on_existing != set_transparent ) break; bool transparent_bin = stateset_existing->getRenderingHint() == osg::StateSet::TRANSPARENT_BIN; if( transparent_bin != set_transparent ) break; // if we get here, appearance is same as existing state set // TODO: block this re-used stateset for merging, or prevent merged statesets from being re-used target_stateset = stateset_existing; return; } } osg::Vec4f ambientColor( color_ambient_r, color_ambient_g, color_ambient_b, transparency ); osg::Vec4f diffuseColor( color_diffuse_r, color_diffuse_g, color_diffuse_b, transparency ); osg::Vec4f specularColor( color_specular_r, color_specular_g, color_specular_b, transparency ); // TODO: material caching and re-use osg::ref_ptr<osg::Material> mat = new osg::Material(); if( !mat ){ throw OutOfMemoryException(); } mat->setAmbient( osg::Material::FRONT_AND_BACK, ambientColor ); mat->setDiffuse( osg::Material::FRONT_AND_BACK, diffuseColor ); mat->setSpecular( osg::Material::FRONT_AND_BACK, specularColor ); mat->setShininess( osg::Material::FRONT_AND_BACK, shininess ); mat->setColorMode( osg::Material::SPECULAR ); target_stateset = new osg::StateSet(); if( !target_stateset ){ throw OutOfMemoryException(); } target_stateset->setAttribute( mat, osg::StateAttribute::ON ); if( appearence->m_set_transparent ) { 
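			// translucent appearance: enable alpha blending and move the drawable into
			// the depth-sorted transparent bin so it is rendered after opaque geometry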
mat->setTransparency( osg::Material::FRONT_AND_BACK, transparency ); target_stateset->setMode( GL_BLEND, osg::StateAttribute::ON ); target_stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN ); } if( appearence->m_specular_exponent != 0.f ) { //osg::ref_ptr<osgFX::SpecularHighlights> spec_highlights = new osgFX::SpecularHighlights(); //spec_highlights->setSpecularExponent( spec->m_value ); // todo: add to scenegraph } if( m_enable_stateset_caching ) { m_vec_existing_statesets.push_back( target_stateset ); } } };
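// Illustrative usage sketch (not part of the original header). Assumes a
// default-constructible GeometrySettings and a shape map already filled by
// the geometry converter:
inline osg::ref_ptr<osg::Switch> buildSceneGraphExample( const std::map<int, shared_ptr<ProductShapeData> >& map_shape_data )
{
	shared_ptr<GeometrySettings> geom_settings( new GeometrySettings() );
	ConverterOSG converter( geom_settings );
	osg::ref_ptr<osg::Switch> model_switch = new osg::Switch();
	converter.convertToOSG( map_shape_data, model_switch );
	// individual products can now be toggled via converter.getMapEntityIdToSwitch()
	return model_switch;
}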
zherk.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> c * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_herk * * Performs one of the Hermitian rank k operations * * \f[ C = \alpha A \times A^H + \beta C, \f] * or * \f[ C = \alpha A^H \times A + \beta C, \f] * * where alpha and beta are real scalars, C is an n-by-n Hermitian * matrix, and A is an n-by-k matrix in the first case and a k-by-n * matrix in the second case. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of C is stored; * - PlasmaLower: Lower triangle of C is stored. * * @param[in] trans * - PlasmaNoTrans: \f[ C = \alpha A \times A^H + \beta C; \f] * - PlasmaConjTrans: \f[ C = \alpha A^H \times A + \beta C. \f] * * @param[in] n * The order of the matrix C. n >= 0. * * @param[in] k * If trans = PlasmaNoTrans, number of columns of the A matrix; * if trans = PlasmaConjTrans, number of rows of the A matrix. * * @param[in] alpha * The scalar alpha. * * @param[in] pA * A is an lda-by-ka matrix. * If trans = PlasmaNoTrans, ka = k; * if trans = PlasmaConjTrans, ka = n. * * @param[in] lda * The leading dimension of the array A. * If trans = PlasmaNoTrans, lda >= max(1, n); * if trans = PlasmaConjTrans, lda >= max(1, k). * * @param[in] beta * The scalar beta. * * @param[in,out] pC * C is an ldc-by-n matrix. * On exit, the uplo part of the matrix is overwritten * by the uplo part of the updated matrix. * * @param[in] ldc * The leading dimension of the array C. ldc >= max(1, n). * ******************************************************************************* * * @retval PlasmaSuccess successful exit * ******************************************************************************* * * @sa plasma_omp_zherk * @sa plasma_cherk * ******************************************************************************/ int plasma_zherk(plasma_enum_t uplo, plasma_enum_t trans, int n, int k, double alpha, plasma_complex64_t *pA, int lda, double beta, plasma_complex64_t *pC, int ldc) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); return -1; } if ((trans != PlasmaNoTrans) && (trans != PlasmaConjTrans)) { plasma_error("illegal value of trans"); return -2; } if (n < 0) { plasma_error("illegal value of n"); return -3; } if (k < 0) { plasma_error("illegal value of k"); return -4; } int am, an; if (trans == PlasmaNoTrans) { am = n; an = k; } else { am = k; an = n; } if (lda < imax(1, am)) { plasma_error("illegal value of lda"); return -7; } if (ldc < imax(1, n)) { plasma_error("illegal value of ldc"); return -10; } // quick return if (n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0)) return PlasmaSuccess; // Set tiling parameters. int nb = plasma->nb; // Initialize tile matrix descriptors. 
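    // A is am-by-an and C is n-by-n, both described as grids of nb-by-nb tiles;
    // the user's LAPACK-layout arrays are translated into these descriptors
    // inside the asynchronous block below, and C is translated back afterwards.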
plasma_desc_t A; plasma_desc_t C; int retval; retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, am, an, 0, 0, am, an, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, n, n, 0, 0, n, n, &C); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } // Create sequence. plasma_sequence_t *sequence = NULL; retval = plasma_sequence_create(&sequence); if (retval != PlasmaSuccess) { plasma_error("plasma_sequence_create() failed"); plasma_desc_destroy(&A); plasma_desc_destroy(&C); return retval; } // Initialize request. plasma_request_t request = PlasmaRequestInitializer; // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_zge2desc(pA, lda, A, sequence, &request); plasma_omp_zge2desc(pC, ldc, C, sequence, &request); // Call the tile async function. plasma_omp_zherk(uplo, trans, alpha, A, beta, C, sequence, &request); // Translate back to LAPACK layout. plasma_omp_zdesc2ge(C, pC, ldc, sequence, &request); } // implicit synchronization // Free matrices in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&C); // Return status. int status = sequence->status; plasma_sequence_destroy(sequence); return status; } /***************************************************************************//** * * @ingroup plasma_herk * * Performs rank k update. * Non-blocking tile version of plasma_zherk(). * May return before the computation is finished. * Operates on matrices stored by tiles. * All matrices are passed through descriptors. * All dimensions are taken from the descriptors. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of C is stored; * - PlasmaLower: Lower triangle of C is stored. * * @param[in] trans * - PlasmaNoTrans: \f[ C = \alpha A \times A^H + \beta C; \f] * - PlasmaConjTrans: \f[ C = \alpha A^H \times A + \beta C. \f] * * @param[in] alpha * The scalar alpha. * * @param[in] A * Descriptor of matrix A. * * @param[in] beta * The scalar beta. * * @param[in,out] C * Descriptor of matrix C. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check * the sequence->status for errors. * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_zherk * @sa plasma_omp_zherk * @sa plasma_omp_cherk * @sa plasma_omp_dherk * @sa plasma_omp_sherk * ******************************************************************************/ void plasma_omp_zherk(plasma_enum_t uplo, plasma_enum_t trans, double alpha, plasma_desc_t A, double beta, plasma_desc_t C, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments.
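    // (async functions report argument errors through sequence->status and
    // request->status via plasma_request_fail() rather than return values)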
if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if ((trans != PlasmaNoTrans) && (trans != PlasmaConjTrans)) { plasma_error("illegal value of trans"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); plasma_error("invalid A"); return; } if (plasma_desc_check(C) != PlasmaSuccess) { plasma_error("invalid C"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return int k = trans == PlasmaNoTrans ? A.n : A.m; if (C.m == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0)) return; // Call the parallel function. plasma_pzherk(uplo, trans, alpha, A, beta, C, sequence, request); }
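/* Illustrative usage sketch (error handling trimmed; assumes plasma_init()
 * and plasma_finalize() from plasma.h, as in the PLASMA examples). Computes
 * the lower triangle of C = alpha*A*A^H + beta*C with column-major input: */
#include <stdlib.h>

static void example_zherk(int n, int k)
{
    plasma_complex64_t *A = (plasma_complex64_t*) calloc((size_t)n*k, sizeof(plasma_complex64_t));
    plasma_complex64_t *C = (plasma_complex64_t*) calloc((size_t)n*n, sizeof(plasma_complex64_t));
    plasma_init();
    // trans == PlasmaNoTrans, so A is n-by-k and lda >= n; ldc >= n
    plasma_zherk(PlasmaLower, PlasmaNoTrans, n, k, 1.0, A, n, 0.0, C, n);
    plasma_finalize();
    free(C);
    free(A);
}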
divsufsort.c
/* * divsufsort.c for libdivsufsort * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved. * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include "divsufsort_private.h" #ifdef _OPENMP # include <omp.h> #endif /*- Private Functions -*/ /* Sorts suffixes of type B*. */ static saidx_t sort_typeBstar(const sauchar_t *T, saidx_t *SA, saidx_t *bucket_A, saidx_t *bucket_B, saidx_t n) { saidx_t *PAb, *ISAb, *buf; #ifdef _OPENMP saidx_t *curbuf; saidx_t l; #endif saidx_t i, j, k, t, m, bufsize; saint_t c0, c1; #ifdef _OPENMP saint_t d0, d1; int tmp; #endif /* Initialize bucket arrays. */ for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; } for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; } /* Count the number of occurrences of the first one or two characters of each type A, B and B* suffix. Moreover, store the beginning position of all type B* suffixes into the array SA. */ for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) { /* type A suffix. */ do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1)); if(0 <= i) { /* type B* suffix. */ ++BUCKET_BSTAR(c0, c1); SA[--m] = i; /* type B suffix. */ for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { ++BUCKET_B(c0, c1); } } } m = n - m; /* note: A type B* suffix is lexicographically smaller than a type B suffix that begins with the same first two characters. */ /* Calculate the index of start/end point of each bucket. */ for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) { t = i + BUCKET_A(c0); BUCKET_A(c0) = i + j; /* start point */ i = t + BUCKET_B(c0, c0); for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) { j += BUCKET_BSTAR(c0, c1); BUCKET_BSTAR(c0, c1) = j; /* end point */ i += BUCKET_B(c0, c1); } } if(0 < m) { /* Sort the type B* suffixes by their first two characters. */ PAb = SA + n - m; ISAb = SA + m; for(i = m - 2; 0 <= i; --i) { t = PAb[i], c0 = T[t], c1 = T[t + 1]; SA[--BUCKET_BSTAR(c0, c1)] = i; } t = PAb[m - 1], c0 = T[t], c1 = T[t + 1]; SA[--BUCKET_BSTAR(c0, c1)] = m - 1; /* Sort the type B* substrings using sssort. 
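     Buckets BUCKET_BSTAR(c0, c1) can be sorted independently; in the OpenMP
     variant each thread repeatedly claims the next non-trivial bucket under
     the critical section and sorts it with its own slice of the scratch
     buffer.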
*/ #ifdef _OPENMP tmp = omp_get_max_threads(); buf = SA + m, bufsize = (n - (2 * m)) / tmp; c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m; #pragma omp parallel default(shared) private(curbuf, k, l, d0, d1, tmp) { tmp = omp_get_thread_num(); curbuf = buf + tmp * bufsize; k = 0; for(;;) { #pragma omp critical(sssort_lock) { if(0 < (l = j)) { d0 = c0, d1 = c1; do { k = BUCKET_BSTAR(d0, d1); if(--d1 <= d0) { d1 = ALPHABET_SIZE - 1; if(--d0 < 0) { break; } } } while(((l - k) <= 1) && (0 < (l = k))); c0 = d0, c1 = d1, j = k; } } if(l == 0) { break; } sssort(T, PAb, SA + k, SA + l, curbuf, bufsize, 2, n, *(SA + k) == (m - 1)); } } #else buf = SA + m, bufsize = n - (2 * m); for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) { for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) { i = BUCKET_BSTAR(c0, c1); if(1 < (j - i)) { sssort(T, PAb, SA + i, SA + j, buf, bufsize, 2, n, *(SA + i) == (m - 1)); } } } #endif /* Compute ranks of type B* substrings. */ for(i = m - 1; 0 <= i; --i) { if(0 <= SA[i]) { j = i; do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i])); SA[i + 1] = i - j; if(i <= 0) { break; } } j = i; do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0); ISAb[SA[i]] = j; } /* Construct the inverse suffix array of type B* suffixes using trsort. */ trsort(ISAb, SA, m, 1); /* Set the sorted order of type B* suffixes. */ for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) { for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { } if(0 <= i) { t = i; for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { } SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t; } } /* Calculate the index of start/end point of each bucket. */ BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */ for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) { i = BUCKET_A(c0 + 1) - 1; for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) { t = i - BUCKET_B(c0, c1); BUCKET_B(c0, c1) = i; /* end point */ /* Move all type B* suffixes to the correct position. */ for(i = t, j = BUCKET_BSTAR(c0, c1); j <= k; --i, --k) { SA[i] = SA[k]; } } BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */ BUCKET_B(c0, c0) = i; /* end point */ } } return m; } /* Constructs the suffix array by using the sorted order of type B* suffixes. */ static void construct_SA(const sauchar_t *T, saidx_t *SA, saidx_t *bucket_A, saidx_t *bucket_B, saidx_t n, saidx_t m) { saidx_t *i, *j, *k; saidx_t s; saint_t c0, c1, c2; if(0 < m) { /* Construct the sorted order of type B suffixes by using the sorted order of type B* suffixes. */ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { /* Scan the suffix array from right to left. */ for(i = SA + BUCKET_BSTAR(c1, c1 + 1), j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; i <= j; --j) { if(0 < (s = *j)) { assert(T[s] == c1); assert(((s + 1) < n) && (T[s] <= T[s + 1])); assert(T[s - 1] <= T[s]); *j = ~s; c0 = T[--s]; if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); *k-- = s; } else { assert(((s == 0) && (T[s] == c1)) || (s < 0)); *j = ~s; } } } } /* Construct the suffix array by using the sorted order of type B suffixes. */ k = SA + BUCKET_A(c2 = T[n - 1]); *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1); /* Scan the suffix array from left to right.
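     Induced sorting of the type A suffixes: when a positive entry s is read
     in increasing order, its left neighbour s-1 is a type A suffix (the
     assert checks T[s-1] >= T[s]) and is appended at the current head of
     bucket A of character T[s-1]; it is stored bit-complemented when its own
     predecessor is type B (already placed by the right-to-left pass), so the
     scan later just restores its sign instead of inducing again.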
*/ for(i = SA, j = SA + n; i < j; ++i) { if(0 < (s = *i)) { assert(T[s - 1] >= T[s]); c0 = T[--s]; if((s == 0) || (T[s - 1] < c0)) { s = ~s; } if(c0 != c2) { BUCKET_A(c2) = k - SA; k = SA + BUCKET_A(c2 = c0); } assert(i < k); *k++ = s; } else { assert(s < 0); *i = ~s; } } } #if 0 /* Constructs the burrows-wheeler transformed string directly by using the sorted order of type B* suffixes. */ static saidx_t construct_BWT(const sauchar_t *T, saidx_t *SA, saidx_t *bucket_A, saidx_t *bucket_B, saidx_t n, saidx_t m) { saidx_t *i, *j, *k, *orig; saidx_t s; saint_t c0, c1, c2; if(0 < m) { /* Construct the sorted order of type B suffixes by using the sorted order of type B* suffixes. */ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { /* Scan the suffix array from right to left. */ for(i = SA + BUCKET_BSTAR(c1, c1 + 1), j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; i <= j; --j) { if(0 < (s = *j)) { assert(T[s] == c1); assert(((s + 1) < n) && (T[s] <= T[s + 1])); assert(T[s - 1] <= T[s]); c0 = T[--s]; *j = ~((saidx_t)c0); if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); *k-- = s; } else if(s != 0) { *j = ~s; #ifndef NDEBUG } else { assert(T[s] == c1); #endif } } } } /* Construct the BWTed string by using the sorted order of type B suffixes. */ k = SA + BUCKET_A(c2 = T[n - 1]); *k++ = (T[n - 2] < c2) ? ~((saidx_t)T[n - 2]) : (n - 1); /* Scan the suffix array from left to right. */ for(i = SA, j = SA + n, orig = SA; i < j; ++i) { if(0 < (s = *i)) { assert(T[s - 1] >= T[s]); c0 = T[--s]; *i = c0; if((0 < s) && (T[s - 1] < c0)) { s = ~((saidx_t)T[s - 1]); } if(c0 != c2) { BUCKET_A(c2) = k - SA; k = SA + BUCKET_A(c2 = c0); } assert(i < k); *k++ = s; } else if(s != 0) { *i = ~s; } else { orig = i; } } return orig - SA; } #endif /*---------------------------------------------------------------------------*/ /** * Initialize suffix array context * * @return 0 for success, or non-zero in case of an error */ int divsufsort_init(divsufsort_ctx_t *ctx) { ctx->bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t)); ctx->bucket_B = NULL; if (ctx->bucket_A) { ctx->bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t)); if (ctx->bucket_B) return 0; } divsufsort_destroy(ctx); return -1; } /** * Destroy suffix array context * * @param ctx suffix array context to destroy */ void divsufsort_destroy(divsufsort_ctx_t *ctx) { if (ctx->bucket_B) { free(ctx->bucket_B); ctx->bucket_B = NULL; } if (ctx->bucket_A) { free(ctx->bucket_A); ctx->bucket_A = NULL; } } /*- Function -*/ saint_t divsufsort_build_array(divsufsort_ctx_t *ctx, const sauchar_t *T, saidx_t *SA, saidx_t n) { saidx_t m; saint_t err = 0; /* Check arguments. */ if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; } else if(n == 0) { return 0; } else if(n == 1) { SA[0] = 0; return 0; } else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; } /* Suffixsort. */ if((ctx->bucket_A != NULL) && (ctx->bucket_B != NULL)) { m = sort_typeBstar(T, SA, ctx->bucket_A, ctx->bucket_B, n); construct_SA(T, SA, ctx->bucket_A, ctx->bucket_B, n, m); } else { err = -2; } return err; } #if 0 saidx_t divbwt(const sauchar_t *T, sauchar_t *U, saidx_t *A, saidx_t n) { saidx_t *B; saidx_t *bucket_A, *bucket_B; saidx_t m, pidx, i; /* Check arguments. 
*/ if((T == NULL) || (U == NULL) || (n < 0)) { return -1; } else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; } if((B = A) == NULL) { B = (saidx_t *)malloc((size_t)(n + 1) * sizeof(saidx_t)); } bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t)); bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t)); /* Burrows-Wheeler Transform. */ if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) { m = sort_typeBstar(T, B, bucket_A, bucket_B, n); pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m); /* Copy to output string. */ U[0] = T[n - 1]; for(i = 0; i < pidx; ++i) { U[i + 1] = (sauchar_t)B[i]; } for(i += 1; i < n; ++i) { U[i] = (sauchar_t)B[i]; } pidx += 1; } else { pidx = -2; } free(bucket_B); free(bucket_A); if(A == NULL) { free(B); } return pidx; } const char * divsufsort_version(void) { return PROJECT_VERSION_FULL; } #endif
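/* Usage sketch, not part of libdivsufsort: exercises the context API defined
   above (divsufsort_init / divsufsort_build_array / divsufsort_destroy).
   Assumes divsufsort_private.h declares divsufsort_ctx_t and the sauchar_t /
   saidx_t typedefs, as the function signatures above imply. */
#include <stdio.h>
#include <string.h>
#include "divsufsort_private.h"

int main(void) {
    const sauchar_t T[] = "abracadabra";
    saidx_t n = (saidx_t)strlen((const char *)T);
    saidx_t SA[16];
    divsufsort_ctx_t ctx;

    if (divsufsort_init(&ctx) != 0) return 1;           /* allocates bucket_A / bucket_B */
    if (divsufsort_build_array(&ctx, T, SA, n) != 0) {  /* sorts all n suffixes into SA */
        divsufsort_destroy(&ctx);
        return 1;
    }
    for (saidx_t i = 0; i < n; i++)  /* SA[i] = start of the i-th smallest suffix */
        printf("%2d: %s\n", (int)SA[i], (const char *)(T + SA[i]));
    divsufsort_destroy(&ctx);        /* frees the bucket arrays */
    return 0;
}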
convolution_3x3_pack8to1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack8to1_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = 8a-inch/8a-64-outch; kernel_tm_pack8to1.create(8 * inch / 8, 64, outch / 8 + outch % 8, (size_t)2u * 8, 8); int p = 0; for (; p + 7 < outch; p += 8) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); const Mat k4 = kernel_tm.channel(p + 4); const Mat k5 = kernel_tm.channel(p + 5); const Mat k6 = kernel_tm.channel(p + 6); const Mat k7 = kernel_tm.channel(p + 7); Mat g0 = kernel_tm_pack8to1.channel(p / 8); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16)k0.row(q + i)[k]; g00[1] = (__fp16)k1.row(q + i)[k]; g00[2] = (__fp16)k2.row(q + i)[k]; g00[3] = (__fp16)k3.row(q + i)[k]; g00[4] = (__fp16)k4.row(q + i)[k]; g00[5] = (__fp16)k5.row(q + i)[k]; g00[6] = (__fp16)k6.row(q + i)[k]; g00[7] = (__fp16)k7.row(q + i)[k]; g00 += 8; } } } } for (; p < outch; p++) { const Mat k0 = kernel_tm.channel(p); Mat g0 = kernel_tm_pack8to1.channel(p / 8 + p % 8); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16)k0.row(q + i)[k]; g00 += 1; } } } } } static void conv3x3s1_winograd64_pack8to1_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch 
= bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tiles = outw / 6; int h_tiles = outh / 6; const int tiles = w_tiles * h_tiles; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); conv3x3s1_winograd64_transform_input_pack8_fp16sa_neon(bottom_blob_bordered, bottom_blob_tm, opt); } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tm2p = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u, 1, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; #pragma omp parallel for 
num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; __fp16* output0_tm = top_blob_tm.channel(p); __fp16* output1_tm = top_blob_tm.channel(p + 1); __fp16* output2_tm = top_blob_tm.channel(p + 2); __fp16* output3_tm = top_blob_tm.channel(p + 3); __fp16* output4_tm = top_blob_tm.channel(p + 4); __fp16* output5_tm = top_blob_tm.channel(p + 5); __fp16* output6_tm = top_blob_tm.channel(p + 6); __fp16* output7_tm = top_blob_tm.channel(p + 7); const Mat kernel01_tm = kernel_tm.channel(p / 8); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[5] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v0.h[7] \n" "fmla v24.8h, v17.8h, v1.h[0] \n" "fmla v25.8h, v17.8h, v1.h[1] \n" "fmla v26.8h, v17.8h, v1.h[2] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[4] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[6] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%9], #64 \n" "fmla v24.8h, v18.8h, v2.h[0] \n" "fmla v25.8h, v18.8h, v2.h[1] \n" "fmla v26.8h, v18.8h, v2.h[2] \n" "fmla v27.8h, v18.8h, v2.h[3] \n" "fmla v28.8h, v18.8h, v2.h[4] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[6] \n" "fmla v31.8h, v18.8h, v2.h[7] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n" "fmla v24.8h, v19.8h, v3.h[0] \n" "fmla v25.8h, v19.8h, v3.h[1] \n" "fmla v26.8h, v19.8h, v3.h[2] \n" "fmla v27.8h, v19.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v3.h[4] \n" "fmla v29.8h, v19.8h, v3.h[5] \n" "fmla v30.8h, v19.8h, v3.h[6] \n" "fmla v31.8h, v19.8h, v3.h[7] \n" "fmla v24.8h, v20.8h, v4.h[0] \n" "fmla v25.8h, v20.8h, v4.h[1] \n" "fmla v26.8h, v20.8h, v4.h[2] \n" "fmla v27.8h, v20.8h, v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v4.h[5] \n" "fmla v30.8h, v20.8h, v4.h[6] \n" "fmla v31.8h, v20.8h, v4.h[7] \n" "fmla v24.8h, v21.8h, v5.h[0] \n" "fmla v25.8h, v21.8h, v5.h[1] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[3] \n" "fmla v28.8h, v21.8h, v5.h[4] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v5.h[7] \n" "fmla v24.8h, v22.8h, v6.h[0] \n" "fmla v25.8h, v22.8h, v6.h[1] \n" "fmla v26.8h, v22.8h, v6.h[2] \n" "fmla v27.8h, v22.8h, v6.h[3] \n" "fmla v28.8h, v22.8h, v6.h[4] \n" "fmla v29.8h, v22.8h, v6.h[5] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v23.8h, v7.h[0] \n" "fmla v25.8h, v23.8h, v7.h[1] \n" "fmla v26.8h, v23.8h, v7.h[2] \n" "fmla v27.8h, v23.8h, v7.h[3] \n" "fmla v28.8h, v23.8h, v7.h[4] \n" "fmla v29.8h, v23.8h, v7.h[5] \n" "fmla 
v30.8h, v23.8h, v7.h[6] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "bne 0b \n" "st1 {v24.8h}, [%1], #16 \n" "st1 {v25.8h}, [%2], #16 \n" "st1 {v26.8h}, [%3], #16 \n" "st1 {v27.8h}, [%4], #16 \n" "st1 {v28.8h}, [%5], #16 \n" "st1 {v29.8h}, [%6], #16 \n" "st1 {v30.8h}, [%7], #16 \n" "st1 {v31.8h}, [%8], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%9], #32 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v28.4h, v16.4h, v0.h[4] \n" "fmla v29.4h, v16.4h, v0.h[5] \n" "fmla v30.4h, v16.4h, v0.h[6] \n" "fmla v31.4h, v16.4h, v0.h[7] \n" "fmla v24.4h, v17.4h, v1.h[0] \n" "fmla v25.4h, v17.4h, v1.h[1] \n" "fmla v26.4h, v17.4h, v1.h[2] \n" "fmla v27.4h, v17.4h, v1.h[3] \n" "fmla v28.4h, v17.4h, v1.h[4] \n" "fmla v29.4h, v17.4h, v1.h[5] \n" "fmla v30.4h, v17.4h, v1.h[6] \n" "fmla v31.4h, v17.4h, v1.h[7] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%9], #32 \n" "fmla v24.4h, v18.4h, v2.h[0] \n" "fmla v25.4h, v18.4h, v2.h[1] \n" "fmla v26.4h, v18.4h, v2.h[2] \n" "fmla v27.4h, v18.4h, v2.h[3] \n" "fmla v28.4h, v18.4h, v2.h[4] \n" "fmla v29.4h, v18.4h, v2.h[5] \n" "fmla v30.4h, v18.4h, v2.h[6] \n" "fmla v31.4h, v18.4h, v2.h[7] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n" "fmla v24.4h, v19.4h, v3.h[0] \n" "fmla v25.4h, v19.4h, v3.h[1] \n" "fmla v26.4h, v19.4h, v3.h[2] \n" "fmla v27.4h, v19.4h, v3.h[3] \n" "fmla v28.4h, v19.4h, v3.h[4] \n" "fmla v29.4h, v19.4h, v3.h[5] \n" "fmla v30.4h, v19.4h, v3.h[6] \n" "fmla v31.4h, v19.4h, v3.h[7] \n" "fmla v24.4h, v20.4h, v4.h[0] \n" "fmla v25.4h, v20.4h, v4.h[1] \n" "fmla v26.4h, v20.4h, v4.h[2] \n" "fmla v27.4h, v20.4h, v4.h[3] \n" "fmla v28.4h, v20.4h, v4.h[4] \n" "fmla v29.4h, v20.4h, v4.h[5] \n" "fmla v30.4h, v20.4h, v4.h[6] \n" "fmla v31.4h, v20.4h, v4.h[7] \n" "fmla v24.4h, v21.4h, v5.h[0] \n" "fmla v25.4h, v21.4h, v5.h[1] \n" "fmla v26.4h, v21.4h, v5.h[2] \n" "fmla v27.4h, v21.4h, v5.h[3] \n" "fmla v28.4h, v21.4h, v5.h[4] \n" "fmla v29.4h, v21.4h, v5.h[5] \n" "fmla v30.4h, v21.4h, v5.h[6] \n" "fmla v31.4h, v21.4h, v5.h[7] \n" "fmla v24.4h, v22.4h, v6.h[0] \n" "fmla v25.4h, v22.4h, v6.h[1] \n" "fmla v26.4h, v22.4h, v6.h[2] \n" "fmla v27.4h, v22.4h, v6.h[3] \n" "fmla v28.4h, v22.4h, v6.h[4] \n" "fmla v29.4h, v22.4h, v6.h[5] 
\n" "fmla v30.4h, v22.4h, v6.h[6] \n" "fmla v31.4h, v22.4h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v23.4h, v7.h[0] \n" "fmla v25.4h, v23.4h, v7.h[1] \n" "fmla v26.4h, v23.4h, v7.h[2] \n" "fmla v27.4h, v23.4h, v7.h[3] \n" "fmla v28.4h, v23.4h, v7.h[4] \n" "fmla v29.4h, v23.4h, v7.h[5] \n" "fmla v30.4h, v23.4h, v7.h[6] \n" "fmla v31.4h, v23.4h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h}, [%1], #8 \n" "st1 {v25.4h}, [%2], #8 \n" "st1 {v26.4h}, [%3], #8 \n" "st1 {v27.4h}, [%4], #8 \n" "st1 {v28.4h}, [%5], #8 \n" "st1 {v29.4h}, [%6], #8 \n" "st1 {v30.4h}, [%7], #8 \n" "st1 {v31.4h}, [%8], #8 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v30.16b, v30.16b, v30.16b \n" "0: \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v0.8h}, [%9], #16 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%10], #64 \n" "fmla v30.8h, v16.8h, v0.h[0] \n" "fmla v30.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%10], #64 \n" "fmla v30.8h, v18.8h, v0.h[2] \n" "fmla v30.8h, v19.8h, v0.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v30.8h, v20.8h, v0.h[4] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "bne 0b \n" "st1 {v30.h}[0], [%1], #2 \n" "st1 {v30.h}[1], [%2], #2 \n" "st1 {v30.h}[2], [%3], #2 \n" "st1 {v30.h}[3], [%4], #2 \n" "st1 {v30.h}[4], [%5], #2 \n" "st1 {v30.h}[5], [%6], #2 \n" "st1 {v30.h}[6], [%7], #2 \n" "st1 {v30.h}[7], [%8], #2 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30"); } } } remain_outch_start += nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 8 + p % 8); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v30.16b, v30.16b, v30.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.8h}, [%3], #16 \n" "fmla v30.8h, v16.8h, v0.h[0] \n" "fmla v30.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, 
v23.8h}, [%2], #64 \n" "fmla v30.8h, v18.8h, v0.h[2] \n" "fmla v30.8h, v19.8h, v0.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v30.8h, v20.8h, v0.h[4] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "bne 0b \n" "st1 {v30.8h}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v30.16b, v30.16b, v30.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.8h}, [%3], #16 \n" "fmla v30.4h, v16.4h, v0.h[0] \n" "fmla v30.4h, v17.4h, v0.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" "fmla v30.4h, v18.4h, v0.h[2] \n" "fmla v30.4h, v19.4h, v0.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v30.4h, v20.4h, v0.h[4] \n" "fmla v30.4h, v21.4h, v0.h[5] \n" "fmla v30.4h, v22.4h, v0.h[6] \n" "fmla v30.4h, v23.4h, v0.h[7] \n" "bne 0b \n" "st1 {v30.4h}, [%1], #8 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x8_t _k0 = vld1q_f16(kptr); _sum0 = vfmaq_f16(_sum0, _r0, _k0); kptr += 8; r0 += 8; } __fp16 sum0 = vaddvq_f32(vcvt_f32_f16(vadd_f16(vget_low_f16(_sum0), vget_high_f16(_sum0)))); output0_tm[0] = sum0; output0_tm++; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u, 1, opt.workspace_allocator); } { conv3x3s1_winograd64_transform_output_fp16sa_neon(top_blob_tm, top_blob_bordered, bias, opt); } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
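/* Standalone illustration, not ncnn code: the tile-to-row mapping used by the
   repacking loops above. Tiles are consumed in blocks of 8, then 4, then
   singly; each block occupies one row of bottom_blob_tm2, and the index
   expressions below match the bb2.row<__fp16>(...) calls in the dot loops. */
#include <stdio.h>

int main(void) {
    const int tiles = 13; /* arbitrary example count */
    /* row count allocated by bottom_blob_tm2.create(...) above */
    printf("rows allocated: %d\n", tiles / 8 + (tiles % 8) / 4 + tiles % 4);
    int i = 0;
    for (; i + 7 < tiles; i += 8)  /* blocks of 8 tiles -> one row each */
        printf("tiles %2d..%2d -> row %d\n", i, i + 7, i / 8);
    for (; i + 3 < tiles; i += 4)  /* then blocks of 4 tiles */
        printf("tiles %2d..%2d -> row %d\n", i, i + 3, i / 8 + (i % 8) / 4);
    for (; i < tiles; i++)         /* remaining single tiles */
        printf("tile  %2d      -> row %d\n", i, i / 8 + (i % 8) / 4 + i % 4);
    return 0;
}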
GB_binop__islt_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__islt_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__islt_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__islt_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__islt_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint32) // A*D function (colscale): GB (_AxD__islt_uint32) // D*A function (rowscale): GB (_DxB__islt_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__islt_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__islt_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint32) // C=scalar+B GB (_bind1st__islt_uint32) // C=scalar+B' GB (_bind1st_tran__islt_uint32) // C=A+scalar GB (_bind2nd__islt_uint32) // C=A'+scalar GB (_bind2nd_tran__islt_uint32) // C type: uint32_t // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 0 // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_UINT32 || GxB_NO_ISLT_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__islt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__islt_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__islt_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__islt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__islt_uint32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__islt_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__islt_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__islt_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__islt_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__islt_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__islt_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__islt_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__islt_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__islt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
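/* Reference sketch, not generated GraphBLAS code: the scalar semantics of the
   ISLT_UINT32 operator implemented above. Unlike the boolean LT operator,
   ISLT returns its result in the operand type, so z = (x < y) is a uint32_t
   0 or 1 -- the same expression GB_BINOP and the bind1st/bind2nd loops use. */
#include <stdint.h>
#include <stdio.h>

static uint32_t islt_uint32(uint32_t x, uint32_t y) {
    return (uint32_t)(x < y); /* matches GB_BINOP(z,x,y,i,j): z = (x < y) */
}

int main(void) {
    printf("%u %u %u\n", islt_uint32(2, 5), islt_uint32(5, 5), islt_uint32(7, 5));
    return 0; /* prints: 1 0 0 */
}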
1832.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (i, j, k) num_threads(#P11) { /* E := A*B */ #pragma omp target teams distribute schedule(static, 16) for (i = 0; i < _PB_NI; i++) { #pragma omp target teams distribute schedule(static, 16) for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } } /* F := C*D */ #pragma omp target teams distribute schedule(static, 16) for (i = 0; i < _PB_NJ; i++) { #pragma omp target teams distribute schedule(static, 16) for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } } /* G := E*F */ #pragma omp target teams distribute schedule(static, 16) for (i = 0; i < _PB_NI; i++) { #pragma omp target teams distribute schedule(static, 16) for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. 
*/ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
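/* Sequential reference sketch, not part of PolyBench: the computation the
   kernel above performs is G := (A*B) * (C*D), staged through E := A*B and
   F := C*D. This toy version uses a single small size N in place of the
   NI/NJ/NK/NL/NM macros and omits the OpenMP and placeholder (#P11) pragmas. */
#include <stdio.h>

#define N 4 /* toy size, hypothetical; the benchmark uses NI, NJ, NK, NL, NM */

static void matmul(double X[N][N], double Y[N][N], double Z[N][N]) {
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) {
            Z[i][j] = 0.0;
            for (int k = 0; k < N; k++)
                Z[i][j] += X[i][k] * Y[k][j];
        }
}

int main(void) {
    double A[N][N], B[N][N], C[N][N], D[N][N], E[N][N], F[N][N], G[N][N];
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) {       /* mirrors init_array above */
            A[i][j] = (double)(i * j) / N;
            B[i][j] = (double)(i * (j + 1)) / N;
            C[i][j] = (double)(i * (j + 3)) / N;
            D[i][j] = (double)(i * (j + 2)) / N;
        }
    matmul(A, B, E); /* E := A*B */
    matmul(C, D, F); /* F := C*D */
    matmul(E, F, G); /* G := E*F */
    printf("G[1][1] = %f\n", G[1][1]);
    return 0;
}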
brkTie.c
/* This code is part of this project: Donato E, Ouyang M, * Peguero-Isalguez C. Triangle counting with a multi-core computer. * Proceedings of IEEE High Performance Extreme Computing Conference * (HPEC), 2018, 1-7. * * Copyright (c) 2018 Ming Ouyang * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <omp.h> #include "ompTri.h" uint64_t *idx, *newID; static uint64_t *hub; static uint64_t oneBit[64]; //oneBit[0] 0001 //oneBit[1] 0010 //oneBit[2] 0100 //oneBit[3] 1000 //descending static int cmpDeg(const void *a, const void *b) { return degree[*(uint64_t*) b] - degree[*(uint64_t*) a]; } //ascending static int cmpNewID(const void *a, const void *b) { return newID[*(uint64_t*) a] - newID[*(uint64_t*) b]; } //descending degree, break tie by adjacency to hubs static int degBrkTie(const void *a, const void *b) { uint64_t u, v; u = *(uint64_t*) a; v = *(uint64_t*) b; if (degree[u] > degree[v]) return -1; if (degree[u] < degree[v]) return 1; if (hub[u] > hub[v]) return -1; if (hub[u] < hub[v]) return 1; return 0; } void sortBrkTie(void) { uint64_t i, j, u, count; oneBit[0] = 1; for (i = 1; i < 64; i++) oneBit[i] = oneBit[i - 1] << 1; idx = (uint64_t*) malloc(sizeof(uint64_t) * n); newID = (uint64_t*) malloc(sizeof(uint64_t) * n); hub = (uint64_t*) malloc(sizeof(uint64_t) * n); for (i = 0; i < n; i++) { idx[i] = i; hub[i] = 0; } //sort all vertices by their degrees to find the top 64 hubs //improve this by partitioning qsort((void *)idx, n, sizeof(uint64_t), cmpDeg); //adjacency to the top 64 hub vertices for (i = 0; i < 64; i++) for (j = 0; j < degree[ idx[i] ]; j++) { u = neighbor[ idx[i] ] [j]; hub[u] = hub[u] | oneBit[64 - i - 1]; } //sort by degrees, tiebreaking with adjacency to hubs qsort((void *)idx, n, sizeof(uint64_t), degBrkTie); for (i = 0; i < n; i++) newID[ idx[i] ] = i; //sort neighbors by their new indices #pragma omp parallel for schedule(guided) for (i = 0; i < n; i++) if (degree[i] > 1) qsort((void *) neighbor[i], degree[i], sizeof(uint64_t), cmpNewID); count = 0; //drop zero-degree vertices while (degree[ idx[n - 1] ] == 0) { if (neighbor[ idx[n - 1] ]) { free(neighbor[ idx[n - 1] ]); neighbor[ idx[n - 1] ] = NULL; } count++, n--; } free(hub); if (verbose) printf("%lu vertices of 0-degree are removed\n", count); } //memory not freed: idx, newID -- they are needed by luCSR.c
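/* Toy illustration, not part of the project: the tie-break rule encoded by
   degBrkTie above. When two vertices have equal degree, the 64-bit hub masks
   decide: hub i contributes bit (64 - i - 1), so adjacency to a higher-ranked
   hub yields a numerically larger mask and an earlier position in the
   descending sort. */
#include <stdio.h>
#include <stdint.h>

int main(void) {
    uint64_t hub_u = 0, hub_v = 0;
    hub_u |= (uint64_t)1 << (64 - 0 - 1);  /* u is adjacent to hub 0 (the top hub) */
    hub_v |= (uint64_t)1 << (64 - 1 - 1);  /* v is adjacent to hubs 1 and 2 */
    hub_v |= (uint64_t)1 << (64 - 2 - 1);
    /* assuming degree[u] == degree[v], the comparison falls through to the masks */
    printf("u sorts before v: %s\n", hub_u > hub_v ? "yes" : "no"); /* yes */
    return 0;
}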
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Typedef declarations. 
*/ typedef enum { BitwiseAndAssignmentOperator = 0xd9U, BitwiseOrAssignmentOperator, LeftShiftAssignmentOperator, RightShiftAssignmentOperator, PowerAssignmentOperator, ModuloAssignmentOperator, PlusAssignmentOperator, SubtractAssignmentOperator, MultiplyAssignmentOperator, DivideAssignmentOperator, IncrementAssignmentOperator, DecrementAssignmentOperator, LeftShiftOperator, RightShiftOperator, LessThanEqualOperator, GreaterThanEqualOperator, EqualOperator, NotEqualOperator, LogicalAndOperator, LogicalOrOperator, ExponentialNotation } FxOperator; struct _FxInfo { const Image *images; char *expression; FILE *file; SplayTreeInfo *colors, *symbols; CacheView **view; RandomInfo *random_info; ExceptionInfo *exception; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireFxInfo() allocates the FxInfo structure. % % The format of the AcquireFxInfo method is: % % FxInfo *AcquireFxInfo(Image *images,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o expression: the expression. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression, ExceptionInfo *exception) { const Image *next; FxInfo *fx_info; ssize_t i; unsigned char fx_op[2]; fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info)); (void) memset(fx_info,0,sizeof(*fx_info)); fx_info->exception=AcquireExceptionInfo(); fx_info->images=images; fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength( fx_info->images),sizeof(*fx_info->view)); if (fx_info->view == (CacheView **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); i=0; next=GetFirstImageInList(fx_info->images); for ( ; next != (Image *) NULL; next=next->next) { fx_info->view[i]=AcquireVirtualCacheView(next,exception); i++; } fx_info->random_info=AcquireRandomInfo(); fx_info->expression=ConstantString(expression); fx_info->file=stderr; /* Convert compound to simple operators. 
*/ fx_op[1]='\0'; *fx_op=(unsigned char) BitwiseAndAssignmentOperator; (void) SubstituteString(&fx_info->expression,"&=",(char *) fx_op); *fx_op=(unsigned char) BitwiseOrAssignmentOperator; (void) SubstituteString(&fx_info->expression,"|=",(char *) fx_op); *fx_op=(unsigned char) LeftShiftAssignmentOperator; (void) SubstituteString(&fx_info->expression,"<<=",(char *) fx_op); *fx_op=(unsigned char) RightShiftAssignmentOperator; (void) SubstituteString(&fx_info->expression,">>=",(char *) fx_op); *fx_op=(unsigned char) PowerAssignmentOperator; (void) SubstituteString(&fx_info->expression,"^=",(char *) fx_op); *fx_op=(unsigned char) ModuloAssignmentOperator; (void) SubstituteString(&fx_info->expression,"%=",(char *) fx_op); *fx_op=(unsigned char) PlusAssignmentOperator; (void) SubstituteString(&fx_info->expression,"+=",(char *) fx_op); *fx_op=(unsigned char) SubtractAssignmentOperator; (void) SubstituteString(&fx_info->expression,"-=",(char *) fx_op); *fx_op=(unsigned char) MultiplyAssignmentOperator; (void) SubstituteString(&fx_info->expression,"*=",(char *) fx_op); *fx_op=(unsigned char) DivideAssignmentOperator; (void) SubstituteString(&fx_info->expression,"/=",(char *) fx_op); *fx_op=(unsigned char) IncrementAssignmentOperator; (void) SubstituteString(&fx_info->expression,"++",(char *) fx_op); *fx_op=(unsigned char) DecrementAssignmentOperator; (void) SubstituteString(&fx_info->expression,"--",(char *) fx_op); *fx_op=(unsigned char) LeftShiftOperator; (void) SubstituteString(&fx_info->expression,"<<",(char *) fx_op); *fx_op=(unsigned char) RightShiftOperator; (void) SubstituteString(&fx_info->expression,">>",(char *) fx_op); *fx_op=(unsigned char) LessThanEqualOperator; (void) SubstituteString(&fx_info->expression,"<=",(char *) fx_op); *fx_op=(unsigned char) GreaterThanEqualOperator; (void) SubstituteString(&fx_info->expression,">=",(char *) fx_op); *fx_op=(unsigned char) EqualOperator; (void) SubstituteString(&fx_info->expression,"==",(char *) fx_op); *fx_op=(unsigned char) NotEqualOperator; (void) SubstituteString(&fx_info->expression,"!=",(char *) fx_op); *fx_op=(unsigned char) LogicalAndOperator; (void) SubstituteString(&fx_info->expression,"&&",(char *) fx_op); *fx_op=(unsigned char) LogicalOrOperator; (void) SubstituteString(&fx_info->expression,"||",(char *) fx_op); *fx_op=(unsigned char) ExponentialNotation; (void) SubstituteString(&fx_info->expression,"**",(char *) fx_op); /* Force right-to-left associativity for unary negation. */ (void) SubstituteString(&fx_info->expression,"-","-1.0*"); (void) SubstituteString(&fx_info->expression,"^-1.0*","^-"); (void) SubstituteString(&fx_info->expression,"E-1.0*","E-"); (void) SubstituteString(&fx_info->expression,"e-1.0*","e-"); (void) SubstituteString(&fx_info->expression," ",""); /* compact string */ return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyFxInfo() deallocates memory associated with an FxInfo structure. % % The format of the DestroyFxInfo method is: % % ImageInfo *DestroyFxInfo(ImageInfo *fx_info) % % A description of each parameter follows: % % o fx_info: the fx info. 
% */ MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info) { ssize_t i; fx_info->exception=DestroyExceptionInfo(fx_info->exception); fx_info->expression=DestroyString(fx_info->expression); fx_info->symbols=DestroySplayTree(fx_info->symbols); fx_info->colors=DestroySplayTree(fx_info->colors); for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--) fx_info->view[i]=DestroyCacheView(fx_info->view[i]); fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view); fx_info->random_info=DestroyRandomInfo(fx_info->random_info); fx_info=(FxInfo *) RelinquishMagickMemory(fx_info); return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F x E v a l u a t e C h a n n e l E x p r e s s i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxEvaluateChannelExpression() evaluates an expression and returns the % results. % % The format of the FxEvaluateExpression method is: % % double FxEvaluateChannelExpression(FxInfo *fx_info, % const PixelChannel channel,const ssize_t x,const ssize_t y, % double *alpha,Exceptioninfo *exception) % double FxEvaluateExpression(FxInfo *fx_info, % double *alpha,Exceptioninfo *exception) % % A description of each parameter follows: % % o fx_info: the fx info. % % o channel: the channel. % % o x,y: the pixel position. % % o alpha: the result. % % o exception: return any errors or warnings in this structure. % */ static inline const double *GetFxSymbolValue(FxInfo *magick_restrict fx_info, const char *symbol) { return((const double *) GetValueFromSplayTree(fx_info->symbols,symbol)); } static inline MagickBooleanType SetFxSymbolValue( FxInfo *magick_restrict fx_info,const char *magick_restrict symbol, double const value) { double *object; object=(double *) GetValueFromSplayTree(fx_info->symbols,symbol); if (object != (double *) NULL) { *object=value; return(MagickTrue); } object=(double *) AcquireMagickMemory(sizeof(*object)); if (object == (double *) NULL) { (void) ThrowMagickException(fx_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", fx_info->images->filename); return(MagickFalse); } *object=value; return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),object)); } static double FxChannelStatistics(FxInfo *fx_info,Image *image, PixelChannel channel,const char *symbol,ExceptionInfo *exception) { ChannelType channel_mask; char key[MagickPathExtent]; const double *value; double statistic; const char *p; channel_mask=UndefinedChannel; for (p=symbol; (*p != '.') && (*p != '\0'); p++) ; if (*p == '.') { ssize_t option; option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1); if (option >= 0) { channel=(PixelChannel) option; channel_mask=SetPixelChannelMask(image,(ChannelType) (1UL << channel)); } } (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image, (double) channel,symbol); value=GetFxSymbolValue(fx_info,key); if (value != (const double *) NULL) { if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); return(QuantumScale*(*value)); } statistic=0.0; if (LocaleNCompare(symbol,"depth",5) == 0) { size_t depth; depth=GetImageDepth(image,exception); statistic=(double) depth; } if (LocaleNCompare(symbol,"kurtosis",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); statistic=kurtosis; } if (LocaleNCompare(symbol,"maxima",6) == 0) { double maxima, minima; (void) 
GetImageRange(image,&minima,&maxima,exception); statistic=maxima; } if (LocaleNCompare(symbol,"mean",4) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); statistic=mean; } if (LocaleNCompare(symbol,"median",6) == 0) { double median; (void) GetImageMedian(image,&median,exception); statistic=median; } if (LocaleNCompare(symbol,"minima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); statistic=minima; } if (LocaleNCompare(symbol,"skewness",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); statistic=skewness; } if (LocaleNCompare(symbol,"standard_deviation",18) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); statistic=standard_deviation; } if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse) return(0.0); return(QuantumScale*statistic); } static double FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t, const ssize_t,const char *,const size_t,double *,ExceptionInfo *); static inline MagickBooleanType IsFxFunction(const char *expression, const char *name,const size_t length) { int c; size_t i; for (i=0; i <= length; i++) if (expression[i] == '\0') return(MagickFalse); c=expression[length]; if ((LocaleNCompare(expression,name,length) == 0) && ((isspace((int) ((unsigned char) c)) == 0) || (c == '('))) return(MagickTrue); return(MagickFalse); } static inline double FxGCD(const double alpha,const double beta) { if (alpha < beta) return(FxGCD(beta,alpha)); if (fabs(beta) < 0.001) return(alpha); return(FxGCD(beta,alpha-beta*floor(alpha/beta))); } static inline const char *FxSubexpression(const char *expression, ExceptionInfo *exception) { const char *subexpression; ssize_t level; level=0; subexpression=expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL))) { if (strchr("(",(int) *subexpression) != (char *) NULL) level++; else if (strchr(")",(int) *subexpression) != (char *) NULL) level--; subexpression++; } if (*subexpression == '\0') (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnbalancedParenthesis","`%s'",expression); return(subexpression); } static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel, const ssize_t x,const ssize_t y,const char *expression,const size_t depth, ExceptionInfo *exception) { char *q, symbol[MagickPathExtent]; const char *artifact, *p; const double *value; double alpha, beta; Image *image; MagickBooleanType status; PixelInfo pixel; PointInfo point; ssize_t i; size_t level; p=expression; i=GetImageIndexInList(fx_info->images); level=0; point.x=(double) x; point.y=(double) y; if (isalpha((int) ((unsigned char) *(p+1))) == 0) { char *subexpression; subexpression=AcquireString(expression); if (strchr("suv",(int) *p) != (char *) NULL) { switch (*p) { case 's': default: { i=GetImageIndexInList(fx_info->images); break; } case 'u': i=0; break; case 'v': i=1; break; } p++; if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); i=(ssize_t) alpha; if (*p != '\0') p++; } if (*p == '.') p++; } if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0)) { p++; if (*p == '{') 
{ level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '{') level++; else if (*p == '}') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); point.x=alpha; point.y=beta; if (*p != '\0') p++; } else if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); point.x+=alpha; point.y+=beta; if (*p != '\0') p++; } if (*p == '.') p++; } subexpression=DestroyString(subexpression); } image=GetImageFromList(fx_info->images,i); if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "NoSuchImage","`%s'",expression); return(0.0); } i=GetImageIndexInList(image); GetPixelInfo(image,&pixel); status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate, point.x,point.y,&pixel,exception); (void) status; if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') && (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) && (LocaleCompare(p,"lightness") != 0)) { char name[MagickPathExtent]; size_t length; (void) CopyMagickString(name,p,MagickPathExtent); length=strlen(name); for (q=name+length-1; q > name; q--) { if (*q == ')') break; if (*q == '.') { *q='\0'; break; } } q=name; if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') && (GetFxSymbolValue(fx_info,name) == (const double *) NULL)) { PixelInfo *color; color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name); if (color != (PixelInfo *) NULL) { pixel=(*color); p+=length; } else { MagickBooleanType status; status=QueryColorCompliance(name,AllCompliance,&pixel, fx_info->exception); if (status != MagickFalse) { (void) AddValueToSplayTree(fx_info->colors, ConstantString(name),ClonePixelInfo(&pixel)); p+=length; } } } } (void) CopyMagickString(symbol,p,MagickPathExtent); (void) StripMagickString(symbol); if (*symbol == '\0') { switch (channel) { case RedPixelChannel: return(QuantumScale*pixel.red); case GreenPixelChannel: return(QuantumScale*pixel.green); case BluePixelChannel: return(QuantumScale*pixel.blue); case BlackPixelChannel: { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), ImageError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } case AlphaPixelChannel: { if (pixel.alpha_trait == UndefinedPixelTrait) return(1.0); alpha=(double) (QuantumScale*pixel.alpha); return(alpha); } case CompositePixelChannel: { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } case IndexPixelChannel: return(0.0); default: break; } (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",p); return(0.0); } switch (*symbol) { case 'A': case 'a': { if (LocaleCompare(symbol,"a") == 0) return((QuantumScale*pixel.alpha)); break; } case 'B': case 'b': { if (LocaleCompare(symbol,"b") == 0) return(QuantumScale*pixel.blue); break; } case 'C': case 'c': { if (IsFxFunction(symbol,"channel",7) != MagickFalse) { GeometryInfo channel_info; MagickStatusType flags; flags=ParseGeometry(symbol+7,&channel_info); if (image->colorspace == 
CMYKColorspace) switch (channel) { case CyanPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case MagentaPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case YellowPixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case AlphaPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } switch (channel) { case RedPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case GreenPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case BluePixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } case AlphaPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } default: return(0.0); } } if (LocaleCompare(symbol,"c") == 0) return(QuantumScale*pixel.red); break; } case 'D': case 'd': { if (LocaleNCompare(symbol,"depth",5) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'E': case 'e': { if (LocaleCompare(symbol,"extent") == 0) { if (image->extent != 0) return((double) image->extent); return((double) GetBlobSize(image)); } break; } case 'G': case 'g': { if (LocaleCompare(symbol,"g") == 0) return(QuantumScale*pixel.green); break; } case 'K': case 'k': { if (LocaleNCompare(symbol,"kurtosis",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"k") == 0) { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } break; } case 'H': case 'h': { if (LocaleCompare(symbol,"h") == 0) return((double) image->rows); if (LocaleCompare(symbol,"hue") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(hue); } break; } case 'I': case 'i': { if ((LocaleCompare(symbol,"image.depth") == 0) || (LocaleCompare(symbol,"image.minima") == 0) || (LocaleCompare(symbol,"image.maxima") == 0) || (LocaleCompare(symbol,"image.mean") == 0) || (LocaleCompare(symbol,"image.kurtosis") == 0) || (LocaleCompare(symbol,"image.skewness") == 0) || (LocaleCompare(symbol,"image.standard_deviation") == 0)) return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception)); if (LocaleCompare(symbol,"image.resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"image.resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"intensity") == 0) { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } if (LocaleCompare(symbol,"i") == 0) return((double) x); break; } case 'J': case 'j': { if (LocaleCompare(symbol,"j") == 0) return((double) y); break; } case 'L': case 'l': { if (LocaleCompare(symbol,"lightness") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(lightness); } if (LocaleCompare(symbol,"luma") == 0) { double luma; luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luma); } if 
(LocaleCompare(symbol,"luminance") == 0) { double luminence; luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luminence); } break; } case 'M': case 'm': { if (LocaleNCompare(symbol,"maxima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"mean",4) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"median",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"minima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"m") == 0) return(QuantumScale*pixel.green); break; } case 'N': case 'n': { if (LocaleCompare(symbol,"n") == 0) return((double) GetImageListLength(fx_info->images)); break; } case 'O': case 'o': { if (LocaleCompare(symbol,"o") == 0) return(QuantumScale*pixel.alpha); break; } case 'P': case 'p': { if (LocaleCompare(symbol,"page.height") == 0) return((double) image->page.height); if (LocaleCompare(symbol,"page.width") == 0) return((double) image->page.width); if (LocaleCompare(symbol,"page.x") == 0) return((double) image->page.x); if (LocaleCompare(symbol,"page.y") == 0) return((double) image->page.y); if (LocaleCompare(symbol,"printsize.x") == 0) return(PerceptibleReciprocal(image->resolution.x)*image->columns); if (LocaleCompare(symbol,"printsize.y") == 0) return(PerceptibleReciprocal(image->resolution.y)*image->rows); break; } case 'Q': case 'q': { if (LocaleCompare(symbol,"quality") == 0) return((double) image->quality); break; } case 'R': case 'r': { if (LocaleCompare(symbol,"resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"r") == 0) return(QuantumScale*pixel.red); break; } case 'S': case 's': { if (LocaleCompare(symbol,"saturation") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(saturation); } if (LocaleNCompare(symbol,"skewness",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"standard_deviation",18) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'T': case 't': { if (LocaleCompare(symbol,"t") == 0) return((double) GetImageIndexInList(fx_info->images)); break; } case 'W': case 'w': { if (LocaleCompare(symbol,"w") == 0) return((double) image->columns); break; } case 'Y': case 'y': { if (LocaleCompare(symbol,"y") == 0) return(QuantumScale*pixel.blue); break; } case 'Z': case 'z': { if (LocaleCompare(symbol,"z") == 0) return((double) GetImageDepth(image,fx_info->exception)); break; } default: break; } value=GetFxSymbolValue(fx_info,symbol); if (value != (const double *) NULL) return(*value); artifact=GetImageArtifact(image,symbol); if (artifact != (const char *) NULL) return(StringToDouble(artifact,(char **) NULL)); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UndefinedVariable","`%s'",symbol); (void) SetFxSymbolValue(fx_info,symbol,0.0); return(0.0); } static const char *FxOperatorPrecedence(const char *expression, ExceptionInfo *exception) { typedef enum { UndefinedPrecedence, NullPrecedence, BitwiseComplementPrecedence, ExponentPrecedence, ExponentialNotationPrecedence, MultiplyPrecedence, AdditionPrecedence, ShiftPrecedence, RelationalPrecedence, EquivalencyPrecedence, BitwiseAndPrecedence, BitwiseOrPrecedence, 
LogicalAndPrecedence, LogicalOrPrecedence, TernaryPrecedence, AssignmentPrecedence, CommaPrecedence, SeparatorPrecedence } FxPrecedence; FxPrecedence precedence, target; const char *subexpression; int c; size_t level; c=(-1); level=0; subexpression=(const char *) NULL; target=NullPrecedence; while ((c != '\0') && (*expression != '\0')) { precedence=UndefinedPrecedence; if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@')) { expression++; continue; } switch (*expression) { case 'A': case 'a': { #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { expression+=5; break; } #endif if (IsFxFunction(expression,"atan2",5) != MagickFalse) { expression+=5; break; } break; } case 'E': case 'e': { if ((isdigit((int) ((unsigned char) c)) != 0) && ((LocaleNCompare(expression,"E+",2) == 0) || (LocaleNCompare(expression,"E-",2) == 0))) { expression+=2; /* scientific notation */ break; } } case 'J': case 'j': { if ((IsFxFunction(expression,"j0",2) != MagickFalse) || (IsFxFunction(expression,"j1",2) != MagickFalse)) { expression+=2; break; } break; } case '#': { while (isxdigit((int) ((unsigned char) *(expression+1))) != 0) expression++; break; } default: break; } if ((c == (int) '{') || (c == (int) '[')) level++; else if ((c == (int) '}') || (c == (int) ']')) level--; if (level == 0) switch ((unsigned char) *expression) { case '~': case '!': { precedence=BitwiseComplementPrecedence; break; } case '^': case '@': { precedence=ExponentPrecedence; break; } default: { if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) || (strchr(")",c) != (char *) NULL))) && (((islower((int) ((unsigned char) *expression)) != 0) || (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) || ((isdigit((int) ((unsigned char) c)) == 0) && (isdigit((int) ((unsigned char) *expression)) != 0))) && (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL)) precedence=MultiplyPrecedence; break; } case '*': case '/': case '%': { precedence=MultiplyPrecedence; break; } case '+': case '-': { if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) || (isalpha((int) ((unsigned char) c)) != 0)) precedence=AdditionPrecedence; break; } case BitwiseAndAssignmentOperator: case BitwiseOrAssignmentOperator: case LeftShiftAssignmentOperator: case RightShiftAssignmentOperator: case PowerAssignmentOperator: case ModuloAssignmentOperator: case PlusAssignmentOperator: case SubtractAssignmentOperator: case MultiplyAssignmentOperator: case DivideAssignmentOperator: case IncrementAssignmentOperator: case DecrementAssignmentOperator: { precedence=AssignmentPrecedence; break; } case LeftShiftOperator: case RightShiftOperator: { precedence=ShiftPrecedence; break; } case '<': case LessThanEqualOperator: case GreaterThanEqualOperator: case '>': { precedence=RelationalPrecedence; break; } case EqualOperator: case NotEqualOperator: { precedence=EquivalencyPrecedence; break; } case '&': { precedence=BitwiseAndPrecedence; break; } case '|': { precedence=BitwiseOrPrecedence; break; } case LogicalAndOperator: { precedence=LogicalAndPrecedence; break; } case LogicalOrOperator: { precedence=LogicalOrPrecedence; break; } case ExponentialNotation: { precedence=ExponentialNotationPrecedence; break; } case ':': case '?': { 
precedence=TernaryPrecedence; break; } case '=': { precedence=AssignmentPrecedence; break; } case ',': { precedence=CommaPrecedence; break; } case ';': { precedence=SeparatorPrecedence; break; } } if ((precedence == BitwiseComplementPrecedence) || (precedence == TernaryPrecedence) || (precedence == AssignmentPrecedence)) { if (precedence > target) { /* Right-to-left associativity. */ target=precedence; subexpression=expression; } } else if (precedence >= target) { /* Left-to-right associativity. */ target=precedence; subexpression=expression; } if (strchr("(",(int) *expression) != (char *) NULL) expression=FxSubexpression(expression,exception); c=(int) (*expression++); } return(subexpression); } static double FxEvaluateSubexpression(FxInfo *fx_info, const PixelChannel channel,const ssize_t x,const ssize_t y, const char *expression,const size_t depth,double *beta, ExceptionInfo *exception) { #define FxMaxParenthesisDepth 58 #define FxMaxSubexpressionDepth 200 #define FxReturn(value) \ { \ subexpression=DestroyString(subexpression); \ return(value); \ } #define FxParseConditional(subexpression,sentinal,p,q) \ { \ p=subexpression; \ for (q=(char *) p; (*q != (sentinal)) && (*q != '\0'); q++) \ if (*q == '(') \ { \ for (q++; (*q != ')') && (*q != '\0'); q++); \ if (*q == '\0') \ break; \ } \ if (*q == '\0') \ { \ (void) ThrowMagickException(exception,GetMagickModule(), \ OptionError,"UnableToParseExpression","`%s'",subexpression); \ FxReturn(0.0); \ } \ if (strlen(q) == 1) \ *(q+1)='\0'; \ *q='\0'; \ } char *q, *subexpression; double alpha, gamma, sans, value; const char *p; *beta=0.0; sans=0.0; subexpression=AcquireString(expression); *subexpression='\0'; if (depth > FxMaxSubexpressionDepth) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",expression); FxReturn(0.0); } if (exception->severity >= ErrorException) FxReturn(0.0); while (isspace((int) ((unsigned char) *expression)) != 0) expression++; if (*expression == '\0') FxReturn(0.0); p=FxOperatorPrecedence(expression,exception); if (p != (const char *) NULL) { (void) CopyMagickString(subexpression,expression,(size_t) (p-expression+1)); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); switch ((unsigned char) *p) { case '~': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) (~(size_t) *beta); FxReturn(*beta); } case '!': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta == 0.0 ? 
1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p, depth+1,beta,exception)); FxReturn(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(PerceptibleReciprocal(*beta)*alpha); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha-(*beta)); } case BitwiseAndAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case BitwiseOrAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case RightShiftAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PowerAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } 
ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=pow(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ModuloAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=fmod(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PlusAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case SubtractAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case MultiplyAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DivideAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*PerceptibleReciprocal(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case IncrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DecrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) 
(gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); FxReturn(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); FxReturn(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); FxReturn(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); FxReturn(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; FxReturn(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; FxReturn(*beta); } case '?': { (void) CopyMagickString(subexpression,++p,MagickPathExtent-1); FxParseConditional(subexpression,':',p,q); if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(gamma); } case '=': { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1, beta,exception); FxReturn(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { size_t length; if (depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); length=CopyMagickString(subexpression,expression+1,MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); FxReturn(gamma); } switch (*expression) { case '+': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn((double) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (IsFxFunction(expression,"abs",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(acosh(alpha)); } #endif if (IsFxFunction(expression,"acos",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"airy",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(asinh(alpha)); } #endif if (IsFxFunction(expression,"asin",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(asin(alpha)); } if (IsFxFunction(expression,"alt",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(((ssize_t) alpha) & 0x01 ? 
-1.0 : 1.0); } if (IsFxFunction(expression,"atan2",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atanh(alpha)); } #endif if (IsFxFunction(expression,"atan",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'C': case 'c': { if (IsFxFunction(expression,"ceil",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(ceil(alpha)); } if (IsFxFunction(expression,"clamp",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha < 0.0) FxReturn(0.0); if (alpha > 1.0) FxReturn(1.0); FxReturn(alpha); } if (IsFxFunction(expression,"cosh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(cosh(alpha)); } if (IsFxFunction(expression,"cos",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'D': case 'd': { if (IsFxFunction(expression,"debug",5) != MagickFalse) { const char *type; size_t length; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); switch (fx_info->images->colorspace) { case CMYKColorspace: { switch (channel) { case CyanPixelChannel: type="cyan"; break; case MagentaPixelChannel: type="magenta"; break; case YellowPixelChannel: type="yellow"; break; case AlphaPixelChannel: type="alpha"; break; case BlackPixelChannel: type="black"; break; default: type="unknown"; break; } break; } case GRAYColorspace: { switch (channel) { case RedPixelChannel: type="gray"; break; case AlphaPixelChannel: type="alpha"; break; default: type="unknown"; break; } break; } default: { switch (channel) { case RedPixelChannel: type="red"; break; case GreenPixelChannel: type="green"; break; case BluePixelChannel: type="blue"; break; case AlphaPixelChannel: type="alpha"; break; default: type="unknown"; break; } break; } } *subexpression='\0'; length=1; if (strlen(expression) > 6) length=CopyMagickString(subexpression,expression+6, MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: " "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type, subexpression,GetMagickPrecision(),alpha); FxReturn(alpha); } if (IsFxFunction(expression,"do",2) != MagickFalse) { size_t length; /* Parse do(expression,condition test). 
*/ length=CopyMagickString(subexpression,expression+3, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; } FxReturn(alpha); } if (IsFxFunction(expression,"drc",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) FxReturn(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (IsFxFunction(expression,"erf",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(erf(alpha)); } #endif if (IsFxFunction(expression,"exp",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) FxReturn(2.7182818284590452354); break; } case 'F': case 'f': { if (IsFxFunction(expression,"floor",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"for",3) != MagickFalse) { double sans = 0.0; size_t length; /* Parse for(initialization, condition test, expression). */ length=CopyMagickString(subexpression,expression+4, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent-1); FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); } FxReturn(alpha); } break; } case 'G': case 'g': { if (IsFxFunction(expression,"gauss",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI)); } if (IsFxFunction(expression,"gcd",3) != MagickFalse) { double gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); if (IsNaN(alpha)) FxReturn(alpha); gcd=FxGCD(alpha,*beta); FxReturn(gcd); } if (LocaleCompare(expression,"g") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleCompare(expression,"hue") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"hypot",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(hypot(alpha,*beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'I': case 'i': { if (IsFxFunction(expression,"if",2) != MagickFalse) { double sans = 0.0; size_t length; /* Parse if(condition test, true-expression, false-expression). 
*/ length=CopyMagickString(subexpression,expression+3, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent-1); FxParseConditional(subexpression,',',p,q); if (fabs(alpha) >= MagickEpsilon) alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(alpha); } if (LocaleCompare(expression,"intensity") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"int",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"isnan",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn((double) !!IsNaN(alpha)); } if (LocaleCompare(expression,"i") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); #if defined(MAGICKCORE_HAVE_J0) if (IsFxFunction(expression,"j0",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"j1",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"jinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha))); } #endif break; } case 'L': case 'l': { if (IsFxFunction(expression,"ln",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(log(alpha)); } if (IsFxFunction(expression,"logtwo",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn(log10(alpha)/log10(2.0)); } if (IsFxFunction(expression,"log",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(log10(alpha)); } if (LocaleCompare(expression,"lightness") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression,"MaxRGB") == 0) FxReturn(QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (IsFxFunction(expression,"max",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (IsFxFunction(expression,"min",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha < *beta ? 
alpha : *beta); } if (IsFxFunction(expression,"mod",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta)); } if (LocaleCompare(expression,"m") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'N': case 'n': { if (IsFxFunction(expression,"not",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((double) (alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) FxReturn(1.0); if (LocaleCompare(expression,"o") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) FxReturn(MagickPHI); if (LocaleCompare(expression,"pi") == 0) FxReturn(MagickPI); if (IsFxFunction(expression,"pow",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) FxReturn(QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) FxReturn(QuantumScale); break; } case 'R': case 'r': { if (IsFxFunction(expression,"rand",4) != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); FxReturn(alpha); } if (IsFxFunction(expression,"round",5) != MagickFalse) { /* Round the fraction to nearest integer. */ alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if ((alpha-floor(alpha)) < (ceil(alpha)-alpha)) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"r") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"sign",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(alpha < 0.0 ? 
-1.0 : 1.0); } if (IsFxFunction(expression,"sinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0) FxReturn(1.0); FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha)); } if (IsFxFunction(expression,"sinh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sinh(alpha)); } if (IsFxFunction(expression,"sin",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(sin(alpha)); } if (IsFxFunction(expression,"sqrt",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sqrt(alpha)); } if (IsFxFunction(expression,"squish",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'T': case 't': { if (IsFxFunction(expression,"tanh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(tanh(alpha)); } if (IsFxFunction(expression,"tan",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) FxReturn(0.0); if (IsFxFunction(expression,"trunc",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha >= 0.0) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"t") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression,"u") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'V': case 'v': { if (LocaleCompare(expression,"v") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'W': case 'w': { if (IsFxFunction(expression,"while",5) != MagickFalse) { size_t length; /* Parse while(condition test, expression). 
*/ length=CopyMagickString(subexpression,expression+6, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1, beta,exception); } FxReturn(alpha); } if (LocaleCompare(expression,"w") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Y': case 'y': { if (LocaleCompare(expression,"y") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Z': case 'z': { if (LocaleCompare(expression,"z") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } default: break; } subexpression=DestroyString(subexpression); q=(char *) expression; alpha=InterpretSiPrefixValue(expression,&q); if (q == expression) alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception); FxReturn(alpha); } MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { MagickBooleanType status; status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha, exception); return(status); } MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { FILE *file; MagickBooleanType status; file=fx_info->file; fx_info->file=(FILE *) NULL; status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha, exception); fx_info->file=file; return(status); } MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info, const PixelChannel channel,const ssize_t x,const ssize_t y, double *alpha,ExceptionInfo *exception) { double beta; beta=0.0; *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0, &beta,exception); return(exception->severity == OptionError ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxImage() applies a mathematical expression to the specified image. % % The format of the FxImage method is: % % Image *FxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: A mathematical expression. % % o exception: return any errors or warnings in this structure. 
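%
%  A minimal usage sketch (illustrative; assumes image and exception are
%  already valid, and that "u" names the first image's pixel value in the
%  fx language):
%
%      Image
%        *fx_image;
%
%      fx_image=FxImage(image,"u/2",exception);
%      if (fx_image != (Image *) NULL)
%        fx_image=DestroyImage(fx_image);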
% */ static FxInfo **DestroyFxThreadSet(FxInfo **fx_info) { ssize_t i; assert(fx_info != (FxInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (fx_info[i] != (FxInfo *) NULL) fx_info[i]=DestroyFxInfo(fx_info[i]); fx_info=(FxInfo **) RelinquishMagickMemory(fx_info); return(fx_info); } static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression, ExceptionInfo *exception) { char *fx_expression; double alpha; FxInfo **fx_info; ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info)); if (fx_info == (FxInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return((FxInfo **) NULL); } (void) memset(fx_info,0,number_threads*sizeof(*fx_info)); if (*expression != '@') fx_expression=ConstantString(expression); else fx_expression=FileToString(expression+1,~0UL,exception); for (i=0; i < (ssize_t) number_threads; i++) { MagickBooleanType status; fx_info[i]=AcquireFxInfo(image,fx_expression,exception); if (fx_info[i] == (FxInfo *) NULL) break; status=FxPreprocessExpression(fx_info[i],&alpha,exception); if (status == MagickFalse) break; } fx_expression=DestroyString(fx_expression); if (i < (ssize_t) number_threads) fx_info=DestroyFxThreadSet(fx_info); return(fx_info); } MagickExport Image *FxImage(const Image *image,const char *expression, ExceptionInfo *exception) { #define FxImageTag "Fx/Image" CacheView *fx_view, *image_view; FxInfo **magick_restrict fx_info; Image *fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (expression == (const char *) NULL) return(CloneImage(image,0,0,MagickTrue,exception)); fx_info=AcquireFxThreadSet(image,expression,exception); if (fx_info == (FxInfo **) NULL) return((Image *) NULL); fx_image=CloneImage(image,0,0,MagickTrue,exception); if (fx_image == (Image *) NULL) { fx_info=DestroyFxThreadSet(fx_info); return((Image *) NULL); } if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) { fx_info=DestroyFxThreadSet(fx_info); fx_image=DestroyImage(fx_image); return((Image *) NULL); } /* Fx image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); fx_view=AcquireAuthenticCacheView(fx_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) \ magick_number_threads(image,fx_image,fx_image->rows, \ GlobExpression(fx_info[0]->expression,"debug(",MagickTrue) == 0 ? 
1 : 0) #endif for (y=0; y < (ssize_t) fx_image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) fx_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel); if ((traits == UndefinedPixelTrait) || (fx_traits == UndefinedPixelTrait)) continue; if ((fx_traits & CopyPixelTrait) != 0) { SetPixelChannel(fx_image,channel,p[i],q); continue; } alpha=0.0; (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha, exception); q[i]=ClampToQuantum(QuantumRange*alpha); } p+=GetPixelChannels(image); q+=GetPixelChannels(fx_image); } if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FxImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } fx_view=DestroyCacheView(fx_view); image_view=DestroyCacheView(image_view); fx_info=DestroyFxThreadSet(fx_info); if (status == MagickFalse) fx_image=DestroyImage(fx_image); return(fx_image); }
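
/*
  Threading note: FxImage() allocates one FxInfo per OpenMP thread through
  AcquireFxThreadSet() and indexes it by GetOpenMPThreadId() because an
  FxInfo carries mutable state (its symbol splay-tree and random_info), so
  sharing a single instance across the row loop would race.
*/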
task4_4096_omp.c
#include <math.h>
#include <stdio.h>
#include <string.h>
#include "timer.h"

#define NN 4096
#define NM 4096

float A[NN][NM];
float Anew[NN][NM];

int main(int argc, char** argv)
{
    const int n = NN;
    const int m = NM;
    const int iter_max = 1000;

    const double tol = 1.0e-6;
    double error = 1.0;

    memset(A, 0, n * m * sizeof(float));
    memset(Anew, 0, n * m * sizeof(float));

    for (int j = 0; j < n; j++)
    {
        A[j][0] = 1.0;
        Anew[j][0] = 1.0;
    }

    printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m);

    StartTimer();
    int iter = 0;

    while ( error > tol && iter < iter_max )
    {
        error = 0.0;

#pragma omp parallel for reduction(max:error)
        for( int j = 1; j < n-1; j++)
        {
            for( int i = 1; i < m-1; i++ )
            {
                Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1]
                                    + A[j-1][i] + A[j+1][i]);
                error = fmax( error, fabs(Anew[j][i] - A[j][i]));
            }
        }

#pragma omp parallel for
        for( int j = 1; j < n-1; j++)
        {
            for( int i = 1; i < m-1; i++ )
            {
                A[j][i] = Anew[j][i];
            }
        }

        if(iter % 100 == 0)
            printf("%5d, %0.6f\n", iter, error);

        iter++;
    }

    double runtime = GetTimer();

    printf(" total: %f s\n", runtime / 1000);

    return 0;
}
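
/*
  One way to build and run (a sketch; assumes GCC with OpenMP support and
  that timer.h, which supplies StartTimer()/GetTimer(), is on the include
  path):

      gcc -O2 -fopenmp task4_4096_omp.c -o jacobi_omp
      OMP_NUM_THREADS=8 ./jacobi_omp

  The reduction(max:error) clause gives each thread a private copy of error
  and combines the copies with the max operation when the loop ends, so the
  convergence test stays race-free.
*/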
clauses-3.c
struct T { int a; int *b; };
struct S { int *s; char u; struct T v; long x; };

void bar (int *);
#pragma omp declare target to (bar)

int
main ()
{
  int a[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  struct S s = { a, 5, { 6, a + 5 }, 99L };
  #pragma omp target map (s.v.a, s.u, s.x)
  ;
  #pragma omp target map (s.v.a, s.u, s.x)
  bar (&s.v.a);
  #pragma omp target map (s.v.a) map (always, to: s.u) map (s.x)
  ;
  #pragma omp target map (s.s[0]) map (s.v.b[:3])
  ;
  #pragma omp target map (s.s[0]) map (s.v.b[:3])
  bar (s.s);
  return 0;
}
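
/*
  Note: each map clause above maps an individual struct member rather than
  the whole struct.  For example, map (s.v.a) transfers only that int member,
  and map (s.v.b[:3]) maps a three-element array section reached through the
  pointer member s.v.b.  The modifier in map (always, to: s.u) forces the
  host-to-device copy even when s.u is already present in the device data
  environment.
*/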
avx2.h
#include <immintrin.h>
#include "../../context.h"
#include "../../lsc.h"
#include "../../parallel.h"

inline __m256 _mm256_set_ps1(float v) {
    return _mm256_set_ps(v, v, v, v, v, v, v, v);
}

inline void get_assignment_value_vec(
        const Cluster* cluster,
        const uint8_t* img_quad_row,
        const uint16_t* spatial_dist_patch_row,
        const uint16_t* min_dist_row,
        const uint16_t* assignment_row,
        __m128i cluster_number_vec,
        __m256i cluster_color_vec,
        __m128i order_swap_mask,
        __m128i& new_min_dist__narrow,
        __m128i& new_assignment__narrow
        ) {
    // image_segment: 16 elements of uint16_t
    // cluster_color_vec: cluster_R cluster_G cluster_B 0 cluster_R cluster_G cluster_B 0 ....
    // spatial_dist_vec: 8 elements of uint16_t (128-bit narrow vector)
    // color_dist_vec, dist_vec: 8 elements of uint16_t (128-bit narrow vectors)
    __m128i spatial_dist_vec = _mm_load_si128((__m128i *)spatial_dist_patch_row);
    __m256i image_segment = _mm256_cvtepu8_epi16(_mm_loadu_si128((__m128i*)img_quad_row));
    __m256i image_segment_2 = _mm256_cvtepu8_epi16(_mm_loadu_si128((__m128i*)(img_quad_row + 16)));

    // [R1, G1, B1, A1, R2, G2, B2, A2, R3, G3, B3, A3, R4, G4, B4, A4]
    __m256i abd_segment = _mm256_abs_epi16(_mm256_subs_epi16(image_segment, cluster_color_vec));
    // [R5, G5, B5, A5, R6, G6, B6, A6, R7, G7, B7, A7, R8, G8, B8, A8]
    __m256i abd_segment_2 = _mm256_abs_epi16(_mm256_subs_epi16(image_segment_2, cluster_color_vec));

    // [
    //   R1 + G1, B1 + A1, R2 + G2, B2 + A2,
    //   R5 + G5, B5 + A5, R6 + G6, B6 + A6,
    //   R3 + G3, B3 + A3, R4 + G4, B4 + A4,
    //   R7 + G7, B7 + A7, R8 + G8, B8 + A8,
    // ]
    __m256i pkd_hpair = _mm256_hadds_epi16(abd_segment, abd_segment_2);

    // [
    //   R1 + G1 + B1 + A1, (16-bit)
    //   R2 + G2 + B2 + A2,
    //   R5 + G5 + B5 + A5,
    //   R6 + G6 + B6 + A6,
    //   R3 + G3 + B3 + A3,
    //   R4 + G4 + B4 + A4,
    //   R7 + G7 + B7 + A7,
    //   R8 + G8 + B8 + A8,
    // ]
    __m128i pkd_qpair = _mm_hadds_epi16(
        _mm256_extracti128_si256(pkd_hpair, 0),
        _mm256_extracti128_si256(pkd_hpair, 1)
    );

    // [0 (32-bit), 1, 2, 3] -> [0, 2, 1, 3]
    __m128i color_dist_vec = _mm_castps_si128(
        _mm_permutevar_ps(_mm_castsi128_ps(pkd_qpair), order_swap_mask)
    );

    __m128i dist_vec = _mm_adds_epu16(color_dist_vec, spatial_dist_vec);
    __m128i old_assignment__narrow = _mm_loadu_si128((__m128i *)assignment_row);
    __m128i old_min_dist__narrow = _mm_loadu_si128((__m128i *)min_dist_row);
    new_min_dist__narrow = _mm_min_epu16(old_min_dist__narrow, dist_vec);
    // 0xFFFF if a[i+15:i] == b[i+15:i], 0x0000 otherwise.
    __m128i mask__narrow = _mm_cmpeq_epi16(old_min_dist__narrow, new_min_dist__narrow);
    // if mask[i+7:i] == 0xFF, choose b[i+7:i], otherwise choose a[i+7:i]
    new_assignment__narrow = _mm_blendv_epi8(cluster_number_vec, old_assignment__narrow, mask__narrow);
}

namespace fslic {
class Context_X64_AVX2 : public ContextSIMD {
    using ContextSIMD::ContextSIMD;

    virtual void assign_clusters(const Cluster **target_clusters, int size) {
        __m128i order_swap_mask = _mm_set_epi32(3, 1, 2, 0); // permute indices [0, 2, 1, 3]
        int16_t patch_height = spatial_dist_patch.get_height();
        for (int cidx = 0; cidx < size; cidx++) {
            const Cluster *cluster = target_clusters[cidx];
            uint16_t cluster_number = cluster->number;
            const uint16_t patch_width = spatial_dist_patch.get_width();
            const uint16_t patch_width_multiple8 = patch_width & 0xFFF8;

            const int16_t cluster_y = cluster->y, cluster_x = cluster->x;
            const int16_t y_lo = cluster_y - S, x_lo = cluster_x - S;

            // Note: x86-64 is a little-endian arch. ABGR order is correct.
            const uint64_t cluster_color_quad = ((uint64_t)cluster->b << 32) + ((uint64_t)cluster->g << 16) + ((uint64_t)cluster->r);
            __m256i cluster_color_vec = _mm256_set1_epi64x(cluster_color_quad);
            // 8 lanes of uint16_t, all set to the cluster number
            __m128i cluster_number_vec = _mm_set1_epi16(cluster_number);

            for (int16_t i = fit_to_stride(y_lo) - y_lo; i < patch_height; i += subsample_stride) {
                const uint16_t* spatial_dist_patch_base_row = spatial_dist_patch.get_row(i);
#ifdef FAST_SLIC_SIMD_INVARIANCE_CHECK
                assert((long long)spatial_dist_patch_base_row % 32 == 0);
#endif
                const uint8_t *img_quad_base_row = quad_image.get_row(y_lo + i, 4 * x_lo); // not aligned
                uint16_t* assignment_base_row = assignment.get_row(i + y_lo, x_lo);
                uint16_t* min_dist_base_row = min_dists.get_row(i + y_lo, x_lo);

#define ASSIGNMENT_VALUE_GETTER_BODY \
    __m128i new_assignment__narrow, new_min_dist__narrow; \
    uint16_t* min_dist_row = min_dist_base_row + j; /* unaligned */ \
    uint16_t* assignment_row = assignment_base_row + j; /* unaligned */ \
    const uint8_t* img_quad_row = img_quad_base_row + 4 * j; /* Image rows are not aligned due to x_lo */ \
    const uint16_t* spatial_dist_patch_row = (uint16_t *)HINT_ALIGNED_AS(spatial_dist_patch_base_row + j, 16); /* Spatial distance patch is aligned */ \
    get_assignment_value_vec( \
        cluster, \
        img_quad_row, \
        spatial_dist_patch_row, \
        min_dist_row, \
        assignment_row, \
        cluster_number_vec, \
        cluster_color_vec, \
        order_swap_mask, \
        new_min_dist__narrow, \
        new_assignment__narrow \
    );

                // 32 (batch size) / 4 (rgba quad) = stride 8
                for (int j = 0; j < patch_width_multiple8; j += 8) {
                    ASSIGNMENT_VALUE_GETTER_BODY
                    _mm_storeu_si128((__m128i*)min_dist_row, new_min_dist__narrow);
                    _mm_storeu_si128((__m128i*)assignment_row, new_assignment__narrow);
                }

                if (0 < patch_width - patch_width_multiple8) {
                    int j = patch_width_multiple8;
                    int rem = patch_width - patch_width_multiple8;
                    ASSIGNMENT_VALUE_GETTER_BODY
                    uint64_t dist_4, assignment_4;
                    if (rem >= 4) {
                        *(uint64_t *)&min_dist_base_row[j] = _mm_extract_epi64(new_min_dist__narrow, 0);
                        *(uint64_t *)&assignment_base_row[j] = _mm_extract_epi64(new_assignment__narrow, 0);
                        rem -= 4;
                        j += 4;
                        dist_4 = _mm_extract_epi64(new_min_dist__narrow, 1);
                        assignment_4 = _mm_extract_epi64(new_assignment__narrow, 1);
                    } else {
                        dist_4 = _mm_extract_epi64(new_min_dist__narrow, 0);
                        assignment_4 = _mm_extract_epi64(new_assignment__narrow, 0);
                    }
                    switch (rem) {
                    case 3:
                        *(uint32_t *)&min_dist_base_row[j] = (uint32_t)dist_4;
                        *(uint32_t *)&assignment_base_row[j] = (uint32_t)assignment_4;
                        *(uint16_t *)&min_dist_base_row[j + 2] = (uint16_t)(dist_4 >> 32);
                        *(uint16_t *)&assignment_base_row[j + 2] = (uint16_t)(assignment_4 >> 32);
                        break;
                    case 2:
                        *(uint32_t *)&min_dist_base_row[j] = (uint32_t)dist_4;
                        *(uint32_t *)&assignment_base_row[j] = (uint32_t)assignment_4;
                        break;
                    case 1:
                        *(uint16_t *)&min_dist_base_row[j] = (uint16_t)dist_4;
                        *(uint16_t *)&assignment_base_row[j] = (uint16_t)assignment_4;
                        break;
                    }
                }
            }
        }
    }
};

class ContextLSC_X64_AVX2 : public ContextLSC {
public:
    using ContextLSC::ContextLSC;
protected:
    virtual void assign_clusters(const Cluster **target_clusters, int size) {
        const float* __restrict img_feats[10];
        const float* __restrict centroid_feats[10];
        for (int i = 0; i < 10; i++) {
            img_feats[i] = &image_features[i][0];
            centroid_feats[i] = &centroid_features[i][0];
        }

        for (int cidx = 0; cidx < size; cidx++) {
            const Cluster* cluster = target_clusters[cidx];
            int cluster_y = cluster->y, cluster_x = cluster->x;
            uint16_t cluster_no = cluster->number;
            int y_lo = my_max<int>(cluster_y - S, 0), y_hi = my_min<int>(cluster_y + S + 1, H);
            int x_lo = my_max<int>(cluster_x - S, 0), x_hi = my_min<int>(cluster_x + S + 1, W);

            __m128i cluster_number_vec = _mm_set1_epi16(cluster_no);
            __m256 c_0 = _mm256_set_ps1(centroid_feats[0][cluster_no]);
            __m256 c_1 = _mm256_set_ps1(centroid_feats[1][cluster_no]);
            __m256 c_2 = _mm256_set_ps1(centroid_feats[2][cluster_no]);
            __m256 c_3 = _mm256_set_ps1(centroid_feats[3][cluster_no]);
            __m256 c_4 = _mm256_set_ps1(centroid_feats[4][cluster_no]);
            __m256 c_5 = _mm256_set_ps1(centroid_feats[5][cluster_no]);
            __m256 c_6 = _mm256_set_ps1(centroid_feats[6][cluster_no]);
            __m256 c_7 = _mm256_set_ps1(centroid_feats[7][cluster_no]);
            __m256 c_8 = _mm256_set_ps1(centroid_feats[8][cluster_no]);
            __m256 c_9 = _mm256_set_ps1(centroid_feats[9][cluster_no]);

            for (int i = y_lo; i < y_hi; i++) {
                if (!valid_subsample_row(i)) continue;
                for (int j = x_lo; j < x_hi; j += 8) {
                    float* __restrict min_dist_row = min_dists.get_row(i, j);
                    uint16_t* __restrict assignment_row = assignment.get_row(i, j);
                    int index = W * i + j;

                    __m256 f_0 = _mm256_loadu_ps(&img_feats[0][index]);
                    __m256 d_0 = _mm256_sub_ps(f_0, c_0);
                    __m256 f_1 = _mm256_loadu_ps(&img_feats[1][index]);
                    __m256 d_1 = _mm256_sub_ps(f_1, c_1);
                    __m256 f_2 = _mm256_loadu_ps(&img_feats[2][index]);
                    __m256 d_2 = _mm256_sub_ps(f_2, c_2);
                    __m256 f_3 = _mm256_loadu_ps(&img_feats[3][index]);
                    __m256 d_3 = _mm256_sub_ps(f_3, c_3);
                    __m256 f_4 = _mm256_loadu_ps(&img_feats[4][index]);
                    __m256 d_4 = _mm256_sub_ps(f_4, c_4);
                    __m256 f_5 = _mm256_loadu_ps(&img_feats[5][index]);
                    __m256 d_5 = _mm256_sub_ps(f_5, c_5);
                    __m256 f_6 = _mm256_loadu_ps(&img_feats[6][index]);
                    __m256 d_6 = _mm256_sub_ps(f_6, c_6);
                    __m256 f_7 = _mm256_loadu_ps(&img_feats[7][index]);
                    __m256 d_7 = _mm256_sub_ps(f_7, c_7);
                    __m256 f_8 = _mm256_loadu_ps(&img_feats[8][index]);
                    __m256 d_8 = _mm256_sub_ps(f_8, c_8);
                    __m256 f_9 = _mm256_loadu_ps(&img_feats[9][index]);
                    __m256 d_9 = _mm256_sub_ps(f_9, c_9);

                    __m256 dist_vec = _mm256_mul_ps(d_0, d_0);
                    dist_vec = _mm256_fmadd_ps(d_1, d_1, dist_vec);
                    dist_vec = _mm256_fmadd_ps(d_2, d_2, dist_vec);
                    dist_vec = _mm256_fmadd_ps(d_3, d_3, dist_vec);
                    dist_vec = _mm256_fmadd_ps(d_4, d_4, dist_vec);
                    dist_vec = _mm256_fmadd_ps(d_5, d_5, dist_vec);
                    dist_vec = _mm256_fmadd_ps(d_6, d_6, dist_vec);
                    dist_vec = _mm256_fmadd_ps(d_7, d_7, dist_vec);
                    dist_vec = _mm256_fmadd_ps(d_8, d_8, dist_vec);
                    dist_vec = _mm256_fmadd_ps(d_9, d_9, dist_vec);

                    __m128i old_assignment = _mm_loadu_si128((__m128i *)assignment_row);
                    __m256 old_min_dist = _mm256_loadu_ps(min_dist_row);
                    __m256 new_min_dist = _mm256_min_ps(dist_vec, old_min_dist);
                    // lane is 0xFFFFFFFF if dist_vec[i] < old_min_dist[i], 0x00000000 otherwise
                    __m256i mask = _mm256_castps_si256(_mm256_cmp_ps(dist_vec, old_min_dist, _CMP_LT_OS));
                    mask = _mm256_srli_epi32(mask, 16);
                    __m128i mask__narrow = _mm256_extracti128_si256(
                        _mm256_permute4x64_epi64(_mm256_packus_epi32(mask, mask), 0xD8),
                        0
                    );
                    // if mask[i+7:i] == 0xFF, choose b[i+7:i], otherwise choose a[i+7:i]
                    __m128i new_assignment = _mm_blendv_epi8(old_assignment, cluster_number_vec, mask__narrow);

                    int rem = x_hi - j;
                    if (rem >= 8) {
                        _mm_storeu_si128((__m128i*)assignment_row, new_assignment);
                        _mm256_storeu_ps(min_dist_row, new_min_dist);
                    } else {
                        uint16_t arr_assignment[8];
                        float arr_dist[8];
                        _mm_storeu_si128((__m128i*)arr_assignment, new_assignment);
                        _mm256_storeu_ps(arr_dist, new_min_dist);
                        for (int delta = 0; delta < rem; delta++) {
                            assignment_row[delta] = arr_assignment[delta];
                            min_dist_row[delta] = arr_dist[delta];
                        }
                    }
                }
            }
        }
    }

    void normalize_features(float * __restrict img_feats[10], float* __restrict weights, int size) {
        #pragma omp parallel for num_threads(fsparallel::nth())
        for (int i = 0; i < size; i += 8) {
            __m256 reciprocal_w = _mm256_rcp_ps(_mm256_loadu_ps(&weights[i]));
            _mm256_storeu_ps(&img_feats[0][i], _mm256_mul_ps(_mm256_loadu_ps(&img_feats[0][i]), reciprocal_w));
            _mm256_storeu_ps(&img_feats[1][i], _mm256_mul_ps(_mm256_loadu_ps(&img_feats[1][i]), reciprocal_w));
            _mm256_storeu_ps(&img_feats[2][i], _mm256_mul_ps(_mm256_loadu_ps(&img_feats[2][i]), reciprocal_w));
            _mm256_storeu_ps(&img_feats[3][i], _mm256_mul_ps(_mm256_loadu_ps(&img_feats[3][i]), reciprocal_w));
            _mm256_storeu_ps(&img_feats[4][i], _mm256_mul_ps(_mm256_loadu_ps(&img_feats[4][i]), reciprocal_w));
            _mm256_storeu_ps(&img_feats[5][i], _mm256_mul_ps(_mm256_loadu_ps(&img_feats[5][i]), reciprocal_w));
            _mm256_storeu_ps(&img_feats[6][i], _mm256_mul_ps(_mm256_loadu_ps(&img_feats[6][i]), reciprocal_w));
            _mm256_storeu_ps(&img_feats[7][i], _mm256_mul_ps(_mm256_loadu_ps(&img_feats[7][i]), reciprocal_w));
            _mm256_storeu_ps(&img_feats[8][i], _mm256_mul_ps(_mm256_loadu_ps(&img_feats[8][i]), reciprocal_w));
            _mm256_storeu_ps(&img_feats[9][i], _mm256_mul_ps(_mm256_loadu_ps(&img_feats[9][i]), reciprocal_w));
        }
    }
};
} // namespace fslic
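Since the intrinsics obscure the arithmetic, here is a scalar sketch of what get_assignment_value_vec computes for a single pixel. It is not part of the library; the function name is invented, and the saturating uint16 convention is assumed from the code above.

#include <stdint.h>

static inline void assign_one_pixel(const uint8_t quad[4],        /* R, G, B, padding */
                                    const uint8_t cluster_rgb[3], /* cluster R, G, B */
                                    uint16_t spatial_dist,
                                    uint16_t cluster_number,
                                    uint16_t *min_dist,
                                    uint16_t *assignment)
{
    /* Color distance: sum of absolute channel differences (the padding
       byte contributes 0 because both sides store 0 there). */
    uint16_t color_dist = 0;
    for (int c = 0; c < 3; c++) {
        int d = (int)quad[c] - (int)cluster_rgb[c];
        color_dist += (uint16_t)(d < 0 ? -d : d);
    }
    /* Total distance; the SIMD path uses a saturating add here. */
    uint16_t dist = (uint16_t)(color_dist + spatial_dist);
    /* Keep the old assignment on ties: only a strictly smaller distance
       claims the pixel, matching the cmpeq/blendv pair above. */
    if (dist < *min_dist) {
        *min_dist = dist;
        *assignment = cluster_number;
    }
}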
dictionary_generator.c
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include "../tagger/tags.h"
#include "dictionary_generator.h"
#include "../corpus/corpus_io.h"
#include "../lib/hashmap.h"
#include "dictionary_reduce.h"

void shorten_tag_struct(struct hashmap map);

HASHMAP_FUNCS_CREATE(tag, const char, struct tagcounts_t);
HASHMAP_FUNCS_CREATE(new, const char, int);

tagcounts_t frequency_count; //call in generate dict = malloc (sizeof (struct tagcounts_t));
int most_common_tag; // call in generate dict= malloc (sizeof(int));

struct hashmap generate_dictionary(corpus_t corpus)
{
    FILE* fp = fopen("frequencies.txt", "r");
    if (fp == NULL) {
        // No cached frequency file: count tags over the whole corpus.
        struct hashmap map;
        hashmap_init(&map, hashmap_hash_string, hashmap_compare_string, 0);
        // The hashmap itself is not thread-safe, so lookups and inserts
        // are serialized in a critical section.
        #pragma omp parallel for num_threads(4)
        for (int i = 0; i < corpus.num_lines; i++) {
            if (!corpus.info[i].ignore_flag) {
                #pragma omp critical
                {
                    struct tagcounts_t *data = tag_hashmap_get(&map, corpus.words[i]);
                    if (data == NULL) {
                        struct tagcounts_t *newtags = malloc(sizeof(struct tagcounts_t));
                        memset(newtags, 0, sizeof(struct tagcounts_t));
                        update_tags(corpus.words[i], newtags, corpus.human_tags[i]);
                        tag_hashmap_put(&map, corpus.words[i], newtags);
                    } else {
                        update_tags(corpus.words[i], data, corpus.human_tags[i]);
                    }
                }
            }
        }
        return reduce_map(map);
    } else {
        // Cached file exists: each line is "<word>\t<tag>".
        struct hashmap newmap;
        hashmap_init(&newmap, hashmap_hash_string, hashmap_compare_string, 0);
        char* line = NULL;
        size_t len = 0;
        ssize_t read;
        while ((read = getline(&line, &len, fp)) != -1) {
            char* saveptr;
            char* word = strtok_r(line, "\t", &saveptr);
            char* tag = strtok_r(NULL, " ", &saveptr);
            if (word[strlen(word) - 1] == '\n') word[strlen(word) - 1] = '\0';
            if (tag[strlen(tag) - 1] == '\n') tag[strlen(tag) - 1] = '\0';
            int* highest = malloc(sizeof(int));
            *highest = tag_to_hash(tag);
            char* key = malloc(strlen(word) + 1);
            strcpy(key, word);
            new_hashmap_put(&newmap, key, highest);
        }
        free(line);
        fclose(fp);
        return newmap;
    }
}

void update_tags(char* word, struct tagcounts_t *val, int tag)
{
    // The tag is already the hashed enum value from ../tagger/tags.h.
    int hash_value = tag;
    // One case per tag, originally auto-generated; the X-macro expands to
    // exactly the same per-tag statement as before:
    //   case T: val->T += 1; frequency_count.T += 1; break;
    // Punctuation is not included because those tags are known up front.
    #define COUNT_TAG(T) case T: val->T += 1; frequency_count.T += 1; break;
    switch (hash_value) {
    COUNT_TAG(APPGE) COUNT_TAG(AT)    COUNT_TAG(AT1)   COUNT_TAG(BCL)   COUNT_TAG(CC)
    COUNT_TAG(CCB)   COUNT_TAG(CS)    COUNT_TAG(CSA)   COUNT_TAG(CSN)   COUNT_TAG(CST)
    COUNT_TAG(CSW)   COUNT_TAG(DA)    COUNT_TAG(DA1)   COUNT_TAG(DA2)   COUNT_TAG(DAR)
    COUNT_TAG(DAT)   COUNT_TAG(DB)    COUNT_TAG(DB2)   COUNT_TAG(DD)    COUNT_TAG(DD1)
    COUNT_TAG(DD2)   COUNT_TAG(DDQ)   COUNT_TAG(DDQGE) COUNT_TAG(DDQV)  COUNT_TAG(EX)
    COUNT_TAG(FO)    COUNT_TAG(FU)    COUNT_TAG(FW)    COUNT_TAG(GE)    COUNT_TAG(IF)
    COUNT_TAG(II)    COUNT_TAG(IO)    COUNT_TAG(IW)    COUNT_TAG(JJ)    COUNT_TAG(JJR)
    COUNT_TAG(JJT)   COUNT_TAG(JK)    COUNT_TAG(MC)    COUNT_TAG(MC1)   COUNT_TAG(MC2)
    COUNT_TAG(MCGE)  COUNT_TAG(MCMC)  COUNT_TAG(MD)    COUNT_TAG(MF)    COUNT_TAG(ND1)
    COUNT_TAG(NN)    COUNT_TAG(NN1)   COUNT_TAG(NN2)   COUNT_TAG(NNA)   COUNT_TAG(NNB)
    COUNT_TAG(NNL1)  COUNT_TAG(NNL2)  COUNT_TAG(NNO)   COUNT_TAG(NNO2)  COUNT_TAG(NNT1)
    COUNT_TAG(NNT2)  COUNT_TAG(NNU)   COUNT_TAG(NNU1)  COUNT_TAG(NNU2)  COUNT_TAG(NP)
    COUNT_TAG(NP1)   COUNT_TAG(NP2)   COUNT_TAG(NPD1)  COUNT_TAG(NPD2)  COUNT_TAG(NPM1)
    COUNT_TAG(NPM2)  COUNT_TAG(PN)    COUNT_TAG(PN1)   COUNT_TAG(PNQO)  COUNT_TAG(PNQS)
    COUNT_TAG(PNQV)  COUNT_TAG(PNX1)  COUNT_TAG(PPGE)  COUNT_TAG(PPH1)  COUNT_TAG(PPHO1)
    COUNT_TAG(PPHO2) COUNT_TAG(PPHS1) COUNT_TAG(PPHS2) COUNT_TAG(PPIO1) COUNT_TAG(PPIO2)
    COUNT_TAG(PPIS1) COUNT_TAG(PPIS2) COUNT_TAG(PPX1)  COUNT_TAG(PPX2)  COUNT_TAG(PPY)
    COUNT_TAG(RA)    COUNT_TAG(REX)   COUNT_TAG(RG)    COUNT_TAG(RGQ)   COUNT_TAG(RGQV)
    COUNT_TAG(RGR)   COUNT_TAG(RGT)   COUNT_TAG(RL)    COUNT_TAG(RP)    COUNT_TAG(RPK)
    COUNT_TAG(RR)    COUNT_TAG(RRQ)   COUNT_TAG(RRQV)  COUNT_TAG(RRR)   COUNT_TAG(RRT)
    COUNT_TAG(RT)    COUNT_TAG(TO)    COUNT_TAG(UH)    COUNT_TAG(VB0)   COUNT_TAG(VBDR)
    COUNT_TAG(VBDZ)  COUNT_TAG(VBG)   COUNT_TAG(VBI)   COUNT_TAG(VBM)   COUNT_TAG(VBN)
    COUNT_TAG(VBR)   COUNT_TAG(VBZ)   COUNT_TAG(VD0)   COUNT_TAG(VDD)   COUNT_TAG(VDG)
    COUNT_TAG(VDI)   COUNT_TAG(VDN)   COUNT_TAG(VDZ)   COUNT_TAG(VH0)   COUNT_TAG(VHD)
    COUNT_TAG(VHG)   COUNT_TAG(VHI)   COUNT_TAG(VHN)   COUNT_TAG(VHZ)   COUNT_TAG(VM)
    COUNT_TAG(VMK)   COUNT_TAG(VV0)   COUNT_TAG(VVD)   COUNT_TAG(VVG)   COUNT_TAG(VVGK)
    COUNT_TAG(VVI)   COUNT_TAG(VVN)   COUNT_TAG(VVNK)  COUNT_TAG(VVZ)   COUNT_TAG(XX)
    COUNT_TAG(ZZ1)   COUNT_TAG(ZZ2)
    //default: printf("Illegal input tag\n");
    }
    #undef COUNT_TAG
}

// Takes a tag count structure and returns the enum hash of the most
// frequent tag; ties go to whichever tag comes first in the list below.
int get_highest_frequency(tagcounts_t* tags)
{
    int highest = -1;
    int temp = -1;
    // Same X-macro trick as update_tags; each expansion is equivalent to
    //   if (tags->T > temp) { temp = tags->T; highest = T; }
    #define CHECK_TAG(T) if (tags->T > temp) { temp = tags->T; highest = T; }
    CHECK_TAG(APPGE) CHECK_TAG(AT)    CHECK_TAG(AT1)   CHECK_TAG(BCL)   CHECK_TAG(CC)
    CHECK_TAG(CCB)   CHECK_TAG(CS)    CHECK_TAG(CSA)   CHECK_TAG(CSN)   CHECK_TAG(CST)
    CHECK_TAG(CSW)   CHECK_TAG(DA)    CHECK_TAG(DA1)   CHECK_TAG(DA2)   CHECK_TAG(DAR)
    CHECK_TAG(DAT)   CHECK_TAG(DB)    CHECK_TAG(DB2)   CHECK_TAG(DD)    CHECK_TAG(DD1)
    CHECK_TAG(DD2)   CHECK_TAG(DDQ)   CHECK_TAG(DDQGE) CHECK_TAG(DDQV)  CHECK_TAG(EX)
    CHECK_TAG(FO)    CHECK_TAG(FU)    CHECK_TAG(FW)    CHECK_TAG(GE)    CHECK_TAG(IF)
    CHECK_TAG(II)    CHECK_TAG(IO)    CHECK_TAG(IW)    CHECK_TAG(JJ)    CHECK_TAG(JJR)
    CHECK_TAG(JJT)   CHECK_TAG(JK)    CHECK_TAG(MC)    CHECK_TAG(MC1)   CHECK_TAG(MC2)
    CHECK_TAG(MCGE)  CHECK_TAG(MCMC)  CHECK_TAG(MD)    CHECK_TAG(MF)    CHECK_TAG(ND1)
    CHECK_TAG(NN)    CHECK_TAG(NN1)   CHECK_TAG(NN2)   CHECK_TAG(NNA)   CHECK_TAG(NNB)
    CHECK_TAG(NNL1)  CHECK_TAG(NNL2)  CHECK_TAG(NNO)   CHECK_TAG(NNO2)  CHECK_TAG(NNT1)
    CHECK_TAG(NNT2)  CHECK_TAG(NNU)   CHECK_TAG(NNU1)  CHECK_TAG(NNU2)  CHECK_TAG(NP)
    CHECK_TAG(NP1)   CHECK_TAG(NP2)   CHECK_TAG(NPD1)  CHECK_TAG(NPD2)  CHECK_TAG(NPM1)
    CHECK_TAG(NPM2)  CHECK_TAG(PN)    CHECK_TAG(PN1)   CHECK_TAG(PNQO)  CHECK_TAG(PNQS)
    CHECK_TAG(PNQV)  CHECK_TAG(PNX1)  CHECK_TAG(PPGE)  CHECK_TAG(PPH1)  CHECK_TAG(PPHO1)
    CHECK_TAG(PPHO2) CHECK_TAG(PPHS1) CHECK_TAG(PPHS2) CHECK_TAG(PPIO1) CHECK_TAG(PPIO2)
    CHECK_TAG(PPIS1) CHECK_TAG(PPIS2) CHECK_TAG(PPX1)  CHECK_TAG(PPX2)  CHECK_TAG(PPY)
    CHECK_TAG(RA)    CHECK_TAG(REX)   CHECK_TAG(RG)    CHECK_TAG(RGQ)   CHECK_TAG(RGQV)
    CHECK_TAG(RGR)   CHECK_TAG(RGT)   CHECK_TAG(RL)    CHECK_TAG(RP)    CHECK_TAG(RPK)
    CHECK_TAG(RR)    CHECK_TAG(RRQ)   CHECK_TAG(RRQV)  CHECK_TAG(RRR)   CHECK_TAG(RRT)
    CHECK_TAG(RT)    CHECK_TAG(TO)    CHECK_TAG(UH)    CHECK_TAG(VB0)   CHECK_TAG(VBDR)
    CHECK_TAG(VBDZ)  CHECK_TAG(VBG)   CHECK_TAG(VBI)   CHECK_TAG(VBM)   CHECK_TAG(VBN)
    CHECK_TAG(VBR)   CHECK_TAG(VBZ)   CHECK_TAG(VD0)   CHECK_TAG(VDD)   CHECK_TAG(VDG)
    CHECK_TAG(VDI)   CHECK_TAG(VDN)   CHECK_TAG(VDZ)   CHECK_TAG(VH0)   CHECK_TAG(VHD)
    CHECK_TAG(VHG)   CHECK_TAG(VHI)   CHECK_TAG(VHN)   CHECK_TAG(VHZ)   CHECK_TAG(VM)
    CHECK_TAG(VMK)   CHECK_TAG(VV0)   CHECK_TAG(VVD)   CHECK_TAG(VVG)   CHECK_TAG(VVGK)
    CHECK_TAG(VVI)   CHECK_TAG(VVN)   CHECK_TAG(VVNK)  CHECK_TAG(VVZ)   CHECK_TAG(XX)
    CHECK_TAG(ZZ1)   CHECK_TAG(ZZ2)
    #undef CHECK_TAG
    return highest;
}
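A hypothetical usage sketch (not in the repo) of get_highest_frequency: build a count structure by hand and ask for the dominant tag. It assumes the enum constants and the tagcounts_t layout come from ../tagger/tags.h as above.

#include <assert.h>
#include "../tagger/tags.h"
#include "dictionary_generator.h"

static void example_most_common_tag(void)
{
    struct tagcounts_t counts = {0};
    counts.NN1 = 7;   /* seen 7 times as a singular common noun */
    counts.VV0 = 2;   /* and twice as a base-form verb */
    assert(get_highest_frequency(&counts) == NN1);
}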
parallel-simple.c
/*
 * parallel-simple.c -- Archer testcase
 */
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// RUN: %libarcher-compile-and-run-race | FileCheck %s
// RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s
// REQUIRES: tsan
#include <omp.h>
#include <stdio.h>

int main(int argc, char *argv[]) {
  int var = 0;
#pragma omp parallel num_threads(2) shared(var)
  {
    var++;
  }

  int error = (var != 2);
  fprintf(stderr, "DONE\n");
  return error;
}

// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}parallel-simple.c:23
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}parallel-simple.c:23
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
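The race on var is the point of the test above. For contrast, a sketch of the same increment made race-free with an atomic update (a reduction(+:var) clause would work equally well); this is illustrative, not part of the Archer suite.

#include <omp.h>

int count_increments(void) {
  int var = 0;
#pragma omp parallel num_threads(2) shared(var)
  {
    /* Serialize the read-modify-write so both increments are observed. */
#pragma omp atomic
    var++;
  }
  return var; /* deterministically 2 */
}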
ast-dump-openmp-target-parallel-for-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s

void test_one(int x) {
#pragma omp target parallel for simd
  for (int i = 0; i < x; i++)
    ;
}

void test_two(int x, int y) {
#pragma omp target parallel for simd
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}

void test_three(int x, int y) {
#pragma omp target parallel for simd collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}

void test_four(int x, int y) {
#pragma omp target parallel for simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}

void test_five(int x, int y, int z) {
#pragma omp target parallel for simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
}

// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:4:1, col:37>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid.
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:10:1, col:37> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | | | 
|-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt 
{{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | 
| | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // 
CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:17:1, col:49> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:38, col:48> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:47> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:47> 'int' 1 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // 
CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' 
<LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:24:1, col:49> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:38, col:48> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:47> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:47> 'int' 2 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | 
`-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 
'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPTargetParallelForSimdDirective {{.*}} <line:31:1, col:49> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:38, col:48> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:47> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:47> 'int' 2 // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | 
`-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, 
line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // 
CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> // 
CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
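The CHECK lines above pin the test source down exactly, since every <line:col> locator maps back to it. For orientation, here is a reconstruction of the last function under test, test_five, inferred purely from the locators in the dump (an illustrative sketch, not the verbatim test file):

// Reconstructed sketch of lines 30-36 of ast-dump-openmp-target-parallel-for-simd.c,
// inferred from the AST source locations above; illustrative only.
void test_five(int x, int y, int z) {
#pragma omp target parallel for simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
}

The collapse(2) clause associates only the two outer loops with the directive; the third for loop is ordinary body code, and each loop legally shadows the previous declaration of i.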
globals.c
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>

#define BONES_MIN(a,b) ((a<b) ? a : b)
#define BONES_MAX(a,b) ((a>b) ? a : b)
#define DIV_CEIL(a,b) ((a+b-1)/b)
#define DIV_FLOOR(a,b) (a/b)

// Multiple iterations for kernel measurements
#define ITERS 1

// Function to initialize the CPU platform (for fair measurements)
void bones_initialize_target(void) {
  int bones_thread_count = omp_get_num_procs();
  omp_set_num_threads(bones_thread_count);
  #pragma omp parallel
  {
    int bones_thread_id = omp_get_thread_num();
  }
}

// Declaration of the original function
int bones_main(void);

// New main function for initialisation and clean-up
int main(void) {
  // Initialisation
  bones_initialize_target();

  // Original main function
  int bones_return = bones_main();

  // Clean-up
  return bones_return;
}
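A side note on bones_initialize_target above: the otherwise-empty parallel region exists only to force the OpenMP runtime to create its thread pool before any measured kernel runs, so later timings do not pay thread-creation cost. A minimal, self-contained sketch of the effect (the timed loop and its bounds are illustrative, not part of this file; compile with -fopenmp):

#include <omp.h>
#include <stdio.h>

int main(void) {
  omp_set_num_threads(omp_get_num_procs());
  #pragma omp parallel
  { (void) omp_get_thread_num(); }   // warm-up: spawn workers once

  double t0 = omp_get_wtime();
  double sum = 0.0;
  #pragma omp parallel for reduction(+:sum)
  for (int i = 0; i < 10000000; i++) sum += i * 0.5;
  printf("kernel: %.6f s (sum = %.1f)\n", omp_get_wtime() - t0, sum);
  return 0;
}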
GB_binop__bshift_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__bshift_int8
// A.*B function (eWiseMult):       GB_AemultB__bshift_int8
// A*D function (colscale):         (none)
// D*A function (rowscale):         (none)
// C+=B function (dense accum):     GB_Cdense_accumB__bshift_int8
// C+=b function (dense accum):     GB_Cdense_accumb__bshift_int8
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__bshift_int8
// C=scalar+B                       GB_bind1st__bshift_int8
// C=scalar+B'                      GB_bind1st_tran__bshift_int8
// C=A+scalar                       GB_bind2nd__bshift_int8
// C=A'+scalar                      GB_bind2nd_tran__bshift_int8

// C type:   int8_t
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_int8 (aij, bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = GB_bitshift_int8 (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_INT8 || GxB_NO_BSHIFT_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__bshift_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__bshift_int8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__bshift_int8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__bshift_int8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__bshift_int8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__bshift_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int8_t bij = Bx [p] ;
        Cx [p] = GB_bitshift_int8 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__bshift_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = Ax [p] ;
        Cx [p] = GB_bitshift_int8 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = Ax [pA] ;                      \
    Cx [pC] = GB_bitshift_int8 (x, aij) ;       \
}

GrB_Info GB_bind1st_tran__bshift_int8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = Ax [pA] ;                      \
    Cx [pC] = GB_bitshift_int8 (aij, y) ;       \
}

GrB_Info GB_bind2nd_tran__bshift_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
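Every kernel in the file above funnels into GB_bitshift_int8, which this generated file only references. Its authoritative definition lives elsewhere in SuiteSparse:GraphBLAS; the sketch below is an assumed model of its semantics — shift left for positive counts, arithmetic shift right for negative counts, saturate once the count reaches the type width — for readers who want to trace the eWise loops by hand:

#include <stdint.h>

// Assumed model of GB_bitshift_int8 (x, k); not the SuiteSparse source.
// k > 0 shifts left, k < 0 shifts right (arithmetic, sign bit replicated),
// and |k| >= 8 saturates to all-zeros or sign fill.
static inline int8_t bitshift_int8_model (int8_t x, int8_t k)
{
    if (k == 0) return (x) ;
    if (k >= 8) return (0) ;                           // all bits shifted out
    if (k <= -8) return ((int8_t) (x < 0 ? -1 : 0)) ;  // sign fill
    if (k > 0) return ((int8_t) (((uint8_t) x) << k)) ;
    return ((int8_t) (x >> (-k))) ;  // assumes arithmetic >> on signed values
}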
Example_nested_loop.1.c
/* * @@name: nested_loop.1c * @@type: C * @@compilable: yes * @@linkable: no * @@expect: success */ void work(int i, int j) {} void good_nesting(int n) { int i, j; #pragma omp parallel default(shared) { #pragma omp for for (i=0; i<n; i++) { #pragma omp parallel shared(i, n) { #pragma omp for for (j=0; j < n; j++) work(i, j); } } } }
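/*
 * A companion sketch, not part of the OpenMP examples sources: by
 * default an implementation may run the inner parallel region above
 * with a team of one thread (nested parallelism disabled). Requesting
 * two active levels via the standard OpenMP 3.0 runtime call
 * omp_set_max_active_levels lets the inner region actually fork:
 */
#include <omp.h>

void good_nesting_enabled(int n)
{
   omp_set_max_active_levels(2);  /* allow two nested parallel levels */
   good_nesting(n);
}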
VolumetricMaxUnpooling.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/VolumetricMaxUnpooling.c" #else static inline void THNN_(VolumetricMaxUnpooling_shapeCheck)( THNNState *state, THTensor *input, THTensor *gradOutput, THIndexTensor *indices, int oT, int oW, int oH, int dT, int dW, int dH, int pT, int pW, int pH) { THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input, "4D or 5D (batch mode) tensor expected for input, but got: %s"); THNN_CHECK_SHAPE_INDICES(input, indices); THArgCheck(dT > 0 && dW > 0 && dH > 0, 10, "stride should be greater than zero, but got dT: %d dH: %d dW: %d", dT, dH, dW); int dimw = 3; int dimh = 2; int dimt = 1; int dimn = 0; if (input->nDimension == 5) { dimt++; dimw++; dimh++; dimn++; } int nslices = input->size[dimn]; if (gradOutput != NULL) { if (oT != gradOutput->size[dimt] || oW != gradOutput->size[dimw] || oH != gradOutput->size[dimh]) { THError( "Inconsistent gradOutput size. oT= %d, oH= %d, oW= %d, gradOutput: %dx%dx%d", oT, oH, oW, gradOutput->size[dimt], gradOutput->size[dimh], gradOutput->size[dimw] ); } THNN_CHECK_DIM_SIZE(gradOutput, input->nDimension, dimn, nslices); } } static void THNN_(VolumetricMaxUnpooling_updateOutput_frame)( real *input_p, real *output_p, THIndex_t *ind_p, int nslices, int iT, int iW, int iH, int oT, int oW, int oH, int dT, int dW, int dH, int pT, int pW, int pH) { int k; int has_error = 0; THIndex_t error_index; #pragma omp parallel for private(k) for (k = 0; k < nslices; k++) { int ti, i, j, maxz, maxy, maxx; for (ti = 0; ti < iT; ti++) { for (i = 0; i < iH; i++) { for (j = 0; j < iW; j++) { int start_t = ti * dT - pT; int start_h = i * dH - pH; int start_w = j * dW - pW; real *input_p_k = input_p + k*iT*iW*iH + ti*iW*iH + i*iW + j; THIndex_t *ind_p_k = ind_p + k*iT*iW*iH + ti*iW*iH + i*iW + j; maxz = ((unsigned char*)(ind_p_k))[0]; /* retrieve position of max */ maxy = ((unsigned char*)(ind_p_k))[1]; maxx = ((unsigned char*)(ind_p_k))[2]; THIndex_t idx = k*oT*oW*oH + oH*oW*(start_t+maxz) + oW*(start_h+maxy) + (start_w+maxx); if (start_t+maxz<0 || start_h+maxy<0 || start_w+maxx<0 || start_t+maxz>=oT || start_h+maxy>=oH || start_w+maxx>=oW) { #pragma omp critical { has_error = 1; error_index = idx; } } else { output_p[idx] = *input_p_k; /* update output */ } } } } } if (has_error) { THError( "found an invalid max index %ld (output volumes are of size %dx%dx%d)", error_index, oT, oH, oW ); } } void THNN_(VolumetricMaxUnpooling_updateOutput)( THNNState *state, THTensor *input, THTensor *output, THIndexTensor *indices, int oT, int oW, int oH, int dT, int dW, int dH, int pT, int pW, int pH) { int dimw = 3; int dimh = 2; int dimt = 1; int nbatch = 1; int nslices; int iT; int iH; int iW; real *input_data; real *output_data; THIndex_t *indices_data; THNN_(VolumetricMaxUnpooling_shapeCheck)( state, input, NULL, indices, oT, oW, oH, dT, dW, dH, pT, pW, pH); if (input->nDimension == 5) { nbatch = input->size[0]; dimt++; dimw++; dimh++; } /* sizes */ nslices = input->size[dimt-1]; iT = input->size[dimt]; iH = input->size[dimh]; iW = input->size[dimw]; /* get contiguous input */ input = THTensor_(newContiguous)(input); indices = THIndexTensor_(newContiguous)(indices); /* resize output */ if (input->nDimension == 4) { THTensor_(resize4d)(output, nslices, oT, oH, oW); THTensor_(zero)(output); input_data = THTensor_(data)(input); output_data = THTensor_(data)(output); indices_data = THIndexTensor_(data)(indices); THNN_(VolumetricMaxUnpooling_updateOutput_frame)( input_data, output_data, indices_data, nslices, iT, iW, iH, oT, oW, oH, 
dT, dW, dH, pT, pW, pH ); } else { int p; THTensor_(resize5d)(output, nbatch, nslices, oT, oH, oW); THTensor_(zero)(output); input_data = THTensor_(data)(input); output_data = THTensor_(data)(output); indices_data = THIndexTensor_(data)(indices); for (p = 0; p < nbatch; p++) { THNN_(VolumetricMaxUnpooling_updateOutput_frame)( input_data+p*nslices*iT*iW*iH, output_data+p*nslices*oT*oW*oH, indices_data+p*nslices*iT*iW*iH, nslices, iT, iW, iH, oT, oW, oH, dT, dW, dH, pT, pW, pH ); } } /* cleanup */ THTensor_(free)(input); THIndexTensor_(free)(indices); } static void THNN_(VolumetricMaxUnpooling_updateGradInput_frame)( real *gradInput_p, real *gradOutput_p, THIndex_t *ind_p, int nslices, int iT, int iW, int iH, int oT, int oW, int oH, int dT, int dW, int dH, int pT, int pW, int pH) { int k; #pragma omp parallel for private(k) for (k = 0; k < nslices; k++) { int ti, i, j, maxz, maxy, maxx; for (ti = 0; ti < iT; ti++) { for (i = 0; i < iH; i++) { for (j = 0; j < iW; j++) { int start_t = ti * dT - pT; int start_h = i * dH - pH; int start_w = j * dW - pW; real *gradInput_p_k = gradInput_p + k*iT*iW*iH + ti*iW*iH + i*iW + j; THIndex_t *ind_p_k = ind_p + k*iT*iW*iH + ti*iW*iH + i*iW + j; maxz = ((unsigned char*)(ind_p_k))[0]; /* retrieve position of max */ maxy = ((unsigned char*)(ind_p_k))[1]; maxx = ((unsigned char*)(ind_p_k))[2]; if (start_t+maxz<0 || start_h+maxy<0 || start_w+maxx<0 || start_t+maxz>=oT || start_h+maxy>=oH || start_w+maxx>=oW) { THError( "invalid max index z= %d, y= %d, x= %d, oT= %d, oW= %d, oH= %d", start_t+maxz, start_h+maxy, start_w+maxx, oT, oW, oH ); } *gradInput_p_k = gradOutput_p[k*oT*oW*oH + oH*oW*(start_t+maxz) + oW*(start_h+maxy) + (start_w+maxx)]; /* update gradient */ } } } } } void THNN_(VolumetricMaxUnpooling_updateGradInput)( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THIndexTensor *indices, int oT, int oW, int oH, int dT, int dW, int dH, int pT, int pW, int pH) { int dimw = 3; int dimh = 2; int dimt = 1; int nbatch = 1; int nslices; int iT; int iH; int iW; real *gradInput_data; real *gradOutput_data; THIndex_t *indices_data; THNN_(VolumetricMaxUnpooling_shapeCheck)( state, input, gradOutput, indices, oT, oW, oH, dT, dW, dH, pT, pW, pH); // TODO: check gradOutput shape /* get contiguous gradOutput */ gradOutput = THTensor_(newContiguous)(gradOutput); indices = THIndexTensor_(newContiguous)(indices); /* resize */ THTensor_(resizeAs)(gradInput, input); THTensor_(zero)(gradInput); if (input->nDimension == 5) { nbatch = input->size[0]; dimt++; dimw++; dimh++; } /* sizes */ nslices = input->size[dimt-1]; iT = input->size[dimt]; iH = input->size[dimh]; iW = input->size[dimw]; /* get raw pointers */ gradInput_data = THTensor_(data)(gradInput); gradOutput_data = THTensor_(data)(gradOutput); indices_data = THIndexTensor_(data)(indices); /* backprop */ if (input->nDimension == 4) { THNN_(VolumetricMaxUnpooling_updateGradInput_frame)( gradInput_data, gradOutput_data, indices_data, nslices, iT, iW, iH, oT, oW, oH, dT, dW, dH, pT, pW, pH ); } else { int p; for (p = 0; p < nbatch; p++) { THNN_(VolumetricMaxUnpooling_updateGradInput_frame)( gradInput_data+p*nslices*iT*iW*iH, gradOutput_data+p*nslices*oT*oW*oH, indices_data+p*nslices*iT*iW*iH, nslices, iT, iW, iH, oT, oW, oH, dT, dW, dH, pT, pW, pH ); } } /* cleanup */ THTensor_(free)(gradOutput); THIndexTensor_(free)(indices); } #endif
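/*
 * Illustration only, not part of the THNN sources: the reads
 * ((unsigned char*)(ind_p_k))[0..2] above assume that the matching
 * max-pooling forward pass packed the (z, y, x) position of each max,
 * one coordinate per byte, into a single THIndex_t. A minimal sketch of
 * that assumed packing convention (note that byte-aliasing like this is
 * endianness-dependent):
 */
#include <stdint.h>

static int64_t pack_max_index_demo(unsigned char z, unsigned char y, unsigned char x)
{
  int64_t idx = 0;
  unsigned char *b = (unsigned char *)&idx;
  b[0] = z;  /* read back above as maxz */
  b[1] = y;  /* read back above as maxy */
  b[2] = x;  /* read back above as maxx */
  return idx;
}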
layerramsubset.h
/********************************************************************************* * * Inviwo - Interactive Visualization Workshop * * Copyright (c) 2018-2019 Inviwo Foundation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *********************************************************************************/ #ifndef IVW_LAYERRAMSUBSET_H #define IVW_LAYERRAMSUBSET_H #include <modules/base/basemoduledefine.h> #include <inviwo/core/common/inviwo.h> #include <inviwo/core/datastructures/image/layer.h> #include <inviwo/core/datastructures/image/layerram.h> #include <inviwo/core/datastructures/image/layerramprecision.h> #include <inviwo/core/util/glm.h> #include <algorithm> namespace inviwo { namespace util { /** * \brief extracts a subregion from a layer and returns it as a new layer * * This function extracts a subregion given by offset and extent from the input layer. * If border clamping is enabled, the output region will be clamped to lie completely within the * source layer. Otherwise (default), the areas outside the source layer will be filled with * zeros. * * @param in input layer * @param offset subregion offset in input layer * @param extent extent (width and height) of subregion * @param clampBorderOutsideImage if true, the output region is clamped to the layer boundaries * @return std::shared_ptr<LayerRAM> */ IVW_MODULE_BASE_API std::shared_ptr<LayerRAM> layerSubSet(const Layer* in, ivec2 offset, size2_t extent, bool clampBorderOutsideImage = false); /** * \brief extracts a subregion from a layer and converts it into a new layer * * This function extracts a subregion given by offset and extent from the input layer. The values * will be converted to type T using util::glm_convert_normalized. * If border clamping is enabled, the output region will be clamped to lie completely within the * source layer. Otherwise (default), the areas outside the source layer will be filled with * zeros. 
* * @param in input layer * @param offset subregion offset in input layer * @param extent extent (width and height) of subregion * @param clampBorderOutsideImage if true, the output region is clamped to the layer boundaries * @return std::shared_ptr<LayerRAMPrecision<T>> */ template <typename T> std::shared_ptr<LayerRAMPrecision<T>> layerSubSet(const Layer* in, ivec2 offset, size2_t extent, bool clampBorderOutsideImage = false); namespace detail { template <typename T> void conversionCopy(const T* src, T* dst, size_t len) { std::copy(src, src + len, dst); } template <typename To, typename From> void conversionCopy(const From* src, To* dst, size_t len) { for (size_t i = 0; i < len; i++) { dst[i] = util::glm_convert_normalized<To, From>(src[i]); } } template <typename T, typename U = T> std::shared_ptr<LayerRAMPrecision<U>> extractLayerSubSet(const LayerRAMPrecision<T>* inLayer, ivec2 offset, size2_t extent, bool clampBorderOutsideImage) { // determine parameters const ivec2 srcDim(inLayer->getDimensions()); // adjust the output dimensions to match the intersection of output and input regions const ivec2 srcOffset(glm::max(ivec2(0), offset)); const ivec2 dstOffset = clampBorderOutsideImage ? ivec2(0) : (glm::max(ivec2(0), -offset)); // clamp copy extent to source layer const ivec2 copyExtent = glm::min(ivec2(extent) - dstOffset, srcDim - srcOffset); const ivec2 dstDim = clampBorderOutsideImage ? copyExtent : ivec2(extent); // allocate space auto newLayer = std::make_shared<LayerRAMPrecision<U>>(dstDim); const auto src = inLayer->getDataTyped(); auto dst = newLayer->getDataTyped(); if (!clampBorderOutsideImage) { // clear entire layer as only parts will be copied std::fill(dst, dst + dstDim.x * dstDim.y, U(0)); } // memcpy each row to form sub layer #pragma omp parallel for for (int j = 0; j < copyExtent.y; j++) { size_t srcPos = (j + srcOffset.y) * srcDim.x + srcOffset.x; size_t dstPos = (j + dstOffset.y) * dstDim.x + dstOffset.x; conversionCopy(src + srcPos, dst + dstPos, static_cast<size_t>(copyExtent.x)); } return newLayer; } } // namespace detail } // namespace util template <typename T> std::shared_ptr<LayerRAMPrecision<T>> util::layerSubSet(const Layer* in, ivec2 offset, size2_t extent, bool clampBorderOutsideImage) { return in->getRepresentation<LayerRAM>()->dispatch<std::shared_ptr<LayerRAMPrecision<T>>>( [offset, extent, clampBorderOutsideImage](auto layerpr) { using ValueType = util::PrecisionValueType<decltype(layerpr)>; return util::detail::extractLayerSubSet<ValueType, T>(layerpr, offset, extent, clampBorderOutsideImage); }); } } // namespace inviwo #endif // IVW_LAYERRAMSUBSET_H
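/*
 * Worked example, not part of the Inviwo sources: the offset/extent
 * arithmetic in detail::extractLayerSubSet, reduced to one axis with a
 * source width of 8, a requested offset of 5, and an extent of 6.
 *
 * Zero-fill mode (clampBorderOutsideImage == false):
 *   srcOffset  = max(0, 5)          = 5
 *   dstOffset  = max(0, -5)         = 0
 *   copyExtent = min(6 - 0, 8 - 5)  = 3
 *   dstDim     = extent             = 6
 * so source texels 5..7 land in destination columns 0..2, and columns
 * 3..5 keep the zero fill.
 *
 * Clamping mode (clampBorderOutsideImage == true):
 *   dstDim = copyExtent = 3
 * so the output layer shrinks to the region that actually overlaps the
 * source.
 */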
transpose.h
// Copyright 2018 Xiaomi, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_KERNELS_TRANSPOSE_H_ #define MACE_KERNELS_TRANSPOSE_H_ #if defined(MACE_ENABLE_NEON) #include <arm_neon.h> #endif #include <vector> #include "mace/core/future.h" #include "mace/core/tensor.h" #include "mace/public/mace.h" #include "mace/utils/utils.h" namespace mace { namespace kernels { static void TransposeNHWCToNCHWC3(const float *input, float *output, const index_t height, const index_t width) { index_t image_size = height * width; #pragma omp parallel for for (index_t h = 0; h < height; ++h) { index_t in_offset = h * width * 3; index_t out_offset = h * width; #if defined(MACE_ENABLE_NEON) index_t w; for (w = 0; w + 3 < width; w += 4) { float32x4x3_t vi = vld3q_f32(input + in_offset); vst1q_f32(output + out_offset, vi.val[0]); vst1q_f32(output + out_offset + image_size, vi.val[1]); vst1q_f32(output + out_offset + image_size * 2, vi.val[2]); in_offset += 12; out_offset += 4; } for (; w < width; ++w) { for (index_t c = 0; c < 3; ++c) { output[h * width + image_size * c + w] = input[h * width * 3 + w * 3 + c]; } } #else for (index_t w = 0; w < width; ++w) { for (index_t c = 0; c < 3; ++c) { output[out_offset + c * image_size + w] = input[in_offset + w * 3 + c]; } } #endif } } static void TransposeNCHWToNHWCC2(const float *input, float *output, const index_t height, const index_t width) { index_t image_size = height * width; #pragma omp parallel for for (index_t h = 0; h < height; ++h) { index_t in_offset = h * width; index_t out_offset = h * width * 2; #if defined(MACE_ENABLE_NEON) index_t w; for (w = 0; w + 3 < width; w += 4) { float32x4_t vi0 = vld1q_f32(input + in_offset); float32x4_t vi1 = vld1q_f32(input + in_offset + image_size); float32x4x2_t vi = {vi0, vi1}; vst2q_f32(output + out_offset, vi); in_offset += 4; out_offset += 8; } for (; w < width; ++w) { for (index_t c = 0; c < 2; ++c) { output[h * width * 2 + w * 2 + c] = input[h * width + image_size * c + w]; } } #else for (index_t w = 0; w < width; ++w) { for (index_t c = 0; c < 2; ++c) { output[out_offset + w * 2 + c] = input[in_offset + c * image_size + w]; } } #endif } } template<DeviceType D, typename T> struct TransposeFunctor { explicit TransposeFunctor(const std::vector<int> &dims) : dims_(dims) {} MaceStatus operator()(const Tensor *input, Tensor *output, StatsFuture *future) { MACE_UNUSED(future); Tensor::MappingGuard input_guard(input); Tensor::MappingGuard output_guard(output); const std::vector<index_t> &input_shape = input->shape(); const std::vector<index_t> &output_shape = output->shape(); const T *input_data = input->data<T>(); T *output_data = output->mutable_data<T>(); if (input->dim_size() == 2) { MACE_CHECK(dims_[0] == 1 && dims_[1] == 0, "no need transform"); index_t stride_i = input_shape[0]; index_t stride_j = input_shape[1]; for (int i = 0; i < input_shape[0]; ++i) { for (int j = 0; j < input_shape[1]; ++j) { output_data[j * stride_i + i] = input_data[i * stride_j + j]; } } } else if 
(input->dim_size() == 4) { std::vector<int> transpose_order_from_NHWC_to_NCHW{0, 3, 1, 2}; std::vector<int> transpose_order_from_NCHW_to_NHWC{0, 2, 3, 1}; index_t batch_size = input->dim(1) * input->dim(2) * input->dim(3); if (dims_ == transpose_order_from_NHWC_to_NCHW && input->dim(3) == 3) { for (index_t b = 0; b < input->dim(0); ++b) { TransposeNHWCToNCHWC3(input_data + b * batch_size, output_data + b * batch_size, input->dim(1), input->dim(2)); } } else if (dims_ == transpose_order_from_NCHW_to_NHWC && input->dim(1) == 2) { for (index_t b = 0; b < input->dim(0); ++b) { TransposeNCHWToNHWCC2(input_data + b * batch_size, output_data + b * batch_size, input->dim(2), input->dim(3)); } } else { std::vector<index_t> in_stride{input_shape[1] * input_shape[2] * input_shape[3], input_shape[2] * input_shape[3], input_shape[3], 1}; std::vector<index_t> out_stride{output_shape[1] * output_shape[2] * output_shape[3], output_shape[2] * output_shape[3], output_shape[3], 1}; std::vector<index_t> idim(4, 0); std::vector<index_t> odim(4, 0); for (odim[0] = 0; odim[0] < output_shape[0]; ++odim[0]) { for (odim[1] = 0; odim[1] < output_shape[1]; ++odim[1]) { for (odim[2] = 0; odim[2] < output_shape[2]; ++odim[2]) { for (odim[3] = 0; odim[3] < output_shape[3]; ++odim[3]) { idim[dims_[0]] = odim[0]; idim[dims_[1]] = odim[1]; idim[dims_[2]] = odim[2]; idim[dims_[3]] = odim[3]; output_data[odim[0] * out_stride[0] + odim[1] * out_stride[1] + odim[2] * out_stride[2] + odim[3]] = input_data[idim[0] * in_stride[0] + idim[1] * in_stride[1] + idim[2] * in_stride[2] + idim[3]]; } } } } } } else { MACE_NOT_IMPLEMENTED; } return MACE_SUCCESS; } std::vector<int> dims_; }; } // namespace kernels } // namespace mace #endif // MACE_KERNELS_TRANSPOSE_H_
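// Illustration only, not part of the MACE sources: the stride-based
// fallback in the functor above, reduced to a self-contained 4-D
// transpose on raw arrays. dims[d] names the input axis that output
// axis d reads from, matching the idim[dims_[d]] = odim[d] mapping.
#include <stdint.h>

static void transpose4d_demo(const float *in, float *out,
                             const int64_t in_shape[4], const int dims[4]) {
  int64_t out_shape[4];
  for (int d = 0; d < 4; ++d) out_shape[d] = in_shape[dims[d]];
  const int64_t in_stride[4] = {in_shape[1] * in_shape[2] * in_shape[3],
                                in_shape[2] * in_shape[3], in_shape[3], 1};
  int64_t idim[4], odim[4], o = 0;
  for (odim[0] = 0; odim[0] < out_shape[0]; ++odim[0])
    for (odim[1] = 0; odim[1] < out_shape[1]; ++odim[1])
      for (odim[2] = 0; odim[2] < out_shape[2]; ++odim[2])
        for (odim[3] = 0; odim[3] < out_shape[3]; ++odim[3]) {
          for (int d = 0; d < 4; ++d) idim[dims[d]] = odim[d];
          // the output is visited in row-major order, so o is its linear index
          out[o++] = in[idim[0] * in_stride[0] + idim[1] * in_stride[1] +
                        idim[2] * in_stride[2] + idim[3]];
        }
}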
VolumetricConvolutionMM.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/VolumetricConvolutionMM.c" #else static void inline THNN_(VolumetricConvolutionMM_shapeCheck)( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *weight, THTensor *bias, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH) { THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input, "4D or 5D (batch mode) tensor expected for input, but got: %s"); THArgCheck(kT > 0 && kW > 0 && kH > 0, 8, "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d", kT, kH, kW); THArgCheck(dT > 0 && dW > 0 && dH > 0, 11, "stride should be greater than zero, but got dT: %d dH: %d dW: %d", dT, dH, dW); int ndim = input->nDimension; int dimf = 0; int dimt = 1; int dimh = 2; int dimw = 3; if (ndim == 5) { dimf++; dimt++; dimh++; dimw++; } int64_t nInputPlane; int64_t inputDepth; int64_t inputHeight; int64_t inputWidth; int64_t nOutputPlane; int64_t outputDepth; int64_t outputHeight; int64_t outputWidth; nInputPlane = input->size[dimf]; inputDepth = input->size[dimt]; inputHeight = input->size[dimh]; inputWidth = input->size[dimw]; nOutputPlane = weight->size[0]; outputDepth = (inputDepth + 2*pT - kT) / dT + 1; outputHeight = (inputHeight + 2*pH - kH) / dH + 1; outputWidth = (inputWidth + 2*pW - kW) / dW + 1; if (outputWidth < 1 || outputHeight < 1 || outputDepth < 1) { THError( "Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). Output size is too small", nInputPlane, inputDepth, inputHeight, inputWidth, nOutputPlane, outputDepth, outputHeight, outputWidth ); } THArgCheck(weight->nDimension == 2 || weight->nDimension == 5, 4, "weight tensor should be 2D or 5D - got %d", weight->nDimension); if (bias != NULL) { THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]); } THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane); if (gradOutput != NULL) { THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane); THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, outputDepth); THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight); THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth); } } static THTensor* THNN_(view_weight)(THTensor *weight) { weight = THTensor_(newContiguous)(weight); if (weight->nDimension == 5) { int64_t s1 = weight->size[0]; int64_t s2 = weight->size[1] * weight->size[2] * weight->size[3] * weight->size[4]; THTensor *old_weight = weight; weight = THTensor_(newWithStorage2d)(weight->storage, weight->storageOffset, s1, -1, s2, -1); THTensor_(free)(old_weight); } return weight; } /* note: due to write issues, this one cannot be parallelized as well as unfolded_copy */ static void THNN_(unfolded_acc_vol)( THTensor *finput, THTensor *input, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, long nInputPlane, long inputDepth, long inputWidth, long inputHeight, long outputDepth, long outputWidth, long outputHeight) { long nip; real *input_data = THTensor_(data)(input); real *finput_data = THTensor_(data)(finput); //#pragma omp parallel for private(nip) for (nip = 0; nip < nInputPlane; nip++) { long kt, kw, kh, t, y, x, it, ix, iy; for (kt = 0; kt < kT; kt++) { for (kh = 0; kh < kH; kh++) { for (kw = 0; kw < kW; kw++) { real *src = finput_data + nip * (kT*kH*kW*outputDepth*outputHeight*outputWidth) + kt * (kH*kW*outputDepth*outputHeight*outputWidth) + kh * (kW*outputDepth*outputHeight*outputWidth) + kw * (outputDepth*outputHeight*outputWidth); real *dst = input_data + nip*(inputDepth*inputHeight*inputWidth); if (pT > 0 || pH > 0 || pW > 0) { for (t = 0; t < 
outputDepth; t++) { it = t*dT - pT + kt; for (y = 0; y < outputHeight; y++) { iy = y*dH - pH + kh; for (x = 0; x < outputWidth; x++) { ix = x*dW - pW + kw; if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight || ix < 0 || ix >= inputWidth) { } else { real *dst_slice = dst+it*inputHeight*inputWidth+iy*inputWidth+ix; THVector_(cadd)(dst_slice, dst_slice, src+t*outputHeight*outputWidth+y*outputWidth+x, 1, 1); } } } } } else { for (t = 0; t < outputDepth; t++) { it = t*dT + kt; for (y = 0; y < outputHeight; y++) { iy = y*dH + kh; for(x = 0; x < outputWidth; x++) { ix = x*dW + kw; real *dst_slice = dst+it*inputHeight*inputWidth+iy*inputWidth+ix; THVector_(cadd)(dst_slice, dst_slice, src+t*outputHeight*outputWidth+y*outputWidth+x, 1, 1); } } } } } } } } } static void THNN_(unfolded_copy_vol)( THTensor *finput, THTensor *input, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, long nInputPlane, long inputDepth, long inputWidth, long inputHeight, long outputDepth, long outputWidth, long outputHeight) { int64_t k; real *input_data = THTensor_(data)(input); real *finput_data = THTensor_(data)(finput); // #pragma omp parallel for private(k) for (k = 0; k < nInputPlane*kT*kH*kW; k++) { long nip = k / (kT*kH*kW); long rest = k % (kT*kH*kW); long kt = rest / (kH*kW); rest = rest % (kH*kW); long kh = rest / kW; long kw = rest % kW; long t,x,y,it,ix,iy; real *dst = finput_data + nip * (kT*kH*kW*outputDepth*outputHeight*outputWidth) + kt * (kH*kW*outputDepth*outputHeight*outputWidth) + kh * (kW*outputDepth*outputHeight*outputWidth) + kw * (outputDepth*outputHeight*outputWidth); real *src = input_data + nip*(inputDepth*inputHeight*inputWidth); if (pT > 0 || pH > 0 || pW > 0) { for (t = 0; t < outputDepth; t++) { it = t*dT - pT + kt; for (y = 0; y < outputHeight; y++) { iy = y*dH - pH + kh; for (x = 0; x < outputWidth; x++) { ix = x*dW - pW + kw; if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight || ix < 0 || ix >= inputWidth) memset(dst+t*outputHeight*outputWidth+y*outputWidth+x, 0, sizeof(real)*(1)); else memcpy(dst+t*outputHeight*outputWidth+y*outputWidth+x, src+it*inputHeight*inputWidth+iy*inputWidth+ix, sizeof(real)*(1)); } } } } else { for (t = 0; t < outputDepth; t++) { it = t*dT + kt; for (y = 0; y < outputHeight; y++) { iy = y*dH + kh; for(x = 0; x < outputWidth; x++) { ix = x*dW + kw; memcpy(dst+t*outputHeight*outputWidth+y*outputWidth+x, src+it*inputHeight*inputWidth+iy*inputWidth+ix, sizeof(real)*(1)); } } } } } } static void THNN_(VolumetricConvolutionMM_updateOutput_frame)( THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, int64_t nInputPlane, int64_t inputDepth, int64_t inputWidth, int64_t inputHeight, int64_t nOutputPlane, int64_t outputDepth, int64_t outputWidth, int64_t outputHeight) { int64_t i; THTensor *output2d; THNN_(unfolded_copy_vol)( finput, input, kT, kW, kH, dT, dW, dH, pT, pW, pH, nInputPlane, inputDepth, inputWidth, inputHeight, outputDepth, outputWidth, outputHeight ); output2d = THTensor_(newWithStorage2d)( output->storage, output->storageOffset, nOutputPlane, -1, outputDepth*outputHeight*outputWidth, -1 ); if (bias) { for (i = 0; i < nOutputPlane; i++) { THVector_(fill)( output->storage->data+output->storageOffset+output->stride[0]*i, THTensor_(get1d)(bias, i), outputDepth*outputHeight*outputWidth ); } } else { THTensor_(zero)(output); } THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput); THTensor_(free)(output2d); } 
void THNN_(VolumetricConvolutionMM_updateOutput)( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH) { int dimf = 0; int dimt = 1; int dimh = 2; int dimw = 3; int64_t nInputPlane; int64_t inputDepth; int64_t inputHeight; int64_t inputWidth; int64_t nOutputPlane; int64_t outputDepth; int64_t outputHeight; int64_t outputWidth; THNN_(VolumetricConvolutionMM_shapeCheck)( state, input, NULL, weight, bias, kT, kW, kH, dT, dW, dH, pT, pW, pH); input = THTensor_(newContiguous)(input); if (input->nDimension == 5) { dimf++; dimt++; dimh++; dimw++; } nInputPlane = input->size[dimf]; inputDepth = input->size[dimt]; inputHeight = input->size[dimh]; inputWidth = input->size[dimw]; nOutputPlane = weight->size[0]; outputDepth = (inputDepth + 2*pT - kT) / dT + 1; outputHeight = (inputHeight + 2*pH - kH) / dH + 1; outputWidth = (inputWidth + 2*pW - kW) / dW + 1; weight = THNN_(view_weight)(weight); if (input->nDimension == 4) { THTensor_(resize2d)(finput, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth); THTensor_(resize4d)(output, nOutputPlane, outputDepth, outputHeight, outputWidth); THNN_(VolumetricConvolutionMM_updateOutput_frame)( input, output, weight, bias, finput, kT, kW, kH, dT, dW, dH, pT, pW, pH, nInputPlane, inputDepth, inputWidth, inputHeight, nOutputPlane, outputDepth, outputWidth, outputHeight ); } else { int64_t T = input->size[0]; int64_t t; THTensor_(resize3d)(finput, T, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth); THTensor_(resize5d)(output, T, nOutputPlane, outputDepth, outputHeight, outputWidth); // #pragma omp parallel for private(t) for (t = 0; t < T; t++) { THTensor *input_t = THTensor_(newSelect)(input, 0, t); THTensor *output_t = THTensor_(newSelect)(output, 0, t); THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); THNN_(VolumetricConvolutionMM_updateOutput_frame)( input_t, output_t, weight, bias, finput_t, kT, kW, kH, dT, dW, dH, pT, pW, pH, nInputPlane, inputDepth, inputWidth, inputHeight, nOutputPlane, outputDepth, outputWidth, outputHeight ); THTensor_(free)(input_t); THTensor_(free)(output_t); THTensor_(free)(finput_t); } } THTensor_(free)(input); THTensor_(free)(weight); } static void THNN_(VolumetricConvolutionMM_updateGradInput_frame)( THTensor *gradInput, THTensor *gradOutput, THTensor *weight, THTensor *fgradInput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH) { THTensor *gradOutput2d = THTensor_(newWithStorage2d)( gradOutput->storage, gradOutput->storageOffset, gradOutput->size[0], -1, gradOutput->size[1]*gradOutput->size[2]*gradOutput->size[3], -1 ); THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d); THTensor_(free)(gradOutput2d); THTensor_(zero)(gradInput); THNN_(unfolded_acc_vol)( fgradInput, gradInput, kT, kW, kH, dT, dW, dH, pT, pW, pH, gradInput->size[0], gradInput->size[1], gradInput->size[3], gradInput->size[2], gradOutput->size[1], gradOutput->size[3], gradOutput->size[2] ); } void THNN_(VolumetricConvolutionMM_updateGradInput)( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, THTensor *finput, THTensor *fgradInput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH) { int nOutputPlane = (int)weight->size[0]; THNN_(VolumetricConvolutionMM_shapeCheck)( state, input, gradOutput, weight, NULL, kT, kW, kH, dT, dW, dH, pT, pW, pH); input = THTensor_(newContiguous)(input); gradOutput = 
THTensor_(newContiguous)(gradOutput); weight = THNN_(view_weight)(weight); THTensor_(resizeAs)(gradInput, input); THTensor_(resizeAs)(fgradInput, finput); // depending on the BLAS library, fgradInput (result tensor) might // be left uninitialized on zero alpha, which might lead to weird behavior // hence, to be safe, zero it THTensor_(zero)(fgradInput); THTensor *tweight = THTensor_(new)(); THTensor_(transpose)(tweight, weight, 0, 1); if (input->nDimension == 4) { THNN_(VolumetricConvolutionMM_updateGradInput_frame)( gradInput, gradOutput, tweight, fgradInput, kT, kW, kH, dT, dW, dH, pT, pW, pH ); } else { int64_t T = input->size[0]; int64_t t; //#pragma omp parallel for private(t) for (t = 0; t < T; t++) { THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t); THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t); THNN_(VolumetricConvolutionMM_updateGradInput_frame)( gradInput_t, gradOutput_t, tweight, fgradInput_t, kT, kW, kH, dT, dW, dH, pT, pW, pH ); THTensor_(free)(gradInput_t); THTensor_(free)(gradOutput_t); THTensor_(free)(fgradInput_t); } } THTensor_(free)(tweight); THTensor_(free)(input); THTensor_(free)(gradOutput); THTensor_(free)(weight); } static void THNN_(VolumetricConvolutionMM_accGradParameters_frame)( THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *finput, real scale) { int64_t i; THTensor *gradOutput2d = THTensor_(newWithStorage2d)( gradOutput->storage, gradOutput->storageOffset, gradOutput->size[0], -1, gradOutput->size[1]*gradOutput->size[2]*gradOutput->size[3], -1 ); THTensor *tfinput = THTensor_(new)(); THTensor_(transpose)(tfinput, finput, 0, 1); THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, tfinput); THTensor_(free)(tfinput); if (gradBias) { for (i = 0; i < gradBias->size[0]; i++) { int64_t k; real sum = 0; real *data = gradOutput2d->storage->data + gradOutput2d->storageOffset + i*gradOutput2d->stride[0]; for (k = 0; k < gradOutput2d->size[1]; k++) sum += data[k]; (gradBias->storage->data + gradBias->storageOffset)[i] += scale * sum; } } THTensor_(free)(gradOutput2d); } void THNN_(VolumetricConvolutionMM_accGradParameters)( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, accreal scale_) { real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); int nOutputPlane = (int)gradWeight->size[0]; THNN_(VolumetricConvolutionMM_shapeCheck)( state, input, gradOutput, gradWeight, gradBias, kT, kW, kH, dT, dW, dH, pT, pW, pH); input = THTensor_(newContiguous)(input); gradOutput = THTensor_(newContiguous)(gradOutput); gradWeight = THNN_(view_weight)(gradWeight); if (input->nDimension == 4) // non-batch mode { THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight, gradBias, finput, scale); } else // batch mode { int64_t T = input->size[0]; int64_t t; for (t = 0; t < T; t++) { THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight, gradBias, finput_t, scale); THTensor_(free)(gradOutput_t); THTensor_(free)(finput_t); } } THTensor_(free)(input); THTensor_(free)(gradOutput); THTensor_(free)(gradWeight); } #endif
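/*
 * Illustration only, not part of the THNN sources: updateOutput above is
 * im2col + GEMM -- unfolded_copy_vol lays every kT*kH*kW input patch out
 * as a column of finput, and THTensor_(addmm) multiplies the 2-D weight
 * view by that column matrix. A minimal 1-D analogue of the unfolding,
 * with kernel size k, stride d, and zero padding p (hypothetical helper,
 * using the same index rule it = t*d - p + kk as the volumetric code):
 */
static void unfold1d_demo(const float *input, float *col,
                          int inputLen, int k, int d, int p)
{
  int outputLen = (inputLen + 2*p - k) / d + 1;
  for (int kk = 0; kk < k; kk++)         /* one row of col per kernel tap */
  {
    for (int t = 0; t < outputLen; t++)
    {
      int it = t*d - p + kk;             /* source index; outside => zero pad */
      col[kk*outputLen + t] = (it < 0 || it >= inputLen) ? 0.f : input[it];
    }
  }
  /* the convolution output is then the GEMM (1 x k) * (k x outputLen) */
}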
mandel_par.c
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <sys/time.h>
#include "pngwriter.h"
#include "consts.h"
#include <omp.h>

int main (int argc, char** argv)
{
    png_data* pPng = png_create (IMAGE_WIDTH, IMAGE_HEIGHT);

    double x, y, x2, y2, cx, cy;
    cy = MIN_Y;

    double fDeltaX = (MAX_X - MIN_X) / (double) IMAGE_WIDTH;
    double fDeltaY = (MAX_Y - MIN_Y) / (double) IMAGE_HEIGHT;

    long nTotalIterationsCount = 0;

    unsigned long nTimeStart = get_time ();

    long i, j, n;
    n = 0;

    // the per-thread iteration counts are combined with a reduction;
    // incrementing a shared counter inside the loop would be a data race
    #pragma omp parallel for private(cx,cy,x,y,x2,y2,n,i,j) \
            reduction(+:nTotalIterationsCount)
    for (j = 0; j < IMAGE_HEIGHT; j++)
    {
        cy = MIN_Y + fDeltaY * j;
        cx = MIN_X;
        for (i = 0; i < IMAGE_WIDTH; i++){
            x = cx;
            y = cy;
            x2 = x * x;
            y2 = y * y;
            // compute the orbit z, f(z), f²(z), f³(z), ...
            // count the iterations until the orbit leaves the circle |z|=2.
            // stop if the number of iterations exceeds the bound MAX_ITERS.
            for (n = 0; (n < MAX_ITERS) && (x2 + y2 < 4); n++)
            {
                y = 2 * x * y + cy;
                x = x2 - y2 + cx;
                x2 = x * x;
                y2 = y * y;
            }
            // n indicates if the point belongs to the mandelbrot set
            // plot the number of iterations at point (i, j)
            int c = ((long) n * 255) / MAX_ITERS;
            png_plot (pPng, i, j, c, c, c);
            nTotalIterationsCount += n; // accumulate the iterations spent on
                                        // this pixel, not one count per pixel
            cx += fDeltaX;
        }
    }
    unsigned long nTimeEnd = get_time ();

    // print benchmark data
    printf ("Total time: %g ms\n", (nTimeEnd - nTimeStart) / 1000.0);
    printf ("Image size: %ld x %ld = %ld Pixels\n",
            (long) IMAGE_WIDTH, (long) IMAGE_HEIGHT,
            (long) (IMAGE_WIDTH * IMAGE_HEIGHT));
    printf ("Total number of iterations: %ld\n", nTotalIterationsCount);
    printf ("Avg. time per pixel: %g µs\n",
            (nTimeEnd - nTimeStart) / (double) (IMAGE_WIDTH * IMAGE_HEIGHT));
    printf ("Avg. time per iteration: %g µs\n",
            (nTimeEnd - nTimeStart) / (double) nTotalIterationsCount);
    printf ("Iterations/second: %g\n",
            nTotalIterationsCount / (double) (nTimeEnd - nTimeStart) * 1e6);
    // assume there are 8 floating point operations per iteration
    printf ("MFlop/s: %g\n",
            nTotalIterationsCount * 8.0 / (double) (nTimeEnd - nTimeStart));

    png_write (pPng, "mandel.png");
    return 0;
}
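/*
 * Tuning note, an illustrative addition to the source above: rows near
 * the set boundary take far more iterations than rows that escape
 * quickly, so the default static schedule can leave threads idle. A
 * dynamic schedule is a common refinement for this kind of irregular
 * loop:
 *
 *     #pragma omp parallel for schedule(dynamic) \
 *             private(cx,cy,x,y,x2,y2,n,i,j) reduction(+:nTotalIterationsCount)
 */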
roulette_sim_omp.c
/*
 * Sequential simulation of a roulette wheel.
 *
 * American wheel has 38 slots:
 *  -2 are 'green' (0 & 00)
 *     house ALWAYS wins on green
 *  -18 are red   (1..18)
 *  -18 are black (19..36)
 *  -Our user always bets "red"
 *  -Odds should be: 18/38 or 47.37%
 *
 * History:
 *  Dave Valentine (Slippery Rock University): Original C++ program
 *  Libby Shoop    (Macalester University)   : Adapted for CS in
 *                                             Parallel Module
 *  Justin Ragatz  (UW-La Crosse)            : Adapted for OnRamp Module
 *                                             rewritten in C.
 */
#include "roulette_sim_omp.h"

int main(int argc, char** argv)
{
    int num_spins = 1; // spins per trial
    int num_wins;      // wins per trial
    double start_time;
    double end_time;

    // Get number of threads
    n_threads = 1;
    if (argc > 1) {
        n_threads = atoi(argv[1]);
        if (n_threads > 32) {
            n_threads = 32;
        }
    }

    printf("\n Starting simulation with %d thread(s).\n\n", n_threads);
    printf(" ---------------------------------------------------\n");
    printf(" | Number of spins | Number of wins | Percent wins |\n");
    printf(" ---------------------------------------------------\n");

    start_time = omp_get_wtime();

    /*
     * begin simulation
     */
    while (num_spins < MAX) {
        // spin wheel
        num_wins = get_num_wins(num_spins);

        // print results
        show_results(num_spins, num_wins);

        // double spins for next simulation
        num_spins += num_spins;
    }

    end_time = omp_get_wtime();

    /*
     * all done
     */
    printf(" ---------------------------------------------------\n\n");
    printf(" Elapsed wall clock time: %7.2f\n\n",
           (double)(end_time - start_time));
    printf(" *** Normal Termination ***\n\n");

    return 0;
}

int get_num_wins(int num_spins)
{
    int wins   = 0;  // win counter
    int spin   = 0;  // loop control variable
    int my_bet = 10; // amount we bet per spin
    int tid;

#pragma omp parallel num_threads(n_threads) default(none) \
    shared(num_spins, my_bet, seeds) \
    private(spin, tid) \
    reduction(+:wins)
    {
        tid = omp_get_thread_num();

        seeds[tid] = abs( ( ( time(NULL) * 181) * (tid - 83) * 359 ) % 104729 );

#pragma omp for
        for (spin = 0; spin < num_spins; spin++) {
            if (spin_red(my_bet, tid) > 0) {
                wins++;
            }
        }
    }

    return wins;
}

int spin_red(int bet, int tid)
{
    int payout = -1;
    int slot = rand_int_between(1, 38, tid);

    /*
     * 1..18  red   - win
     * 19..36 black - lose
     * 37..38 green - lose half the bet
     */
    if (slot <= 18) {
        payout = bet;
    } else if (slot <= 36) {
        payout = -bet;
    } else {
        payout = -(bet/2);
    }

    return payout;
}

int pretty_int(int n, char* s)
{
    int digit;
    int digit_cnt = 11;

    if (NULL == s) {
        return -1;
    }

    do {
        digit = n % 10;
        n = n/10;

        if (8 == digit_cnt || 4 == digit_cnt) {
            s[digit_cnt] = ',';
            digit_cnt--;
        }

        s[digit_cnt] = '0' + digit;
        digit_cnt--;
    } while (n > 0);

    return 0;
}

void show_results(int num_spins, int num_wins)
{
    char spins_string[] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
                           ' ', ' ', ' ', '\0'};
    char wins_string[]  = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
                           ' ', ' ', ' ', '\0'};
    double percent = 100.0 * (double)num_wins / (double)num_spins;

    pretty_int(num_spins, spins_string);
    pretty_int(num_wins, wins_string);

    printf(" | %15s | %14s | %11.2f%% |\n",
           spins_string, wins_string, percent);
}

int rand_int_between(int low, int hi, int tid)
{
    return rand_r(&seeds[tid]) % (hi - low + 1) + low;
}
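/*
 * Sanity check, an illustrative addition: with slots 1..38 and wins on
 * 1..18, the expected win rate quoted in the header comment is 18/38.
 */
#include <stdio.h>

static void print_expected_odds_demo(void)
{
    printf("expected win rate: %.2f%%\n", 100.0 * 18.0 / 38.0); /* 47.37% */
}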
GB_binop__ge_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__ge_fp32 // A.*B function (eWiseMult): GB_AemultB__ge_fp32 // A*D function (colscale): GB_AxD__ge_fp32 // D*A function (rowscale): GB_DxB__ge_fp32 // C+=B function (dense accum): GB_Cdense_accumB__ge_fp32 // C+=b function (dense accum): GB_Cdense_accumb__ge_fp32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_fp32 // C=scalar+B GB_bind1st__ge_fp32 // C=scalar+B' GB_bind1st_tran__ge_fp32 // C=A+scalar GB_bind2nd__ge_fp32 // C=A'+scalar GB_bind2nd_tran__ge_fp32 // C type: bool // A type: float // B,b type: float // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x >= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_FP32 || GxB_NO_GE_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__ge_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__ge_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__ge_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__ge_fp32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__ge_fp32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__ge_fp32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const 
int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__ge_fp32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__ge_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; float bij = Bx [p] ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__ge_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB_bind1st_tran__ge_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const 
int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB_bind2nd_tran__ge_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
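//------------------------------------------------------------------------------
// illustration: driving the bind1st kernel directly (not public API)
//------------------------------------------------------------------------------

// A minimal sketch, assuming GB.h is in scope and the kernel is enabled
// (GB_DISABLE false). These Generated/ kernels are GraphBLAS internals,
// so this is illustrative only; Bb == NULL is taken to mean "no bitmap,
// all entries present", as the GBB test in the kernel implies.

static void bind1st_ge_fp32_demo (void)
{
    float x = 1.5f ;
    float Bx [4] = { 0.f, 1.5f, 2.f, -3.f } ;
    bool  Cx [4] ;
    GB_bind1st__ge_fp32 ((GB_void *) Cx, (const GB_void *) &x,
        (const GB_void *) Bx, /* Bb */ NULL, /* anz */ 4, /* nthreads */ 1) ;
    // Cx is now { true, true, false, true } : Cx [p] = (x >= Bx [p])
}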