source
stringlengths
3
92
c
stringlengths
26
2.25M
soxr.c
/* SoX Resampler Library Copyright (c) 2007-13 robs@users.sourceforge.net * Licence for this file: LGPL v2.1 See LICENCE for details. */ #include <math.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "soxr.h" #include "data-io.h" #include "internal.h" char const * soxr_version(void) { return "libsoxr-" SOXR_THIS_VERSION_STR; } typedef void sample_t; /* float or double */ typedef void (* fn_t)(void); typedef fn_t control_block_t[10]; #define resampler_input (*(sample_t * (*)(void *, sample_t * samples, size_t n))p->control_block[0]) #define resampler_process (*(void (*)(void *, size_t))p->control_block[1]) #define resampler_output (*(sample_t const * (*)(void *, sample_t * samples, size_t * n))p->control_block[2]) #define resampler_flush (*(void (*)(void *))p->control_block[3]) #define resampler_close (*(void (*)(void *))p->control_block[4]) #define resampler_delay (*(double (*)(void *))p->control_block[5]) #define resampler_sizes (*(void (*)(size_t * shared, size_t * channel))p->control_block[6]) #define resampler_create (*(char const * (*)(void * channel, void * shared, double io_ratio, soxr_quality_spec_t * q_spec, soxr_runtime_spec_t * r_spec, double scale))p->control_block[7]) #define resampler_set_io_ratio (*(void (*)(void *, double io_ratio, size_t len))p->control_block[8]) #define resampler_id (*(char const * (*)(void))p->control_block[9]) typedef void * resampler_t; /* For one channel. */ typedef void * resampler_shared_t; /* Between channels. 
*/ typedef void (* deinterleave_t)(sample_t * * dest, soxr_datatype_t data_type, void const * * src0, size_t n, unsigned ch); typedef size_t (* interleave_t)(soxr_datatype_t data_type, void * * dest, sample_t const * const * src, size_t, unsigned, unsigned long *); struct soxr { unsigned num_channels; double io_ratio; soxr_error_t error; soxr_quality_spec_t q_spec; soxr_io_spec_t io_spec; soxr_runtime_spec_t runtime_spec; void * input_fn_state; soxr_input_fn_t input_fn; size_t max_ilen; resampler_shared_t shared; resampler_t * resamplers; control_block_t control_block; deinterleave_t deinterleave; interleave_t interleave; void * * channel_ptrs; size_t clips; unsigned long seed; int flushing; }; #define RESET_ON_CLEAR (1u<<31) /* TODO: these should not be here. */ #define TO_3dB(a) ((1.6e-6*a-7.5e-4)*a+.646) #define LOW_Q_BW0 (1385 / 2048.) /* 0.67625 rounded to be a FP exact. */ soxr_quality_spec_t soxr_quality_spec(unsigned long recipe, unsigned long flags) { soxr_quality_spec_t spec, * p = &spec; unsigned quality = recipe & 0xf; double rej; memset(p, 0, sizeof(*p)); if (quality > 13) { p->e = "invalid quality type"; return spec; } flags |= quality < SOXR_LSR0Q? RESET_ON_CLEAR : 0; if (quality == 13) quality = 6; else if (quality > 10) quality = 0; p->phase_response = "\62\31\144"[(recipe & 0x30) >> 4]; p->stopband_begin = 1; p->precision = !quality? 0: quality < 3? 16 : quality < 8? 4 + quality * 4 : 55 - quality * 4; rej = p->precision * linear_to_dB(2.); p->flags = flags; if (quality < 8) { p->passband_end = quality == 1? 
LOW_Q_BW0 : 1 - .05 / TO_3dB(rej); if (quality <= 2) p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM; } else { static float const bw[] = {.931f, .832f, .663f}; p->passband_end = bw[quality - 8]; if (quality - 8 == 2) p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM; } if (recipe & SOXR_STEEP_FILTER) p->passband_end = 1 - .01 / TO_3dB(rej); return spec; } char const * soxr_engine(soxr_t p) { return resampler_id(); } size_t * soxr_num_clips(soxr_t p) { return &p->clips; } soxr_error_t soxr_error(soxr_t p) { return p->error; } soxr_runtime_spec_t soxr_runtime_spec(unsigned num_threads) { soxr_runtime_spec_t spec, * p = &spec; memset(p, 0, sizeof(*p)); p->log2_min_dft_size = 10; p->log2_large_dft_size = 17; p->coef_size_kbytes = 400; p->num_threads = num_threads; return spec; } soxr_io_spec_t soxr_io_spec( soxr_datatype_t itype, soxr_datatype_t otype) { soxr_io_spec_t spec, * p = &spec; memset(p, 0, sizeof(*p)); if ((itype | otype) >= SOXR_SPLIT * 2) p->e = "invalid io datatype(s)"; else { p->itype = itype; p->otype = otype; p->scale = 1; } return spec; } #if HAVE_SIMD static bool cpu_has_simd(void) { #if defined __x86_64__ || defined _M_X64 return true; #elif defined __GNUC__ && defined i386 uint32_t eax, ebx, ecx, edx; __asm__ __volatile__ ( "pushl %%ebx \n\t" "cpuid \n\t" "movl %%ebx, %1\n\t" "popl %%ebx \n\t" : "=a"(eax), "=r"(ebx), "=c"(ecx), "=d"(edx) : "a"(1) : "cc" ); return !!(edx & 0x06000000); #elif defined _MSC_VER && defined _M_IX86 uint32_t d; __asm { xor eax, eax inc eax push ebx cpuid pop ebx mov d, edx } return !!(d & 0x06000000); #endif return false; } #endif extern control_block_t _soxr_rate32s_cb, _soxr_rate32_cb, _soxr_rate64_cb, _soxr_vr32_cb; soxr_t soxr_create( double input_rate, double output_rate, unsigned num_channels, soxr_error_t * error0, soxr_io_spec_t const * io_spec, soxr_quality_spec_t const * q_spec, soxr_runtime_spec_t const * runtime_spec) { double io_ratio = output_rate? input_rate? 
input_rate / output_rate : -1 : input_rate? -1 : 0; static const float datatype_full_scale[] = {1, 1, 65536.*32768, 32768}; soxr_t p = 0; soxr_error_t error = 0; if (q_spec && q_spec->e) error = q_spec->e; else if (io_spec && (io_spec->itype | io_spec->otype) >= SOXR_SPLIT * 2) error = "invalid io datatype(s)"; if (!error && !(p = calloc(sizeof(*p), 1))) error = "malloc failed"; if (p) { p->q_spec = q_spec? *q_spec : soxr_quality_spec(SOXR_HQ, 0); if (q_spec) { /* Backwards compatibility with original API: */ if (p->q_spec.passband_end > 2) p->q_spec.passband_end /= 100; if (p->q_spec.stopband_begin > 2) p->q_spec.stopband_begin = 2 - p->q_spec.stopband_begin / 100; } p->io_ratio = io_ratio; p->num_channels = num_channels; if (io_spec) p->io_spec = *io_spec; else p->io_spec.scale = 1; p->runtime_spec = runtime_spec? *runtime_spec : soxr_runtime_spec(1); p->io_spec.scale *= datatype_full_scale[p->io_spec.otype & 3] / datatype_full_scale[p->io_spec.itype & 3]; p->seed = (unsigned long)time(0) ^ (unsigned long)(size_t)p; #if HAVE_SINGLE_PRECISION if (!HAVE_DOUBLE_PRECISION || (p->q_spec.precision <= 20 && !(p->q_spec.flags & SOXR_DOUBLE_PRECISION)) || (p->q_spec.flags & SOXR_VR)) { p->deinterleave = (deinterleave_t)_soxr_deinterleave_f; p->interleave = (interleave_t)_soxr_interleave_f; memcpy(&p->control_block, (p->q_spec.flags & SOXR_VR)? &_soxr_vr32_cb : #if HAVE_SIMD cpu_has_simd()? 
&_soxr_rate32s_cb : #endif &_soxr_rate32_cb, sizeof(p->control_block)); } #if HAVE_DOUBLE_PRECISION else #endif #endif #if HAVE_DOUBLE_PRECISION { p->deinterleave = (deinterleave_t)_soxr_deinterleave; p->interleave = (interleave_t)_soxr_interleave; memcpy(&p->control_block, &_soxr_rate64_cb, sizeof(p->control_block)); } #endif if (p->num_channels && io_ratio) error = soxr_set_io_ratio(p, io_ratio, 0); } if (error) soxr_delete(p), p = 0; if (error0) *error0 = error; return p; } soxr_error_t soxr_set_input_fn(soxr_t p, soxr_input_fn_t input_fn, void * input_fn_state, size_t max_ilen) { p->input_fn_state = input_fn_state; p->input_fn = input_fn; p->max_ilen = max_ilen? max_ilen : (size_t)-1; return 0; } static void soxr_delete0(soxr_t p) { unsigned i; if (p->resamplers) for (i = 0; i < p->num_channels; ++i) { if (p->resamplers[i]) resampler_close(p->resamplers[i]); free(p->resamplers[i]); } free(p->resamplers); free(p->channel_ptrs); free(p->shared); memset(p, 0, sizeof(*p)); } double soxr_delay(soxr_t p) { return (p && !p->error && p->resamplers)? 
resampler_delay(p->resamplers[0]) : 0; } static soxr_error_t fatal_error(soxr_t p, soxr_error_t error) { soxr_delete0(p); return p->error = error; } static soxr_error_t initialise(soxr_t p) { unsigned i; size_t shared_size, channel_size; resampler_sizes(&shared_size, &channel_size); p->channel_ptrs = calloc(sizeof(*p->channel_ptrs), p->num_channels); p->shared = calloc(shared_size, 1); p->resamplers = calloc(sizeof(*p->resamplers), p->num_channels); if (!p->shared || !p->channel_ptrs || !p->resamplers) return fatal_error(p, "malloc failed"); for (i = 0; i < p->num_channels; ++i) { soxr_error_t error; if (!(p->resamplers[i] = calloc(channel_size, 1))) return fatal_error(p, "malloc failed"); error = resampler_create( p->resamplers[i], p->shared, p->io_ratio, &p->q_spec, &p->runtime_spec, p->io_spec.scale); if (error) return fatal_error(p, error); } return 0; } soxr_error_t soxr_set_num_channels(soxr_t p, unsigned num_channels) { if (!p) return "invalid soxr_t pointer"; if (num_channels == p->num_channels) return p->error; if (!num_channels) return "invalid # of channels"; if (p->resamplers) return "# of channels can't be changed"; p->num_channels = num_channels; return soxr_set_io_ratio(p, p->io_ratio, 0); } soxr_error_t soxr_set_io_ratio(soxr_t p, double io_ratio, size_t slew_len) { unsigned i; soxr_error_t error; if (!p) return "invalid soxr_t pointer"; if ((error = p->error)) return error; if (!p->num_channels) return "must set # channels before O/I ratio"; if (io_ratio <= 0) return "I/O ratio out-of-range"; if (!p->channel_ptrs) { p->io_ratio = io_ratio; return initialise(p); } if (p->control_block[8]) { for (i = 0; !error && i < p->num_channels; ++i) resampler_set_io_ratio(p->resamplers[i], io_ratio, slew_len); return error; } return fabs(p->io_ratio - io_ratio) < 1e-15? 0 : "Varying O/I ratio is not supported with this quality level"; } void soxr_delete(soxr_t p) { if (p) soxr_delete0(p), free(p); } soxr_error_t soxr_clear(soxr_t p) /* TODO: this, properly. 
*/ { if (p) { struct soxr tmp = *p; soxr_delete0(p); memset(p, 0, sizeof(*p)); p->input_fn = tmp.input_fn; p->runtime_spec = tmp.runtime_spec; p->q_spec = tmp.q_spec; p->io_spec = tmp.io_spec; p->num_channels = tmp.num_channels; p->input_fn_state = tmp.input_fn_state; memcpy(p->control_block, tmp.control_block, sizeof(p->control_block)); p->deinterleave = tmp.deinterleave; p->interleave = tmp.interleave; return (p->q_spec.flags & RESET_ON_CLEAR)? soxr_set_io_ratio(p, tmp.io_ratio, 0) : 0; } return "invalid soxr_t pointer"; } static void soxr_input_1ch(soxr_t p, unsigned i, soxr_cbuf_t src, size_t len) { sample_t * dest = resampler_input(p->resamplers[i], NULL, len); (*p->deinterleave)(&dest, p->io_spec.itype, &src, len, 1); } static size_t soxr_input(soxr_t p, void const * in, size_t len) { bool separated = !!(p->io_spec.itype & SOXR_SPLIT); unsigned i; if (!p || p->error) return 0; if (!in && len) {p->error = "null input buffer pointer"; return 0;} if (!len) { p->flushing = true; return 0; } if (separated) for (i = 0; i < p->num_channels; ++i) soxr_input_1ch(p, i, ((soxr_cbufs_t)in)[i], len); else { for (i = 0; i < p->num_channels; ++i) p->channel_ptrs[i] = resampler_input(p->resamplers[i], NULL, len); (*p->deinterleave)( (sample_t **)p->channel_ptrs, p->io_spec.itype, &in, len, p->num_channels); } return len; } static size_t soxr_output_1ch(soxr_t p, unsigned i, soxr_buf_t dest, size_t len, bool separated) { sample_t const * src; if (p->flushing) resampler_flush(p->resamplers[i]); resampler_process(p->resamplers[i], len); src = resampler_output(p->resamplers[i], NULL, &len); if (separated) p->clips += (p->interleave)(p->io_spec.otype, &dest, &src, len, 1, (p->io_spec.flags & SOXR_NO_DITHER)? 
0 : &p->seed); else p->channel_ptrs[i] = (void /* const */ *)src; return len; } static size_t soxr_output_no_callback(soxr_t p, soxr_buf_t out, size_t len) { unsigned u; size_t done = 0; bool separated = !!(p->io_spec.otype & SOXR_SPLIT); #if defined _OPENMP int i; if (!p->runtime_spec.num_threads && p->num_channels > 1) #pragma omp parallel for for (i = 0; i < (int)p->num_channels; ++i) { size_t done1; done1 = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], len, separated); if (!i) done = done1; } else #endif for (u = 0; u < p->num_channels; ++u) done = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], len, separated); if (!separated) p->clips += (p->interleave)(p->io_spec.otype, &out, (sample_t const * const *)p->channel_ptrs, done, p->num_channels, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed); return done; } size_t soxr_output(soxr_t p, void * out, size_t len0) { size_t odone, odone0 = 0, olen = len0, osize, idone; size_t ilen = min(p->max_ilen, (size_t)ceil((double)olen *p->io_ratio)); void const * in = out; /* Set to !=0, so that caller may leave unset. 
*/ bool was_flushing; if (!p || p->error) return 0; if (!out && len0) {p->error = "null output buffer pointer"; return 0;} do { odone = soxr_output_no_callback(p, out, olen); odone0 += odone; if (odone0 == len0 || !p->input_fn || p->flushing) break; osize = soxr_datatype_size(p->io_spec.otype) * p->num_channels; out = (char *)out + osize * odone; olen -= odone; idone = p->input_fn(p->input_fn_state, &in, ilen); was_flushing = p->flushing; if (!in) p->error = "input function reported failure"; else soxr_input(p, in, idone); } while (odone || idone || (!was_flushing && p->flushing)); return odone0; } static size_t soxr_i_for_o(soxr_t p, size_t olen, size_t ilen) { size_t result; #if 0 if (p->runtime_spec.flags & SOXR_STRICT_BUFFERING) result = rate_i_for_o(p->resamplers[0], olen); else #endif result = (size_t)ceil((double)olen * p->io_ratio); return min(result, ilen); } #if 0 static size_t soxr_o_for_i(soxr_t p, size_t ilen, size_t olen) { size_t result = (size_t)ceil((double)ilen / p->io_ratio); return min(result, olen); } #endif soxr_error_t soxr_process(soxr_t p, void const * in , size_t ilen0, size_t * idone0, void * out, size_t olen , size_t * odone0) { size_t ilen, idone, odone = 0; unsigned u; bool flush_requested = false; if (!p) return "null pointer"; if (!in) flush_requested = true, ilen = ilen0 = 0; else { if ((ptrdiff_t)ilen0 < 0) flush_requested = true, ilen0 = ~ilen0; if (idone0 && (1 || flush_requested)) ilen = soxr_i_for_o(p, olen, ilen0); else ilen = ilen0/*, olen = soxr_o_for_i(p, ilen, olen)*/; } p->flushing |= ilen == ilen0 && flush_requested; if (!out && !in) idone = ilen; else if (p->io_spec.itype & p->io_spec.otype & SOXR_SPLIT) { /* Both i & o */ #if defined _OPENMP int i; if (!p->runtime_spec.num_threads && p->num_channels > 1) #pragma omp parallel for for (i = 0; i < (int)p->num_channels; ++i) { size_t done; if (in) soxr_input_1ch(p, (unsigned)i, ((soxr_cbufs_t)in)[i], ilen); done = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], 
olen, true); if (!i) odone = done; } else #endif for (u = 0; u < p->num_channels; ++u) { if (in) soxr_input_1ch(p, u, ((soxr_cbufs_t)in)[u], ilen); odone = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], olen, true); } idone = ilen; } else { idone = ilen? soxr_input (p, in , ilen) : 0; odone = soxr_output(p, out, olen); } if (idone0) *idone0 = idone; if (odone0) *odone0 = odone; return p->error; } soxr_error_t soxr_oneshot( double irate, double orate, unsigned num_channels, void const * in , size_t ilen, size_t * idone, void * out, size_t olen, size_t * odone, soxr_io_spec_t const * io_spec, soxr_quality_spec_t const * q_spec, soxr_runtime_spec_t const * runtime_spec) { soxr_t resampler; soxr_error_t error = q_spec? q_spec->e : 0; if (!error) { soxr_quality_spec_t q_spec1; if (!q_spec) q_spec1 = soxr_quality_spec(SOXR_LQ, 0), q_spec = &q_spec1; resampler = soxr_create(irate, orate, num_channels, &error, io_spec, q_spec, runtime_spec); } if (!error) { error = soxr_process(resampler, in, ~ilen, idone, out, olen, odone); soxr_delete(resampler); } return error; } soxr_error_t soxr_set_error(soxr_t p, soxr_error_t error) { if (!p) return "null pointer"; if (!p->error && p->error != error) return p->error; p->error = error; return 0; }
wino_conv_kernel_arm.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: haoluo@openailab.com */ #include <stdint.h> #include <stdlib.h> #include <math.h> #include <arm_neon.h> #include "wino_conv_kernel_arm.h" #define TILE 4 #define ELEM_SIZE ((TILE + 2) * (TILE + 2)) #define WINO_MAX(a, b) ((a) > (b) ? (a) : (b)) #define WINO_MIN(a, b) ((a) < (b) ? 
(a) : (b)) #ifdef __aarch64__ #define PER_OUT_CHAN 16 void tran_inp_4(float*, float*, float*, int, int, int); void wino_sgemm_4x16_A72(float* output, const float* input, const float* kernel, long cin, short stride_save); void wino_sgemm_4x4_A72(float* output, const float* input, const float* kernel, long cin, short stride_save); void wino_sgemm_1x16(float* output, const float* input, const float* kernel, long cin); void wino_sgemm_1x4(float* output, const float* input, const float* kernel, long cin); void tran_out_4(float*, float*, int, float*, float*, int); #else #define PER_OUT_CHAN 12 void wino_sgemm_4x12_A17(float* output, const float* input, const float* kernel, long cin); void wino_sgemm_4x4_A17(float* output, const float* input, const float* kernel, long cin); void wino_sgemm_1x12_A17(float* output, const float* input, const float* kernel, long cin); // need to be optimized by neon static inline void wino_sgemm_1x4_cpu(float* output, const float* input, const float* kernel, long cin) { for (int i = 0; i < 4; i++) { float sum = 0; for (int k = 0; k < cin; k++) { sum += input[k] * kernel[k * 4 + i]; } output[i] = sum; } } #endif static inline void trans_kernel_f43(float* ker, float* trans_ker) { /* float G[18]={ 1./4 , 0. , 0. , -1./6 , -1./6 , -1./6 , -1./6 , 1./6 , -1./6 , 1./24 , 1./12 , 1./6 , 1./24 , -1./12 , 1./6 , 0. , 0. , 1. }; float GT[18]={ 1./4 , -1./6, -1./6 , 1./24, 1./24 , 0., 0., -1./6, 1./6 , 1./12, -1./12 , 0., 0., -1./6, -1./6 , 1./6, 1./6 , 1. }; */ float tmp[18] = {0}; float neg_r0_add_r2_x_1_6[6]; // (r0+r2)*1./6 float r0_1_4_add_r2_x_1_6[6]; // (r0*1/4 + r2)*1./6 float r1_1_6[6]; // r1*1/6 float r1_1_12[6]; // r1*1/12 float s_1_6 = 1. 
/ 6.f; for (int j = 0; j < 3; j++) { neg_r0_add_r2_x_1_6[j] = -(ker[j] + ker[6 + j]) * s_1_6; r0_1_4_add_r2_x_1_6[j] = (ker[j] * 0.25 + ker[6 + j]) * s_1_6; r1_1_6[j] = ker[3 + j] * s_1_6; r1_1_12[j] = r1_1_6[j] * 0.5; } for (int j = 0; j < 3; j++) { tmp[j] = ker[j] * 0.25; tmp[3 + j] = -r1_1_6[j] + neg_r0_add_r2_x_1_6[j]; tmp[6 + j] = r1_1_6[j] + neg_r0_add_r2_x_1_6[j]; tmp[9 + j] = r1_1_12[j] + r0_1_4_add_r2_x_1_6[j]; tmp[12 + j] = -r1_1_12[j] + r0_1_4_add_r2_x_1_6[j]; tmp[15 + j] = ker[6 + j]; } // gemm(6,3,3,G,ker,tmp); done int idx; for (int j = 0; j < 6; j++) { idx = j * 3; neg_r0_add_r2_x_1_6[j] = -(tmp[idx] + tmp[idx + 2]) * s_1_6; r0_1_4_add_r2_x_1_6[j] = (tmp[idx] * 0.25 + tmp[idx + 2]) * s_1_6; r1_1_6[j] = tmp[idx + 1] * s_1_6; r1_1_12[j] = r1_1_6[j] * 0.5; } for (int j = 0; j < 6; j++) { idx = j * 6; trans_ker[idx] = tmp[j * 3] * 0.25; trans_ker[idx + 1] = -r1_1_6[j] + neg_r0_add_r2_x_1_6[j]; trans_ker[idx + 2] = r1_1_6[j] + neg_r0_add_r2_x_1_6[j]; trans_ker[idx + 3] = r1_1_12[j] + r0_1_4_add_r2_x_1_6[j]; trans_ker[idx + 4] = -r1_1_12[j] + r0_1_4_add_r2_x_1_6[j]; trans_ker[idx + 5] = tmp[j * 3 + 2]; } // gemm(6,6,3,tmp,GT,trans_ker); done } static inline void transform_kernel_f43_tile(struct ir_tensor* filter, float* trans_ker) { int outc = filter->dims[0]; int inc = filter->dims[1]; float* kernel = ( float* )filter->data; float* ker_ptr = trans_ker; for (int i = 0; i < outc; i++) { for (int j = 0; j < inc; j++) { trans_kernel_f43(( float* )(kernel + 9 * (j + i * inc)), ker_ptr); ker_ptr += ELEM_SIZE; } } } // src [out_c][in_c][ELEM_SIZE] // --> dst [out_c/PER_OUT_CHAN][ELEM_SIZE][in_c][PER_OUT_CHAN] static inline void interleave_kernel(float* ker0, float* ker1, int out_c, int in_c) { float* ker1_ptr = ker1; int p, i, j; int nn_out = out_c / PER_OUT_CHAN; for (p = 0; p < nn_out; p++) { int pp = p * PER_OUT_CHAN; for (int s = 0; s < ELEM_SIZE; s++) { for (i = 0; i < in_c; i++) { for (j = 0; j < PER_OUT_CHAN; j++) { *ker1_ptr = ker0[((pp + j) * in_c + i) 
* ELEM_SIZE + s]; ker1_ptr++; } } } } // cout 4 for (p = (nn_out * PER_OUT_CHAN); p < (out_c & -4); p += 4) { for (int s = 0; s < ELEM_SIZE; s++) { for (i = 0; i < in_c; i++) { for (j = 0; j < 4; j++) { *ker1_ptr = ker0[((p + j) * in_c + i) * ELEM_SIZE + s]; ker1_ptr++; } } } } // cout 1 for (p = (out_c & -4); p < out_c; p++) { for (int s = 0; s < ELEM_SIZE; s++) { for (i = 0; i < in_c; i++) { *ker1_ptr = ker0[(p * in_c + i) * ELEM_SIZE + s]; ker1_ptr++; } } } } static inline void pad_input1(const float* input, float* inp_padded, int inc, int inh, int inw, int padded_h, int padded_w, int pad0, int pad1) { int padded_hw = padded_h * padded_w; float* pad_ptr; float* inp_ptr = ( float* )input; int resi_h = padded_h - pad0 - inh; int resi_w = padded_w - pad1 - inw; for (int c = 0; c < inc; c++) { pad_ptr = inp_padded + c * padded_hw; // pad h_top memset(pad_ptr, 0, padded_w * pad0 * sizeof(float)); pad_ptr += pad0 * padded_w; // pad h_mid for (int h = 0; h < inh; h++) { // pad w_left memset(pad_ptr, 0, pad1 * sizeof(float)); // pad w_mid memcpy(pad_ptr + pad1, inp_ptr, inw * sizeof(float)); // pad w_end memset(pad_ptr + pad1 + inw, 0, resi_w * sizeof(float)); inp_ptr += inw; pad_ptr += padded_w; } // pad h_bottom memset(pad_ptr, 0, padded_w * resi_h * sizeof(float)); } } static inline void trans_inp_1tile(float* input, float* inp_ptr, int ih, int jw, int c, int in_hw, int inw) { float* inp = ( float* )input + c * in_hw + ih * 4 * inw + jw * 4; float* inp0 = inp; float* inp1 = inp0 + inw; float* inp2 = inp1 + inw; float* inp3 = inp2 + inw; float* inp4 = inp3 + inw; float* inp5 = inp4 + inw; float tmp[36] = {0}; float r1_add_r2[6]; float r3_add_r4[6]; float r1_minus_r2[6]; float r3_minus_r4[6]; float r4_minus_r2[6]; float r1_minus_r3[6]; for (int j = 0; j < 6; j++) { r1_add_r2[j] = inp1[j] + inp2[j]; r1_minus_r2[j] = inp1[j] - inp2[j]; r3_add_r4[j] = inp3[j] + inp4[j]; r3_minus_r4[j] = inp3[j] - inp4[j]; r4_minus_r2[j] = inp4[j] - inp2[j]; r1_minus_r3[j] = inp1[j] - 
inp3[j]; } for (int j = 0; j < 6; j++) { tmp[j] = 4 * inp0[j] - 5 * inp2[j] + inp4[j]; tmp[6 + j] = r3_add_r4[j] - 4 * r1_add_r2[j]; tmp[12 + j] = 4 * r1_minus_r2[j] - r3_minus_r4[j]; tmp[18 + j] = r4_minus_r2[j] - 2 * r1_minus_r3[j]; tmp[24 + j] = r4_minus_r2[j] + 2 * r1_minus_r3[j]; tmp[30 + j] = 4 * inp1[j] - 5 * inp3[j] + inp5[j]; } float r1_4_minus_r3[6]; float r4_minus_4_r2[6]; float r4_minus_r2_[6]; float r1_minus_r3_x2[6]; for (int j = 0; j < 6; j++) { r4_minus_r2_[j] = tmp[j * 6 + 4] - tmp[j * 6 + 2]; r1_4_minus_r3[j] = 4 * tmp[j * 6 + 1] - tmp[j * 6 + 3]; r4_minus_4_r2[j] = tmp[j * 6 + 4] - 4 * tmp[j * 6 + 2]; r1_minus_r3_x2[j] = 2 * (tmp[j * 6 + 1] - tmp[j * 6 + 3]); } for (int j = 0; j < 6; j++) { inp_ptr[j * 6] = 4 * tmp[j * 6] - 5 * tmp[j * 6 + 2] + tmp[j * 6 + 4]; inp_ptr[1 + j * 6] = r4_minus_4_r2[j] - r1_4_minus_r3[j]; inp_ptr[2 + j * 6] = r4_minus_4_r2[j] + r1_4_minus_r3[j]; inp_ptr[3 + j * 6] = r4_minus_r2_[j] - r1_minus_r3_x2[j]; inp_ptr[4 + j * 6] = r4_minus_r2_[j] + r1_minus_r3_x2[j]; inp_ptr[5 + j * 6] = 4 * tmp[j * 6 + 1] - 5 * tmp[j * 6 + 3] + tmp[j * 6 + 5]; } } static inline void trans_inp_4_cpu(float* inp, float* inp_ptr, int inw, int s_size) { float* inp0 = inp; float* inp1 = inp0 + inw; float* inp2 = inp1 + inw; float* inp3 = inp2 + inw; float* inp4 = inp3 + inw; float* inp5 = inp4 + inw; float mid[36 * 4] = {0}; float r4_minus_r2[24]; float r1_4_minus_r3[24]; float r4_minus_4_r2[24]; float r1_minus_r3_x2[24]; for (int i = 0; i < 6; i++) { // 0 mid[i * 4] = 4 * inp0[i] - 5 * inp2[i] + inp4[i]; mid[(30 + i) * 4] = 4 * inp1[i] - 5 * inp3[i] + inp5[i]; r1_minus_r3_x2[i * 4 + 0] = (inp1[i] - inp3[i]) * 2; r1_4_minus_r3[i * 4 + 0] = 4 * inp1[i] - inp3[i]; r4_minus_4_r2[i * 4 + 0] = inp4[i] - 4 * inp2[i]; r4_minus_r2[i * 4 + 0] = inp4[i] - inp2[i]; // 1 mid[i * 4 + 1] = 4 * inp0[i + 4] - 5 * inp2[i + 4] + inp4[i + 4]; mid[(30 + i) * 4 + 1] = 4 * inp1[i + 4] - 5 * inp3[i + 4] + inp5[i + 4]; r1_minus_r3_x2[i * 4 + 1] = (inp1[i + 4] - inp3[i + 
4]) * 2; r1_4_minus_r3[i * 4 + 1] = 4 * inp1[i + 4] - inp3[i + 4]; r4_minus_4_r2[i * 4 + 1] = inp4[i + 4] - 4 * inp2[i + 4]; r4_minus_r2[i * 4 + 1] = inp4[i + 4] - inp2[i + 4]; // 2 mid[i * 4 + 2] = 4 * inp0[i + 8] - 5 * inp2[i + 8] + inp4[i + 8]; mid[(30 + i) * 4 + 2] = 4 * inp1[i + 8] - 5 * inp3[i + 8] + inp5[i + 8]; r1_minus_r3_x2[i * 4 + 2] = (inp1[i + 8] - inp3[i + 8]) * 2; r1_4_minus_r3[i * 4 + 2] = 4 * inp1[i + 8] - inp3[i + 8]; r4_minus_4_r2[i * 4 + 2] = inp4[i + 8] - 4 * inp2[i + 8]; r4_minus_r2[i * 4 + 2] = inp4[i + 8] - inp2[i + 8]; // 3 mid[i * 4 + 3] = 4 * inp0[i + 12] - 5 * inp2[i + 12] + inp4[i + 12]; mid[(30 + i) * 4 + 3] = 4 * inp1[i + 12] - 5 * inp3[i + 12] + inp5[i + 12]; r1_minus_r3_x2[i * 4 + 3] = (inp1[i + 12] - inp3[i + 12]) * 2; r1_4_minus_r3[i * 4 + 3] = 4 * inp1[i + 12] - inp3[i + 12]; r4_minus_4_r2[i * 4 + 3] = inp4[i + 12] - 4 * inp2[i + 12]; r4_minus_r2[i * 4 + 3] = inp4[i + 12] - inp2[i + 12]; } //==================================================================== // for(int i = 0; i < 6; i++) // { // for(int k = 0; k < 4; k++) // { // mid[(6 + i) * 4 + k] = r4_minus_4_r2[i * 4 + k] - r1_4_minus_r3[i * 4 + k]; // mid[(12 + i) * 4 + k] = r4_minus_4_r2[i * 4 + k] + r1_4_minus_r3[i * 4 + k]; // mid[(18 + i) * 4 + k] = r4_minus_r2[i * 4 + k] - r1_minus_r3_x2[i * 4 + k]; // mid[(24 + i) * 4 + k] = r4_minus_r2[i * 4 + k] + r1_minus_r3_x2[i * 4 + k]; // } // } float32x4_t r0 = vld1q_f32(r4_minus_4_r2); float32x4_t r1 = vld1q_f32(r4_minus_4_r2 + 4); float32x4_t r2 = vld1q_f32(r4_minus_4_r2 + 8); float32x4_t r3 = vld1q_f32(r4_minus_4_r2 + 12); float32x4_t r4 = vld1q_f32(r4_minus_4_r2 + 16); float32x4_t r5 = vld1q_f32(r4_minus_4_r2 + 20); float32x4_t r0_ = vld1q_f32(r1_4_minus_r3); float32x4_t r1_ = vld1q_f32(r1_4_minus_r3 + 4); float32x4_t r2_ = vld1q_f32(r1_4_minus_r3 + 8); float32x4_t r3_ = vld1q_f32(r1_4_minus_r3 + 12); float32x4_t r4_ = vld1q_f32(r1_4_minus_r3 + 16); float32x4_t r5_ = vld1q_f32(r1_4_minus_r3 + 20); float32x4_t line0_0 = 
vld1q_f32(mid); float32x4_t line0_1 = vld1q_f32(mid + 4); float32x4_t line0_2 = vld1q_f32(mid + 8); float32x4_t line0_3 = vld1q_f32(mid + 12); float32x4_t line0_4 = vld1q_f32(mid + 16); float32x4_t line0_5 = vld1q_f32(mid + 20); float32x4_t line1_0 = vsubq_f32(r0, r0_); // mid[(6 + i) * 4 + k] [1][0] float32x4_t line1_1 = vsubq_f32(r1, r1_); // mid[(6 + i) * 4 + k] [1][1] float32x4_t line1_2 = vsubq_f32(r2, r2_); // mid[(6 + i) * 4 + k] [1][2] float32x4_t line1_3 = vsubq_f32(r3, r3_); // mid[(6 + i) * 4 + k] [1][3] float32x4_t line1_4 = vsubq_f32(r4, r4_); // mid[(6 + i) * 4 + k] [1][4] float32x4_t line1_5 = vsubq_f32(r5, r5_); // mid[(6 + i) * 4 + k] [1][5] float32x4_t line2_0 = vaddq_f32(r0, r0_); // mid[(12 + i) * 4 + k] [2][0] float32x4_t line2_1 = vaddq_f32(r1, r1_); // mid[(12 + i) * 4 + k] [2][1] float32x4_t line2_2 = vaddq_f32(r2, r2_); // mid[(12 + i) * 4 + k] [2][2] float32x4_t line2_3 = vaddq_f32(r3, r3_); // mid[(12 + i) * 4 + k] [2][3] float32x4_t line2_4 = vaddq_f32(r4, r4_); // mid[(12 + i) * 4 + k] [2][4] float32x4_t line2_5 = vaddq_f32(r5, r5_); // mid[(12 + i) * 4 + k] [2][5] r0 = vld1q_f32(r4_minus_r2); r1 = vld1q_f32(r4_minus_r2 + 4); r2 = vld1q_f32(r4_minus_r2 + 8); r3 = vld1q_f32(r4_minus_r2 + 12); r4 = vld1q_f32(r4_minus_r2 + 16); r5 = vld1q_f32(r4_minus_r2 + 20); r0_ = vld1q_f32(r1_minus_r3_x2); r1_ = vld1q_f32(r1_minus_r3_x2 + 4); r2_ = vld1q_f32(r1_minus_r3_x2 + 8); r3_ = vld1q_f32(r1_minus_r3_x2 + 12); r4_ = vld1q_f32(r1_minus_r3_x2 + 16); r5_ = vld1q_f32(r1_minus_r3_x2 + 20); float32x4_t line5_0 = vld1q_f32(mid + 120); float32x4_t line5_1 = vld1q_f32(mid + 124); float32x4_t line5_2 = vld1q_f32(mid + 128); float32x4_t line5_3 = vld1q_f32(mid + 132); float32x4_t line5_4 = vld1q_f32(mid + 136); float32x4_t line5_5 = vld1q_f32(mid + 140); float32x4_t line3_0 = vsubq_f32(r0, r0_); // mid[(18 + i) * 4 + k] [3][0] float32x4_t line3_1 = vsubq_f32(r1, r1_); // mid[(18 + i) * 4 + k] [3][1] float32x4_t line3_2 = vsubq_f32(r2, r2_); // mid[(18 + i) 
* 4 + k] [3][2] float32x4_t line3_3 = vsubq_f32(r3, r3_); // mid[(18 + i) * 4 + k] [3][3] float32x4_t line3_4 = vsubq_f32(r4, r4_); // mid[(18 + i) * 4 + k] [3][4] float32x4_t line3_5 = vsubq_f32(r5, r5_); // mid[(18 + i) * 4 + k] [3][5] float32x4_t line4_0 = vaddq_f32(r0, r0_); // mid[(24 + i) * 4 + k] [4][0] float32x4_t line4_1 = vaddq_f32(r1, r1_); // mid[(24 + i) * 4 + k] [4][1] float32x4_t line4_2 = vaddq_f32(r2, r2_); // mid[(24 + i) * 4 + k] [4][2] float32x4_t line4_3 = vaddq_f32(r3, r3_); // mid[(24 + i) * 4 + k] [4][3] float32x4_t line4_4 = vaddq_f32(r4, r4_); // mid[(24 + i) * 4 + k] [4][4] float32x4_t line4_5 = vaddq_f32(r5, r5_); // mid[(24 + i) * 4 + k] [4][5] // r4_minus_r2[i * 4 + k] i=0 = mid[0][4] r0 = vsubq_f32(line0_4, line0_2); r1 = vsubq_f32(line1_4, line1_2); r2 = vsubq_f32(line2_4, line2_2); r3 = vsubq_f32(line3_4, line3_2); r4 = vsubq_f32(line4_4, line4_2); r5 = vsubq_f32(line5_4, line5_2); r0_ = vsubq_f32(line0_1, line0_3); r1_ = vsubq_f32(line1_1, line1_3); r2_ = vsubq_f32(line2_1, line2_3); r3_ = vsubq_f32(line3_1, line3_3); r4_ = vsubq_f32(line4_1, line4_3); r5_ = vsubq_f32(line5_1, line5_3); float32x4_t const2 = vdupq_n_f32(2.f); r0_ = vmulq_f32(r0_, const2); r1_ = vmulq_f32(r1_, const2); r2_ = vmulq_f32(r2_, const2); r3_ = vmulq_f32(r3_, const2); r4_ = vmulq_f32(r4_, const2); r5_ = vmulq_f32(r5_, const2); vst1q_f32(inp_ptr + s_size * 3, vsubq_f32(r0, r0_)); // inp_ptr[ s_size * (3 + i * 6)] vst1q_f32(inp_ptr + s_size * 9, vsubq_f32(r1, r1_)); // inp_ptr[ s_size * (3 + i * 6)] vst1q_f32(inp_ptr + s_size * 15, vsubq_f32(r2, r2_)); // inp_ptr[ s_size * (3 + i * 6)] vst1q_f32(inp_ptr + s_size * 21, vsubq_f32(r3, r3_)); // inp_ptr[ s_size * (3 + i * 6)] vst1q_f32(inp_ptr + s_size * 27, vsubq_f32(r4, r4_)); // inp_ptr[ s_size * (3 + i * 6)] vst1q_f32(inp_ptr + s_size * 33, vsubq_f32(r5, r5_)); // inp_ptr[ s_size * (3 + i * 6)] vst1q_f32(inp_ptr + s_size * 4, vaddq_f32(r0, r0_)); // inp_ptr[ s_size * (4 + i * 6)] vst1q_f32(inp_ptr + s_size * 
10, vaddq_f32(r1, r1_)); // inp_ptr[ s_size * (4 + i * 6)] vst1q_f32(inp_ptr + s_size * 16, vaddq_f32(r2, r2_)); // inp_ptr[ s_size * (4 + i * 6)] vst1q_f32(inp_ptr + s_size * 22, vaddq_f32(r3, r3_)); // inp_ptr[ s_size * (4 + i * 6)] vst1q_f32(inp_ptr + s_size * 28, vaddq_f32(r4, r4_)); // inp_ptr[ s_size * (4 + i * 6)] vst1q_f32(inp_ptr + s_size * 34, vaddq_f32(r5, r5_)); // inp_ptr[ s_size * (4 + i * 6)] float32x4_t const4 = vdupq_n_f32(4.f); float32x4_t const5 = vdupq_n_f32(-5.f); r0_ = vmulq_f32(line0_1, const4); // line 1*4 ======== r1_ = vmulq_f32(line1_1, const4); r2_ = vmulq_f32(line2_1, const4); r3_ = vmulq_f32(line3_1, const4); r4_ = vmulq_f32(line4_1, const4); r5_ = vmulq_f32(line5_1, const4); float32x4_t rr0_ = vsubq_f32(r0_, line0_3); // line1*4-line3 float32x4_t rr1_ = vsubq_f32(r1_, line1_3); float32x4_t rr2_ = vsubq_f32(r2_, line2_3); float32x4_t rr3_ = vsubq_f32(r3_, line3_3); float32x4_t rr4_ = vsubq_f32(r4_, line4_3); float32x4_t rr5_ = vsubq_f32(r5_, line5_3); r0 = vmulq_f32(line0_2, const4); r1 = vmulq_f32(line1_2, const4); r2 = vmulq_f32(line2_2, const4); r3 = vmulq_f32(line3_2, const4); r4 = vmulq_f32(line4_2, const4); r5 = vmulq_f32(line5_2, const4); r0 = vsubq_f32(line0_4, r0); // line4 -4*line2 r1 = vsubq_f32(line1_4, r1); r2 = vsubq_f32(line2_4, r2); r3 = vsubq_f32(line3_4, r3); r4 = vsubq_f32(line4_4, r4); r5 = vsubq_f32(line5_4, r5); vst1q_f32(inp_ptr + s_size * 1, vsubq_f32(r0, rr0_)); // inp_ptr[ s_size * (1 + i * 6)] vst1q_f32(inp_ptr + s_size * 7, vsubq_f32(r1, rr1_)); // inp_ptr[ s_size * (1 + i * 6)] vst1q_f32(inp_ptr + s_size * 13, vsubq_f32(r2, rr2_)); // inp_ptr[ s_size * (1 + i * 6)] vst1q_f32(inp_ptr + s_size * 19, vsubq_f32(r3, rr3_)); // inp_ptr[ s_size * (1 + i * 6)] vst1q_f32(inp_ptr + s_size * 25, vsubq_f32(r4, rr4_)); // inp_ptr[ s_size * (1 + i * 6)] vst1q_f32(inp_ptr + s_size * 31, vsubq_f32(r5, rr5_)); // inp_ptr[ s_size * (1 + i * 6)] vst1q_f32(inp_ptr + s_size * 2, vaddq_f32(r0, rr0_)); // inp_ptr[ s_size * (2 + i 
* 6)] vst1q_f32(inp_ptr + s_size * 8, vaddq_f32(r1, rr1_)); // inp_ptr[ s_size * (2 + i * 6)] vst1q_f32(inp_ptr + s_size * 14, vaddq_f32(r2, rr2_)); // inp_ptr[ s_size * (2 + i * 6)] vst1q_f32(inp_ptr + s_size * 20, vaddq_f32(r3, rr3_)); // inp_ptr[ s_size * (2 + i * 6)] vst1q_f32(inp_ptr + s_size * 26, vaddq_f32(r4, rr4_)); // inp_ptr[ s_size * (2 + i * 6)] vst1q_f32(inp_ptr + s_size * 32, vaddq_f32(r5, rr5_)); // inp_ptr[ s_size * (2 + i * 6)] r0_ = vaddq_f32(line0_5, r0_); // 5 + 1*4 r1_ = vaddq_f32(line1_5, r1_); r2_ = vaddq_f32(line2_5, r2_); r3_ = vaddq_f32(line3_5, r3_); r4_ = vaddq_f32(line4_5, r4_); r5_ = vaddq_f32(line5_5, r5_); r0 = vmulq_f32(line0_3, const5); r1 = vmulq_f32(line1_3, const5); r2 = vmulq_f32(line2_3, const5); r3 = vmulq_f32(line3_3, const5); r4 = vmulq_f32(line4_3, const5); r5 = vmulq_f32(line5_3, const5); vst1q_f32(inp_ptr + s_size * 5, vaddq_f32(r0, r0_)); // inp_ptr[ s_size * (5 + i * 6)] vst1q_f32(inp_ptr + s_size * 11, vaddq_f32(r1, r1_)); // inp_ptr[ s_size * (5 + i * 6)] vst1q_f32(inp_ptr + s_size * 17, vaddq_f32(r2, r2_)); // inp_ptr[ s_size * (5 + i * 6)] vst1q_f32(inp_ptr + s_size * 23, vaddq_f32(r3, r3_)); // inp_ptr[ s_size * (5 + i * 6)] vst1q_f32(inp_ptr + s_size * 29, vaddq_f32(r4, r4_)); // inp_ptr[ s_size * (5 + i * 6)] vst1q_f32(inp_ptr + s_size * 35, vaddq_f32(r5, r5_)); // inp_ptr[ s_size * (5 + i * 6)] r0 = vmulq_f32(line0_0, const4); r1 = vmulq_f32(line1_0, const4); r2 = vmulq_f32(line2_0, const4); r3 = vmulq_f32(line3_0, const4); r4 = vmulq_f32(line4_0, const4); r5 = vmulq_f32(line5_0, const4); r0_ = vmulq_f32(line0_2, const5); r1_ = vmulq_f32(line1_2, const5); r2_ = vmulq_f32(line2_2, const5); r3_ = vmulq_f32(line3_2, const5); r4_ = vmulq_f32(line4_2, const5); r5_ = vmulq_f32(line5_2, const5); r0 = vaddq_f32(r0, line0_4); r1 = vaddq_f32(r1, line1_4); r2 = vaddq_f32(r2, line2_4); r3 = vaddq_f32(r3, line3_4); r4 = vaddq_f32(r4, line4_4); r5 = vaddq_f32(r5, line5_4); vst1q_f32(inp_ptr + s_size * 0, vaddq_f32(r0, 
r0_));    // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 6, vaddq_f32(r1, r1_));     // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 12, vaddq_f32(r2, r2_));    // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 18, vaddq_f32(r3, r3_));    // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 24, vaddq_f32(r4, r4_));    // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 30, vaddq_f32(r5, r5_));    // inp_ptr[ s_size * (1 + i * 6)]
    /* Reference scalar version of the vectorized stores above, kept for documentation:
    for(int i = 0; i < 6; i++)
    {
        for(int k = 0; k < 4; k++)
        {
            r4_minus_r2[i * 4 + k] = mid[(i * 6 + 4) * 4 + k] - mid[(i * 6 + 2) * 4 + k];
            r1_4_minus_r3[i * 4 + k] = 4 * mid[(i * 6 + 1) * 4 + k] - mid[(i * 6 + 3) * 4 + k];
            r4_minus_4_r2[i * 4 + k] = mid[(i * 6 + 4) * 4 + k] - 4 * mid[(i * 6 + 2) * 4 + k];
            r1_minus_r3_x2[i * 4 + k] = 2 * (mid[(i * 6 + 1) * 4 + k] - mid[(i * 6 + 3) * 4 + k]);
        }
    }
    for(int i = 1; i < 2; i++)
    {
        for(int k = 0; k < 4; k++)
        {
            inp_ptr[k + s_size * (i * 6)] =
                4 * mid[(i * 6) * 4 + k] - 5 * mid[(i * 6 + 2) * 4 + k] + mid[(i * 6 + 4) * 4 + k];
            inp_ptr[k + s_size * (1 + i * 6)] = r4_minus_4_r2[i * 4 + k] - r1_4_minus_r3[i * 4 + k];
            inp_ptr[k + s_size * (2 + i * 6)] = r4_minus_4_r2[i * 4 + k] + r1_4_minus_r3[i * 4 + k];
            inp_ptr[k + s_size * (3 + i * 6)] = r4_minus_r2[i * 4 + k] - r1_minus_r3_x2[i * 4 + k];
            inp_ptr[k + s_size * (4 + i * 6)] = r4_minus_r2[i * 4 + k] + r1_minus_r3_x2[i * 4 + k];
            inp_ptr[k + s_size * (5 + i * 6)] =
                4 * mid[(i * 6 + 1) * 4 + k] - 5 * mid[(i * 6 + 3) * 4 + k] + mid[(i * 6 + 5) * 4 + k];
        }
    }
    */
}

// trans_input [block_hw/4][ELEM_SIZE][inc][4]
/* Winograd input transform for 4 tiles at a time.
 * Walks the (block_h x block_w) tile grid in groups of 4 consecutive tiles and writes the
 * transformed tiles to trans_inp in [block_hw/4][ELEM_SIZE][inc][4] layout.
 * Fast path: when all 4 tiles sit on the same tile row (idxh[0] == idxh[3]) the tiles are
 * contiguous in memory and a batched transform is used; otherwise each tile is transformed
 * separately and then interleaved.
 * NOTE(review): buffer0 is a VLA of inc * ELEM_SIZE * 4 floats on the stack — large channel
 * counts may risk stack overflow; confirm inc is bounded by the caller. */
static inline void tran_input_4block(const float* input, float* trans_inp, int inc, int block_h, int block_w,
                                     int inh, int inw)
{
    int in_hw = inh * inw;
    int block_hw = block_h * block_w;
    int nn_block = block_hw >> 2;    // number of full groups of 4 tiles
    int idxh[4];
    int idxw[4];

    for (int ib = 0; ib < nn_block; ib++)
    {
        float* inp_ptr_4tile = trans_inp + ib * 4 * ELEM_SIZE * inc;
        /* tile-grid coordinates of the 4 tiles in this group */
        idxh[0] = (ib * 4) / block_w;
        idxh[1] = (ib * 4 + 1) / block_w;
        idxh[2] = (ib * 4 + 2) / block_w;
        idxh[3] = (ib * 4 + 3) / block_w;
        idxw[0] = (ib * 4) % block_w;
        idxw[1] = (ib * 4 + 1) % block_w;
        idxw[2] = (ib * 4 + 2) % block_w;
        idxw[3] = (ib * 4 + 3) % block_w;

        if (idxh[0] == idxh[3])
        {
            /* all 4 tiles on one row: batched per-channel transform */
            float* temp_inp_ptr = ( float* )(input + idxh[0] * 4 * inw + idxw[0] * 4);

            for (int c = 0; c < inc; c++)
            {
#ifdef __aarch64__
                float ker00[4] = {1, 2, 4, 5};    // transform coefficients passed to the asm kernel
                tran_inp_4(temp_inp_ptr, inp_ptr_4tile + 4 * c, ker00, inw, inc * 16, in_hw);
                temp_inp_ptr += in_hw;
#else
                trans_inp_4_cpu(temp_inp_ptr, inp_ptr_4tile + c * 4, inw, inc * 4);
                temp_inp_ptr += in_hw;
#endif
            }
        }
        else
        {
            /* group straddles a row boundary: transform tile-by-tile, then interleave */
            float buffer0[inc * ELEM_SIZE * 4];
            float* buffer = buffer0;

            for (int c = 0; c < inc; c++)
            {
                trans_inp_1tile(( float* )input, buffer, idxh[0], idxw[0], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile(( float* )input, buffer, idxh[1], idxw[1], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile(( float* )input, buffer, idxh[2], idxw[2], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile(( float* )input, buffer, idxh[3], idxw[3], c, in_hw, inw);
                buffer += ELEM_SIZE;
            }
            // interleave: [inc][4][ELEM_SIZE] -> [ELEM_SIZE][inc][4]
            float* tmp_inp = inp_ptr_4tile;

            for (int s = 0; s < ELEM_SIZE; s++)
            {
                for (int i = 0; i < inc; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        *tmp_inp = buffer0[i * ELEM_SIZE * 4 + j * ELEM_SIZE + s];
                        tmp_inp++;
                    }
                }
            }
            // end interleave
        }
    }
}

/* Transforms the leftover tiles (block_hw not divisible by 4), one tile at a time,
 * appending them after the 4-tile groups in trans_inp in [ELEM_SIZE][inc] order. */
static inline void tran_input_resi_block(const float* input, float* trans_inp, int inc, int nn_block,
                                         int resi_block, int block_hw, int block_w, int in_hw, int inw)
{
    float* inp_ptr = trans_inp + nn_block * 4 * ELEM_SIZE * inc;    // start after the full groups

    for (int ib = resi_block; ib < block_hw; ib++)
    {
        float buffer0[ELEM_SIZE * inc];
        float* buffer = buffer0;

        for (int c = 0; c < inc; c++)
        {
            int ih = ib / block_w;
            int jw = ib % block_w;
            trans_inp_1tile(( float* )input, buffer, ih, jw, c, in_hw, inw);
            buffer += ELEM_SIZE;
        }
        // interleave: [inc][ELEM_SIZE] -> [ELEM_SIZE][inc]
        for (int s = 0; s < ELEM_SIZE; s++)
        {
            for (int i = 0; i <
inc; i++)
            {
                *inp_ptr = buffer0[i * ELEM_SIZE + s];
                inp_ptr++;
            }
        }
        // end interleave
    }
}

/* Applies the fused activation: any activation >= 0 clamps below at 0 (ReLU);
 * activation == 6 additionally clamps above at 6 (ReLU6). Negative activation
 * values leave the input unchanged. */
static inline float do_activation(float value, int activation)
{
    if (activation >= 0)
        value = WINO_MAX(value, 0);
    if (activation == 6)
        value = WINO_MIN(value, 6);
    return value;
}

/* Winograd F(4x4,3x3) inverse output transform for one 6x6 tile: computes A^T * mid * A,
 * adds the optional per-channel bias, applies the activation and writes the resulting
 * 4x4 patch into the output image at stride outw. */
static inline void trans_output_f43(const float* mid, float* out, int outw, const float* bias_ptr, int activation)
{
    /*
    float AT[24]={
        1., 1.,  1., 1.,  1., 0.,
        0., 1., -1., 2., -2., 0.,
        0., 1.,  1., 4.,  4., 0.,
        0., 1., -1., 8., -8., 1.
    };
    float A[24]={
        1.,  0., 0.,  0.,
        1.,  1., 1.,  1.,
        1., -1., 1., -1.,
        1.,  2., 4.,  8.,
        1., -2., 4., -8.,
        0.,  0., 0.,  1.
    };
    */
    float tmp[24] = {0};

    /* row pass: tmp = AT * mid (4x6) */
    float r1_add_r2[6];
    float r1_minus_r2[6];
    float r3_add_r4[6];
    float r3_minus_r4_x2[6];

    for (int j = 0; j < 6; j++)
    {
        r1_add_r2[j] = mid[6 * 1 + j] + mid[6 * 2 + j];
        r1_minus_r2[j] = mid[6 * 1 + j] - mid[6 * 2 + j];
        r3_add_r4[j] = mid[6 * 3 + j] + mid[6 * 4 + j];
        r3_minus_r4_x2[j] = (mid[6 * 3 + j] - mid[6 * 4 + j]) * 2;
    }
    for (int j = 0; j < 6; j++)
    {
        tmp[j] = mid[j] + r1_add_r2[j] + r3_add_r4[j];
        tmp[6 + j] = r1_minus_r2[j] + r3_minus_r4_x2[j];
        tmp[12 + j] = r1_add_r2[j] + 4 * r3_add_r4[j];
        tmp[18 + j] = r1_minus_r2[j] + 4 * r3_minus_r4_x2[j] + mid[6 * 5 + j];
    }

    float* out0 = out;
    float* out1 = out0 + outw;
    float* out2 = out1 + outw;
    float* out3 = out2 + outw;

    /* column pass: (tmp) * A, then bias + activation, written row by row */
    float _r1_add_r2[4];
    float _r1_minus_r2[4];
    float _r3_add_r4[4];
    float _r3_minus_r4_x2[4];
    int idx;

    for (int j = 0; j < 4; j++)
    {
        idx = 6 * j;
        _r1_add_r2[j] = tmp[idx + 1] + tmp[idx + 2];
        _r1_minus_r2[j] = tmp[idx + 1] - tmp[idx + 2];
        _r3_add_r4[j] = tmp[idx + 3] + tmp[idx + 4];
        _r3_minus_r4_x2[j] = (tmp[idx + 3] - tmp[idx + 4]) * 2;
    }
    if (bias_ptr)
    {
        float bias = bias_ptr[0];
        out0[0] = do_activation(tmp[0 * 6] + _r1_add_r2[0] + _r3_add_r4[0] + bias, activation);
        out1[0] = do_activation(tmp[1 * 6] + _r1_add_r2[1] + _r3_add_r4[1] + bias, activation);
        out2[0] = do_activation(tmp[2 * 6] + _r1_add_r2[2] + _r3_add_r4[2] + bias, activation);
        out3[0] = do_activation(tmp[3 * 6] + _r1_add_r2[3] + _r3_add_r4[3] + bias, activation);
        out0[1] = do_activation(_r1_minus_r2[0] + _r3_minus_r4_x2[0] + bias, activation);
        out1[1] = do_activation(_r1_minus_r2[1] + _r3_minus_r4_x2[1] + bias, activation);
        out2[1] = do_activation(_r1_minus_r2[2] + _r3_minus_r4_x2[2] + bias, activation);
        out3[1] = do_activation(_r1_minus_r2[3] + _r3_minus_r4_x2[3] + bias, activation);
        out0[2] = do_activation(_r1_add_r2[0] + 4 * _r3_add_r4[0] + bias, activation);
        out1[2] = do_activation(_r1_add_r2[1] + 4 * _r3_add_r4[1] + bias, activation);
        out2[2] = do_activation(_r1_add_r2[2] + 4 * _r3_add_r4[2] + bias, activation);
        out3[2] = do_activation(_r1_add_r2[3] + 4 * _r3_add_r4[3] + bias, activation);
        out0[3] = do_activation(_r1_minus_r2[0] + 4 * _r3_minus_r4_x2[0] + tmp[0 * 6 + 5] + bias, activation);
        out1[3] = do_activation(_r1_minus_r2[1] + 4 * _r3_minus_r4_x2[1] + tmp[1 * 6 + 5] + bias, activation);
        out2[3] = do_activation(_r1_minus_r2[2] + 4 * _r3_minus_r4_x2[2] + tmp[2 * 6 + 5] + bias, activation);
        out3[3] = do_activation(_r1_minus_r2[3] + 4 * _r3_minus_r4_x2[3] + tmp[3 * 6 + 5] + bias, activation);
    }
    else
    {
        out0[0] = do_activation(tmp[0 * 6] + _r1_add_r2[0] + _r3_add_r4[0], activation);
        out1[0] = do_activation(tmp[1 * 6] + _r1_add_r2[1] + _r3_add_r4[1], activation);
        out2[0] = do_activation(tmp[2 * 6] + _r1_add_r2[2] + _r3_add_r4[2], activation);
        out3[0] = do_activation(tmp[3 * 6] + _r1_add_r2[3] + _r3_add_r4[3], activation);
        out0[1] = do_activation(_r1_minus_r2[0] + _r3_minus_r4_x2[0], activation);
        out1[1] = do_activation(_r1_minus_r2[1] + _r3_minus_r4_x2[1], activation);
        out2[1] = do_activation(_r1_minus_r2[2] + _r3_minus_r4_x2[2], activation);
        out3[1] = do_activation(_r1_minus_r2[3] + _r3_minus_r4_x2[3], activation);
        out0[2] = do_activation(_r1_add_r2[0] + 4 * _r3_add_r4[0], activation);
        out1[2] = do_activation(_r1_add_r2[1] + 4 * _r3_add_r4[1], activation);
        out2[2] = do_activation(_r1_add_r2[2] + 4 * _r3_add_r4[2], activation);
        out3[2] = do_activation(_r1_add_r2[3] + 4 * _r3_add_r4[3], activation);
        out0[3] = do_activation(_r1_minus_r2[0] + 4 * _r3_minus_r4_x2[0] + tmp[0 * 6 + 5], activation);
        out1[3] = do_activation(_r1_minus_r2[1] + 4 * _r3_minus_r4_x2[1] + tmp[1 * 6 + 5], activation);
        out2[3] = do_activation(_r1_minus_r2[2] + 4 * _r3_minus_r4_x2[2] + tmp[2 * 6 + 5], activation);
        out3[3] = do_activation(_r1_minus_r2[3] + 4 * _r3_minus_r4_x2[3] + tmp[3 * 6 + 5], activation);
    }
}

/* Same inverse transform as trans_output_f43, but writes the 4x4 result densely
 * (stride 4) into a scratch buffer, with optional bias but WITHOUT activation —
 * used for border tiles that are clipped afterwards. */
static inline void trans_output_f43_ordinary(const float* mid, float* out, const float* bias_ptr)
{
    /*
    float AT[24]={
        1., 1.,  1., 1.,  1., 0.,
        0., 1., -1., 2., -2., 0.,
        0., 1.,  1., 4.,  4., 0.,
        0., 1., -1., 8., -8., 1.
    };
    float A[24]={
        1.,  0., 0.,  0.,
        1.,  1., 1.,  1.,
        1., -1., 1., -1.,
        1.,  2., 4.,  8.,
        1., -2., 4., -8.,
        0.,  0., 0.,  1.
    };
    */
    float tmp[24] = {0};

    float r1_add_r2[6];
    float r1_minus_r2[6];
    float r3_add_r4[6];
    float r3_minus_r4_x2[6];

    for (int j = 0; j < 6; j++)
    {
        r1_add_r2[j] = mid[6 * 1 + j] + mid[6 * 2 + j];
        r1_minus_r2[j] = mid[6 * 1 + j] - mid[6 * 2 + j];
        r3_add_r4[j] = mid[6 * 3 + j] + mid[6 * 4 + j];
        r3_minus_r4_x2[j] = (mid[6 * 3 + j] - mid[6 * 4 + j]) * 2;
    }
    for (int j = 0; j < 6; j++)
    {
        tmp[j] = mid[j] + r1_add_r2[j] + r3_add_r4[j];
        tmp[6 + j] = r1_minus_r2[j] + r3_minus_r4_x2[j];
        tmp[12 + j] = r1_add_r2[j] + 4 * r3_add_r4[j];
        tmp[18 + j] = r1_minus_r2[j] + 4 * r3_minus_r4_x2[j] + mid[6 * 5 + j];
    }

    float _r1_add_r2[4];
    float _r1_minus_r2[4];
    float _r3_add_r4[4];
    float _r3_minus_r4_x2[4];
    int idx;

    for (int j = 0; j < 4; j++)
    {
        idx = 6 * j;
        _r1_add_r2[j] = tmp[idx + 1] + tmp[idx + 2];
        _r1_minus_r2[j] = tmp[idx + 1] - tmp[idx + 2];
        _r3_add_r4[j] = tmp[idx + 3] + tmp[idx + 4];
        _r3_minus_r4_x2[j] = (tmp[idx + 3] - tmp[idx + 4]) * 2;
    }
    if (bias_ptr)
    {
        float bias = bias_ptr[0];
        for (int j = 0; j < 4; j++)
        {
            idx = j * 4;
            out[idx] = bias + tmp[j * 6] + _r1_add_r2[j] + _r3_add_r4[j];
            out[idx + 1] = bias + _r1_minus_r2[j] + _r3_minus_r4_x2[j];
            out[idx + 2] = bias + _r1_add_r2[j] + 4 * _r3_add_r4[j];
            out[idx + 3] = bias + _r1_minus_r2[j] + 4 * _r3_minus_r4_x2[j] + tmp[j * 6 + 5];
        }
    }
    else
    {
        for (int j = 0; j < 4; j++)
        {
            idx = j * 4;
            out[idx] = tmp[j * 6] + _r1_add_r2[j] + _r3_add_r4[j];
            out[idx + 1] = _r1_minus_r2[j] + _r3_minus_r4_x2[j];
            out[idx + 2] = _r1_add_r2[j] + 4 * _r3_add_r4[j];
            out[idx + 3] = _r1_minus_r2[j] + 4 * _r3_minus_r4_x2[j] + tmp[j * 6 + 5];
        }
    }
}

/* Writes the output for one tile across KER_COUT_UNIT_ consecutive output channels.
 * Interior tiles go straight to the output image; border tiles (the last tile row/column
 * when resi_h/resi_w != 0) are transformed into tmp_buffer first and then clipped to
 * ret_h x ret_w valid pixels.
 * NOTE(review): the clip loop indexes tmp_buffer with stride TILE while the 4-tile variant
 * below uses stride 4 — equivalent only if TILE == 4; confirm TILE's definition. */
static inline void transform_output_f43_1tile(const float* buffer_ptr, float* out, int p_idx, int idx_blockhw,
                                              int block_h, int block_w, int out_hw, int outw, int resi_h,
                                              int resi_w, int KER_COUT_UNIT_, const float* bias, int activation)
{
    float tmp_buffer[TILE * TILE];
    const float* bias_ptr = NULL;

    for (int p = 0; p < KER_COUT_UNIT_; p++)
    {
        int cout_idx = p_idx + p;
        if (bias)
        {
            bias_ptr = (bias + cout_idx);
        }
        float* out_ptr = out + cout_idx * out_hw;
        int i_h = idx_blockhw / block_w;
        int j_w = idx_blockhw % block_w;
        /* interior tile: no clipping needed */
        if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) ||
            (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1)))
        {
            trans_output_f43(buffer_ptr, out_ptr + (i_h * TILE * outw + j_w * TILE), outw, bias_ptr, activation);
        }
        else
        {
            /* border tile: transform into scratch, then copy the valid region */
            int ret_h = TILE - resi_h;
            if (i_h < block_h - 1)
                ret_h = TILE;
            int ret_w = TILE - resi_w;
            if (j_w < block_w - 1)
                ret_w = TILE;
            // tmp_buffer
            trans_output_f43_ordinary(buffer_ptr, tmp_buffer, bias_ptr);
            float* out_pointer = out_ptr + (i_h * TILE * outw + j_w * TILE);

            for (int hh = 0; hh < ret_h; hh++)
            {
                for (int ww = 0; ww < ret_w; ww++)
                {
                    out_pointer[hh * outw + ww] = do_activation(tmp_buffer[hh * TILE + ww], activation);
                }
            }
        }
        buffer_ptr += ELEM_SIZE;
    }
}

/* Same as transform_output_f43_1tile, but handles a group of 4 consecutive tiles
 * (block_idx .. block_idx + 3) per output channel. */
static inline void transform_output_f43_4tile(float* buffer_ptr, float* out, int p_idx, int block_idx,
                                              int block_h, int block_w, int outh, int outw, int resi_h,
                                              int resi_w, int KER_COUT_UNIT_, const float* bias, int activation)
{
    int out_hw = outh * outw;
    float tmp_buffer[TILE * TILE];
    int idx_h[4];
    int idx_w[4];
    idx_h[0] = (block_idx) / block_w;
    idx_h[1] = (block_idx + 1) / block_w;
    idx_h[2] = (block_idx +
2) / block_w;
    idx_h[3] = (block_idx + 3) / block_w;
    idx_w[0] = (block_idx) % block_w;
    idx_w[1] = (block_idx + 1) % block_w;
    idx_w[2] = (block_idx + 2) % block_w;
    idx_w[3] = (block_idx + 3) % block_w;
    float* bias_ptr = NULL;

    for (int p = 0; p < KER_COUT_UNIT_; p++)
    {
        int cout_idx = p_idx + p;
        float* out_ptr = out + cout_idx * out_hw;
        if (bias)
        {
            bias_ptr = ( float* )bias + cout_idx;
        }
        for (int ii = 0; ii < 4; ii++)
        {
            int i_h = idx_h[ii];
            int j_w = idx_w[ii];
            /* interior tile: write directly into the output image */
            if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) ||
                (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1)))
            {
                trans_output_f43(buffer_ptr, out_ptr + (i_h * TILE * outw + j_w * TILE), outw, bias_ptr,
                                 activation);
            }
            // direct use_out_ptr
            else
            {
                /* border tile: transform into scratch and clip to ret_h x ret_w */
                int ret_h = TILE - resi_h;
                if (i_h < block_h - 1)
                    ret_h = TILE;
                int ret_w = TILE - resi_w;
                if (j_w < block_w - 1)
                    ret_w = TILE;
                // tmp_buffer
                trans_output_f43_ordinary(buffer_ptr, tmp_buffer, bias_ptr);
                float* out_pointer = out_ptr + (i_h * TILE * outw + j_w * TILE);

                for (int hh = 0; hh < ret_h; hh++)
                {
                    for (int ww = 0; ww < ret_w; ww++)
                    {
                        out_pointer[hh * outw + ww] = do_activation(tmp_buffer[hh * 4 + ww], activation);
                    }
                }
            }    // end else, tmp_buff
            buffer_ptr += ELEM_SIZE;
        }
    }
}

// trans_input [block_hw/4][ELEM_SIZE][inc][4]
// kernel      [out_c/PER_OUT_CHAN][ELEM_SIZE][in_c][PER_OUT_CHAN]
/* Winograd GEMM + output transform for the output channels that fill whole
 * PER_OUT_CHAN groups. Parallelized over channel groups with OpenMP. Tiles are
 * processed 4 at a time (NEON sgemm kernels), with a scalar tail loop for the rest.
 * On aarch64, interior 4-tile rows take a fused fast path (tran_out_4). */
static void wino_sgemm_set(const float* ker, const float* inp, float* output, const float* bias, int cin,
                           int cout_end, int block_h, int block_w, int out_h, int out_w, int resi_h, int resi_w,
                           int activation, int num_thread, int cpu_affinity)
{
    int flag_outw = 1;
    if (out_w < 16)
        flag_outw = 0;    // fast output path only used for sufficiently wide outputs

#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < (cout_end & -PER_OUT_CHAN); p += PER_OUT_CHAN)
    {
        int out_hw = out_w * out_h;
        int block_hw = block_h * block_w;
        const float* ker_ptr = ker + p * ELEM_SIZE * cin;
        int i = 0;

        /* 4 tiles at a time */
        for (; i < (block_hw & -4); i += 4)
        {
            const float* inp_ptr = inp + i * ELEM_SIZE * cin;
            float out_buffer[PER_OUT_CHAN * 4 * ELEM_SIZE];
#ifdef __aarch64__
            int idx_h[4];
            int idx_w[4];
            idx_h[0] = (i) / block_w;
            idx_h[1] = (i + 1) / block_w;
            idx_h[2] = (i + 2) / block_w;
            idx_h[3] = (i + 3) / block_w;
            idx_w[0] = (i) % block_w;
            idx_w[1] = (i + 1) % block_w;
            idx_w[2] = (i + 2) % block_w;
            idx_w[3] = (i + 3) % block_w;
            int wino_out_4_tiles = 0;
            int mulitplier = PER_OUT_CHAN;

            /* fast path: 4 interior tiles on one row can be written fused */
            if (flag_outw)
            {
                if ((idx_h[0] == idx_h[3]) && (idx_h[0] < (block_h - 1)) && (idx_w[3] < (block_w - 1)))
                {
                    wino_out_4_tiles = 1;
                    mulitplier = 1;
                }
            }
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                wino_sgemm_4x16_A72(out_buffer + s * 4 * mulitplier, inp_ptr + s * 4 * cin,
                                    ker_ptr + s * PER_OUT_CHAN * cin, cin, wino_out_4_tiles);
            }
            if (wino_out_4_tiles == 1)
            {
                float* bias_ptr = NULL;

                for (int pss = 0; pss < PER_OUT_CHAN; pss++)
                {
                    int cout_idx = p + pss;
                    float* out_ptr = output + cout_idx * out_hw + idx_h[0] * TILE * out_w + idx_w[0] * TILE;
                    if (bias)
                    {
                        bias_ptr = ( float* )(bias + cout_idx);
                    }
                    float ker00[4] = {2, 4, 8, 0};    // inverse-transform coefficients for the asm kernel
                    tran_out_4(out_buffer + pss * ELEM_SIZE * 4, out_ptr, out_w * sizeof(float), ker00,
                               bias_ptr, activation);
                }
            }
            else
            {
                /* generic path: de-interleave sgemm results, then per-tile output transform */
                float buffer[PER_OUT_CHAN * 4 * ELEM_SIZE];
                float* buffer_ptr0 = buffer;

                for (int pp = 0; pp < PER_OUT_CHAN; pp++)
                {
                    for (int t = 0; t < 4; t++)
                    {
                        for (int ss = 0; ss < ELEM_SIZE; ss++)
                        {
                            *buffer_ptr0 = out_buffer[ss * 4 * PER_OUT_CHAN + pp * 4 + t];
                            buffer_ptr0++;
                        }
                    }
                }
                // end interleave
                {
                    float tmp_buffer[TILE * TILE];
                    const float* bias_ptr = NULL;

                    for (int pss = 0; pss < PER_OUT_CHAN; pss++)
                    {
                        int cout_idx = p + pss;
                        float* out_ptr = output + cout_idx * out_hw;
                        if (bias)
                        {
                            bias_ptr = bias + cout_idx;
                        }
                        for (int ii = 0; ii < 4; ii++)
                        {
                            int i_h = idx_h[ii];
                            int j_w = idx_w[ii];
                            if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) ||
                                (resi_w == 0 && (i_h < block_h - 1)) ||
                                ((j_w < block_w - 1) && (i_h < block_h - 1)))
                            {
                                trans_output_f43(buffer + ii * ELEM_SIZE + pss * 36 * 4,
                                                 out_ptr + (i_h * TILE * out_w + j_w * TILE), out_w,
                                                 (const float*)bias_ptr, activation);
                            }
                            // direct use_out_ptr
                            else
                            {
                                int ret_h = TILE - resi_h;
                                if (i_h < block_h - 1)
                                    ret_h = TILE;
                                int ret_w = TILE - resi_w;
                                if (j_w < block_w - 1)
                                    ret_w = TILE;
                                // tmp_buffer
                                trans_output_f43_ordinary(buffer + ii * ELEM_SIZE + pss * 36 * 4, tmp_buffer,
                                                          (const float*)bias_ptr);
                                float* out_pointer = out_ptr + (i_h * TILE * out_w + j_w * TILE);

                                for (int hh = 0; hh < ret_h; hh++)
                                {
                                    for (int ww = 0; ww < ret_w; ww++)
                                    {
                                        out_pointer[hh * out_w + ww] =
                                            do_activation(tmp_buffer[hh * 4 + ww], activation);
                                    }
                                }
                            }    // end else, tmp_buff
                        }
                    }
                }    // end transform
            }
#else
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                wino_sgemm_4x12_A17(out_buffer + s * 4 * PER_OUT_CHAN, inp_ptr + s * 4 * cin,
                                    ker_ptr + s * PER_OUT_CHAN * cin, cin);
            }
            float buffer[PER_OUT_CHAN * 4 * ELEM_SIZE];
            float* buffer_ptr0 = buffer;

            for (int pp = 0; pp < PER_OUT_CHAN; pp++)
            {
                for (int t = 0; t < 4; t++)
                {
                    for (int ss = 0; ss < ELEM_SIZE; ss++)
                    {
                        *buffer_ptr0 = out_buffer[ss * 4 * PER_OUT_CHAN + pp * 4 + t];
                        buffer_ptr0++;
                    }
                }
            }
            transform_output_f43_4tile(buffer, output, p, i, block_h, block_w, out_h, out_w, resi_h, resi_w,
                                       PER_OUT_CHAN, bias, activation);
#endif
        }
        /* remaining tiles, one at a time */
        for (; i < block_hw; i++)
        {
            const float* inp_ptr = inp + i * ELEM_SIZE * cin;
            float out_buffer[PER_OUT_CHAN * ELEM_SIZE];

            for (int s = 0; s < ELEM_SIZE; s++)
            {
#ifdef __aarch64__
                wino_sgemm_1x16(out_buffer + s * PER_OUT_CHAN, inp_ptr + s * cin,
                                ker_ptr + s * PER_OUT_CHAN * cin, cin);
#else
                wino_sgemm_1x12_A17(out_buffer + s * PER_OUT_CHAN, inp_ptr + s * cin,
                                    ker_ptr + s * PER_OUT_CHAN * cin, cin);
#endif
            }
            // interleave
            float buffer[PER_OUT_CHAN * ELEM_SIZE];
            float* buffer_ptr0 = buffer;

            for (int pp = 0; pp < PER_OUT_CHAN; pp++)
            {
                for (int ss = 0; ss < ELEM_SIZE; ss++)
                {
                    *buffer_ptr0 = out_buffer[ss * PER_OUT_CHAN + pp];
                    buffer_ptr0++;
                }
            }
            // end interleave
            transform_output_f43_1tile(( const float* )buffer, output, p, i, block_h, block_w, out_hw, out_w,
                                       resi_h, resi_w, PER_OUT_CHAN, bias, activation);
            // end transform
        }
    }
}

void
wino_sgemm_4x4(const float* ker, const float* inp, float* output, const float* bias, int cin, int cout_start,
               int cout_end, int block_h, int block_w, int out_h, int out_w, int resi_h, int resi_w,
               int activation, int num_thread, int cpu_affinity)
/* Winograd GEMM + output transform for the leftover output channels
 * [cout_start, cout_end) that do not fill a full PER_OUT_CHAN group.
 * Channels are processed 4 at a time, then one at a time; tiles 4 at a time,
 * then one at a time. Mirrors the structure of wino_sgemm_set but with 4-wide
 * (and scalar) micro-kernels. */
{
    int block_hw = block_h * block_w;
    int out_hw = out_w * out_h;
    int p, i;
    int flag_outw = 1;
    if (out_w < 16)
        flag_outw = 0;    // fast output path only for sufficiently wide outputs
    const float* ker_ptr;
    const float* inp_ptr;

    /* groups of 4 output channels */
    for (p = (cout_start & -4); p < (cout_end & -4); p += 4)
    {
        ker_ptr = ker + p * ELEM_SIZE * cin;

        /* 4 tiles at a time */
        for (i = 0; i < (block_hw & -4); i += 4)
        {
            inp_ptr = inp + i * ELEM_SIZE * cin;
            float out_buffer[4 * 4 * ELEM_SIZE];
#ifdef __aarch64__
            int idx_h[4];
            int idx_w[4];
            idx_h[0] = (i) / block_w;
            idx_h[1] = (i + 1) / block_w;
            idx_h[2] = (i + 2) / block_w;
            idx_h[3] = (i + 3) / block_w;
            idx_w[0] = (i) % block_w;
            idx_w[1] = (i + 1) % block_w;
            idx_w[2] = (i + 2) % block_w;
            idx_w[3] = (i + 3) % block_w;
            int wino_out_4_tiles = 0;
            int mulitplier = 4;

            /* fast path: 4 interior tiles on one row */
            if (flag_outw)
                if ((idx_h[0] == idx_h[3]) && (idx_h[0] < (block_h - 1)) && (idx_w[3] < (block_w - 1)))
                {
                    wino_out_4_tiles = 1;
                    mulitplier = 1;
                }
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                {
                    wino_sgemm_4x4_A72(out_buffer + s * 4 * mulitplier, inp_ptr + s * 4 * cin,
                                       ker_ptr + s * 4 * cin, cin, wino_out_4_tiles);
                }
            }
            if (wino_out_4_tiles == 1)
            {
                float* bias_ptr = NULL;

                for (int pss = 0; pss < 4; pss++)
                {
                    int cout_idx = p + pss;
                    float* out_ptr = output + cout_idx * out_hw + idx_h[0] * TILE * out_w + idx_w[0] * TILE;
                    if (bias)
                    {
                        bias_ptr = ( float* )(bias + cout_idx);
                    }
                    float ker00[4] = {2, 4, 8, 0};    // inverse-transform coefficients for the asm kernel
                    tran_out_4(out_buffer + pss * ELEM_SIZE * 4, out_ptr, out_w * sizeof(float), ker00,
                               bias_ptr, activation);
                }
            }
            else
            {
                /* generic path: de-interleave sgemm results, then per-tile output transform */
                float buffer[4 * 4 * ELEM_SIZE];
                float* buffer_ptr0 = buffer;

                for (int pp = 0; pp < 4; pp++)
                {
                    for (int t = 0; t < 4; t++)
                    {
                        for (int ss = 0; ss < ELEM_SIZE; ss++)
                        {
                            *buffer_ptr0 = out_buffer[ss * 4 * 4 + pp * 4 + t];
                            buffer_ptr0++;
                        }
                    }
                }
                // end interleave
                // transform_output_f43_4tile((const float*)buffer, output, p, i, block_h, block_w, out_hw, out_w,
                //                            resi_h, resi_w,
                //                            KER_COUT_UNIT, bias, bias_term);
                {
                    float tmp_buffer[TILE * TILE];
                    const float* bias_ptr = NULL;

                    for (int pss = 0; pss < 4; pss++)
                    {
                        int cout_idx = p + pss;
                        float* out_ptr = output + cout_idx * out_hw;
                        if (bias)
                        {
                            bias_ptr = bias + cout_idx;
                        }
                        for (int ii = 0; ii < 4; ii++)
                        {
                            int i_h = idx_h[ii];
                            int j_w = idx_w[ii];
                            if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) ||
                                (resi_w == 0 && (i_h < block_h - 1)) ||
                                ((j_w < block_w - 1) && (i_h < block_h - 1)))
                            {
                                trans_output_f43(buffer + ii * ELEM_SIZE + pss * 36 * 4,
                                                 out_ptr + (i_h * TILE * out_w + j_w * TILE), out_w,
                                                 ( const float* )bias_ptr, activation);
                            }
                            // direct use_out_ptr
                            else
                            {
                                int ret_h = TILE - resi_h;
                                if (i_h < block_h - 1)
                                    ret_h = TILE;
                                int ret_w = TILE - resi_w;
                                if (j_w < block_w - 1)
                                    ret_w = TILE;
                                // tmp_buffer
                                trans_output_f43_ordinary(buffer + ii * ELEM_SIZE + pss * 36 * 4, tmp_buffer,
                                                          ( const float* )bias_ptr);
                                float* out_pointer = out_ptr + (i_h * TILE * out_w + j_w * TILE);

                                for (int hh = 0; hh < ret_h; hh++)
                                {
                                    for (int ww = 0; ww < ret_w; ww++)
                                    {
                                        out_pointer[hh * out_w + ww] =
                                            do_activation(tmp_buffer[hh * 4 + ww], activation);
                                    }
                                }
                            }    // end else, tmp_buff
                        }
                    }
                }    // end transform
            }
#else
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                wino_sgemm_4x4_A17(out_buffer + s * 4 * 4, inp_ptr + s * 4 * cin, ker_ptr + s * 4 * cin, cin);
            }
            // interleave
            float buffer[4 * 4 * ELEM_SIZE];
            float* buffer_ptr0 = buffer;

            for (int pp = 0; pp < 4; pp++)
            {
                for (int t = 0; t < 4; t++)
                {
                    for (int ss = 0; ss < ELEM_SIZE; ss++)
                    {
                        *buffer_ptr0 = out_buffer[ss * 4 * 4 + pp * 4 + t];
                        buffer_ptr0++;
                    }
                }
            }
            // end interleave
            transform_output_f43_4tile(buffer, output, p, i, block_h, block_w, out_h, out_w, resi_h, resi_w, 4,
                                       bias, activation);
#endif
        }
        /* remaining tiles, one at a time */
        for (; i < block_hw; i++)
        {
            inp_ptr = inp + i * ELEM_SIZE * cin;
            float out_buffer[4 * ELEM_SIZE];

            for (int s = 0; s < ELEM_SIZE; s++)
            {
#ifdef __aarch64__
                wino_sgemm_1x4(out_buffer + s * 4, inp_ptr + s * cin, ker_ptr + s * 4 * cin, cin);
#else
                wino_sgemm_1x4_cpu(out_buffer + s * 4, inp_ptr + s * cin, ker_ptr + s * 4 * cin, cin);
#endif
            }
            // interleave
            float buffer[4 * ELEM_SIZE];
            float* buffer_ptr0 = buffer;

            for (int pp = 0; pp < 4; pp++)
            {
                for (int ss = 0; ss < ELEM_SIZE; ss++)
                {
                    *buffer_ptr0 = out_buffer[ss * 4 + pp];
                    buffer_ptr0++;
                }
            }
            // end interleave
            transform_output_f43_1tile(( const float* )buffer, output, p, i, block_h, block_w, out_hw, out_w,
                                       resi_h, resi_w, 4, bias, activation);
            // end transform
        }
    }
    /* single leftover output channels (scalar GEMM) */
    for (p = (cout_end & -4); p < cout_end; p++)
    {
        ker_ptr = ker + p * ELEM_SIZE * cin;

        for (i = 0; i < (block_hw & -4); i += 4)
        {
            inp_ptr = inp + i * ELEM_SIZE * cin;
            float buffer[4 * ELEM_SIZE];
            int idx_h[4];
            int idx_w[4];
            idx_h[0] = (i) / block_w;
            idx_h[1] = (i + 1) / block_w;
            idx_h[2] = (i + 2) / block_w;
            idx_h[3] = (i + 3) / block_w;
            idx_w[0] = (i) % block_w;
            idx_w[1] = (i + 1) % block_w;
            idx_w[2] = (i + 2) % block_w;
            idx_w[3] = (i + 3) % block_w;

            // gemm+interleave buffer[4][36]
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                float* inp_ = ( float* )(inp_ptr + s * 4 * cin);
                float* ker_ = ( float* )(ker_ptr + s * cin);
                float sum0 = 0;
                float sum1 = 0;
                float sum2 = 0;
                float sum3 = 0;

                for (int k = 0; k < cin; k++)
                {
                    sum0 += inp_[k * 4] * ker_[k];
                    sum1 += inp_[k * 4 + 1] * ker_[k];
                    sum2 += inp_[k * 4 + 2] * ker_[k];
                    sum3 += inp_[k * 4 + 3] * ker_[k];
                }
                buffer[s] = sum0;
                buffer[36 + s] = sum1;
                buffer[72 + s] = sum2;
                buffer[108 + s] = sum3;
            }
            // trans_out buffer[4][36]
            float tmp_buffer[TILE * TILE];
            const float* bias_ptr = NULL;
            float* out_ptr = output + p * out_hw;
            if (bias)
            {
                bias_ptr = bias + p;
            }
            for (int ii = 0; ii < 4; ii++)
            {
                int i_h = idx_h[ii];
                int j_w = idx_w[ii];
                if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) ||
                    (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1)))
                {
                    trans_output_f43(buffer + ii * ELEM_SIZE, out_ptr + (i_h * TILE * out_w + j_w * TILE),
                                     out_w, ( const float* )bias_ptr, activation);
                }
                // direct use_out_ptr
                else
                {
int ret_h = TILE - resi_h;
                    if (i_h < block_h - 1)
                        ret_h = TILE;
                    int ret_w = TILE - resi_w;
                    if (j_w < block_w - 1)
                        ret_w = TILE;
                    // tmp_buffer: border tile — transform to scratch, then clip
                    trans_output_f43_ordinary(buffer + ii * ELEM_SIZE, tmp_buffer, ( const float* )bias_ptr);
                    float* out_pointer = out_ptr + (i_h * TILE * out_w + j_w * TILE);

                    for (int hh = 0; hh < ret_h; hh++)
                    {
                        for (int ww = 0; ww < ret_w; ww++)
                        {
                            out_pointer[hh * out_w + ww] = do_activation(tmp_buffer[hh * 4 + ww], activation);
                        }
                    }
                }    // end else, tmp_buff
            }
            // end transform
        }
        /* remaining tiles for this single channel, one at a time (scalar dot product) */
        for (; i < block_hw; i++)
        {
            inp_ptr = inp + i * ELEM_SIZE * cin;
            float buffer[ELEM_SIZE];

            for (int s = 0; s < ELEM_SIZE; s++)
            {
                float* inp_ = ( float* )(inp_ptr + s * cin);
                float* ker_ = ( float* )(ker_ptr + s * cin);
                float sum = 0;

                for (int k = 0; k < cin; k++)
                {
                    sum += inp_[k] * ker_[k];
                }
                buffer[s] = sum;
            }
            // end interleave
            transform_output_f43_1tile(( const float* )buffer, output, p, i, block_h, block_w, out_hw, out_w,
                                       resi_h, resi_w, 1, bias, activation);
            // end transform
        }
    }
}

/* Size in bytes of the scratch buffer used to hold the transformed kernel.
 * NOTE(review): the param argument is unused; the +128 slack is unexplained ("caution"). */
static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param)
{
    int output_c = filter->dims[0];
    int input_c = filter->dims[1];
    int trans_ker_size = output_c * input_c * ELEM_SIZE * sizeof(float);
    return trans_ker_size + 128;    // caution
}

/* Pre-run step: transforms the 3x3 kernel into Winograd domain and interleaves it
 * into priv_info->interleave_buffer (allocated here unless the caller supplied an
 * external buffer).
 * NOTE(review): both sys_malloc results are used unchecked — a failed allocation
 * would crash here; consider returning an error instead. Always returns 0. */
int wino_conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor,
                         struct ir_tensor* output_tensor, struct conv_priv_info* priv_info,
                         struct conv_param* param)
{
    int output_c = filter_tensor->dims[0];
    int input_c = filter_tensor->dims[1];
    int mem_size = get_private_mem_size(filter_tensor, param);
    float* trans_mem = ( float* )sys_malloc(mem_size);

    if (!priv_info->external_interleave_mem)
    {
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    transform_kernel_f43_tile(filter_tensor, trans_mem);
    interleave_kernel(trans_mem, ( float* )priv_info->interleave_buffer, output_c, input_c);

    sys_free(trans_mem);
    return 0;
}

/* Post-run step: releases the interleave buffer if this module owns it. Always returns 0. */
int wino_conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }
    return 0;
}

/* Runs one Winograd F(4x4,3x3) convolution: pad input -> input transform ->
 * per-batch GEMM in Winograd domain -> inverse output transform (+bias/activation).
 * NOTE(review): several locals (kernel/stride/dilation sizes, kernel_size, col_buf,
 * out_c_align) are computed but unused in this body; sys_malloc results are unchecked.
 * Always returns 0. */
int wino_conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor,
                      struct ir_tensor* bias_tensor, struct ir_tensor* output_tensor,
                      struct conv_priv_info* priv_info, struct conv_param* param, int num_thread,
                      int cpu_affinity)
{
    /* param */
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;
    int act_type = param->activation;

    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1];
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;
    int kernel_size = in_c * kernel_h * kernel_w;

    int out_c = output_tensor->dims[1];
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int out_hw = out_h * out_w;
    int output_size = out_c * out_h * out_w;
    int out_c_align = ((out_c + 3) & -4);

    /* wino param: tile grid and padded input geometry */
    int block_h = (out_h + TILE - 1) / TILE;
    int block_w = (out_w + TILE - 1) / TILE;
    int block_hw = block_h * block_w;
    int padded_in_h = block_h * TILE + 2;
    int padded_in_w = block_w * TILE + 2;
    int padded_in_hw = padded_in_h * padded_in_w;

    /* buffer addr */
    float* input_buf = ( float* )input_tensor->data;
    float* output_buf = ( float* )output_tensor->data;
    float* biases_buf = NULL;
    if (bias_tensor != NULL)
        biases_buf = ( float* )bias_tensor->data;
    float* col_buf = ( float* )priv_info->im2col_buffer;
    float* interleave_buf = ( float* )priv_info->interleave_buffer;
    float* input_padd_buf = ( float* )sys_malloc(sizeof(float) * padded_in_hw * in_c + 128);
    float* trans_input_buf = ( float* )sys_malloc(sizeof(float) * block_hw * in_c * ELEM_SIZE + 128);

    int nn_out_c = out_c / PER_OUT_CHAN * PER_OUT_CHAN;    // channels covered by full groups
    int nn_block = block_hw >> 2;
    int resi_block = nn_block << 2;
    int resi_h = block_h * TILE - out_h;    // rows of the last tile row outside the image
    int resi_w = block_w * TILE - out_w;    // cols of the last tile column outside the image

    for (int n = 0; n < batch; n++)
    {
        float* input = input_buf + n * input_size;
        float* output = output_buf + n * output_size;

        /* PAD input */
        pad_input1(input, input_padd_buf, in_c, in_h, in_w, padded_in_h, padded_in_w, pad_h0, pad_w0);

        /* trans input: full 4-tile groups, then the remainder */
        tran_input_4block(input_padd_buf, trans_input_buf, in_c, block_h, block_w, padded_in_h, padded_in_w);
        if (resi_block != block_hw)
        {
            tran_input_resi_block(input_padd_buf, trans_input_buf, in_c, nn_block, resi_block, block_hw,
                                  block_w, padded_in_hw, padded_in_w);
        }

        /* sdot: GEMM + inverse transform, full channel groups then the remainder */
        wino_sgemm_set(interleave_buf, trans_input_buf, output, biases_buf, in_c, nn_out_c, block_h, block_w,
                       out_h, out_w, resi_h, resi_w, act_type, num_thread, cpu_affinity);
        if (nn_out_c != out_c)
        {
            wino_sgemm_4x4(interleave_buf, trans_input_buf, output, biases_buf, in_c, nn_out_c, out_c, block_h,
                           block_w, out_h, out_w, resi_h, resi_w, act_type, num_thread, cpu_affinity);
        }
    }

    sys_free(input_padd_buf);
    sys_free(trans_input_buf);
    return 0;
}
aux_interp.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
#include "hypre_hopscotch_hash.h"

/*---------------------------------------------------------------------------
 * Auxiliary routines for the long range interpolation methods.
 *  Implemented: "standard", "extended", "multipass", "FF"
 *--------------------------------------------------------------------------*/

/* AHB 11/06: Modification of the above original - takes two communication
   packages and inserts nodes to the position expected for OUT_marker

   offd nodes from comm_pkg take up the first chunk of CF_marker_offd, and
   offd nodes from extend_comm_pkg take up the second chunk of CF_marker_offd.
*/ HYPRE_Int hypre_alt_insert_new_nodes(hypre_ParCSRCommPkg *comm_pkg, hypre_ParCSRCommPkg *extend_comm_pkg, HYPRE_Int *IN_marker, HYPRE_Int full_off_procNodes, HYPRE_Int *OUT_marker) { hypre_ParCSRCommHandle *comm_handle; HYPRE_Int i, index, shift; HYPRE_Int num_sends, num_recvs; HYPRE_Int *recv_vec_starts; HYPRE_Int e_num_sends; HYPRE_Int *int_buf_data; HYPRE_Int *e_out_marker; num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); e_num_sends = hypre_ParCSRCommPkgNumSends(extend_comm_pkg); index = hypre_max(hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends)); int_buf_data = hypre_CTAlloc(HYPRE_Int, index); /* orig commpkg data*/ index = 0; HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = begin; i < end; ++i) { int_buf_data[i - begin] = IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, OUT_marker); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; /* now do the extend commpkg */ /* first we need to shift our position in the OUT_marker */ shift = recv_vec_starts[num_recvs]; e_out_marker = OUT_marker + shift; index = 0; begin = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, 0); end = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = begin; i < end; ++i) { int_buf_data[i - begin] = IN_marker[hypre_ParCSRCommPkgSendMapElmt(extend_comm_pkg, i)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, extend_comm_pkg, int_buf_data, e_out_marker); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; hypre_TFree(int_buf_data); return 
hypre_error_flag; } /* AHB 11/06 : alternate to the extend function below - creates a * second comm pkg based on found - this makes it easier to use the * global partition*/ HYPRE_Int hypre_ParCSRFindExtendCommPkg(hypre_ParCSRMatrix *A, HYPRE_Int newoff, HYPRE_Int *found, hypre_ParCSRCommPkg **extend_comm_pkg) { HYPRE_Int num_sends; HYPRE_Int *send_procs; HYPRE_Int *send_map_starts; HYPRE_Int *send_map_elmts; HYPRE_Int num_recvs; HYPRE_Int *recv_procs; HYPRE_Int *recv_vec_starts; hypre_ParCSRCommPkg *new_comm_pkg; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int first_col_diag = hypre_ParCSRMatrixFirstColDiag(A); /* use found instead of col_map_offd in A, and newoff instead of num_cols_offd*/ #ifdef HYPRE_NO_GLOBAL_PARTITION HYPRE_Int row_start=0, row_end=0, col_start = 0, col_end = 0; HYPRE_Int global_num_cols; hypre_IJAssumedPart *apart; hypre_ParCSRMatrixGetLocalRange( A, &row_start, &row_end , &col_start, &col_end ); global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); /* Create the assumed partition */ if (hypre_ParCSRMatrixAssumedPartition(A) == NULL) { hypre_ParCSRMatrixCreateAssumedPartition(A); } apart = hypre_ParCSRMatrixAssumedPartition(A); hypre_NewCommPkgCreate_core( comm, found, first_col_diag, col_start, col_end, newoff, global_num_cols, &num_recvs, &recv_procs, &recv_vec_starts, &num_sends, &send_procs, &send_map_starts, &send_map_elmts, apart); #else HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int num_cols_diag = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A)); hypre_MatvecCommPkgCreate_core ( comm, found, first_col_diag, col_starts, num_cols_diag, newoff, first_col_diag, found, 1, &num_recvs, &recv_procs, &recv_vec_starts, &num_sends, &send_procs, &send_map_starts, &send_map_elmts ); #endif new_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1); hypre_ParCSRCommPkgComm(new_comm_pkg) = comm; hypre_ParCSRCommPkgNumRecvs(new_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvProcs(new_comm_pkg) = recv_procs; 
hypre_ParCSRCommPkgRecvVecStarts(new_comm_pkg) = recv_vec_starts; hypre_ParCSRCommPkgNumSends(new_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendProcs(new_comm_pkg) = send_procs; hypre_ParCSRCommPkgSendMapStarts(new_comm_pkg) = send_map_starts; hypre_ParCSRCommPkgSendMapElmts(new_comm_pkg) = send_map_elmts; *extend_comm_pkg = new_comm_pkg; return hypre_error_flag; } /* sort for non-ordered arrays */ HYPRE_Int hypre_ssort(HYPRE_Int *data, HYPRE_Int n) { HYPRE_Int i,si; HYPRE_Int change = 0; if(n > 0) for(i = n-1; i > 0; i--){ si = hypre_index_of_minimum(data,i+1); if(i != si) { hypre_swap_int(data, i, si); change = 1; } } return change; } /* Auxilary function for hypre_ssort */ HYPRE_Int hypre_index_of_minimum(HYPRE_Int *data, HYPRE_Int n) { HYPRE_Int answer; HYPRE_Int i; answer = 0; for(i = 1; i < n; i++) if(data[answer] < data[i]) answer = i; return answer; } void hypre_swap_int(HYPRE_Int *data, HYPRE_Int a, HYPRE_Int b) { HYPRE_Int temp; temp = data[a]; data[a] = data[b]; data[b] = temp; return; } /* Initialize CF_marker_offd, CF_marker, P_marker, P_marker_offd, tmp */ void hypre_initialize_vecs(HYPRE_Int diag_n, HYPRE_Int offd_n, HYPRE_Int *diag_ftc, HYPRE_Int *offd_ftc, HYPRE_Int *diag_pm, HYPRE_Int *offd_pm, HYPRE_Int *tmp_CF) { HYPRE_Int i; /* Quicker initialization */ if(offd_n < diag_n) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for(i = 0; i < offd_n; i++) { diag_ftc[i] = -1; offd_ftc[i] = -1; tmp_CF[i] = -1; if(diag_pm != NULL) { diag_pm[i] = -1; } if(offd_pm != NULL) { offd_pm[i] = -1;} } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for(i = offd_n; i < diag_n; i++) { diag_ftc[i] = -1; if(diag_pm != NULL) { diag_pm[i] = -1; } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for(i = 0; i < diag_n; i++) { diag_ftc[i] = -1; offd_ftc[i] = -1; tmp_CF[i] = -1; if(diag_pm != NULL) { diag_pm[i] = -1;} if(offd_pm != NULL) { offd_pm[i] = -1;} } #ifdef 
HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for(i = diag_n; i < offd_n; i++) { offd_ftc[i] = -1; tmp_CF[i] = -1; if(offd_pm != NULL) { offd_pm[i] = -1;} } } return; } /* Find nodes that are offd and are not contained in original offd * (neighbors of neighbors) */ static HYPRE_Int hypre_new_offd_nodes(HYPRE_Int **found, HYPRE_Int num_cols_A_offd, HYPRE_Int *A_ext_i, HYPRE_Int *A_ext_j, HYPRE_Int num_cols_S_offd, HYPRE_Int *col_map_offd, HYPRE_Int col_1, HYPRE_Int col_n, HYPRE_Int *Sop_i, HYPRE_Int *Sop_j, HYPRE_Int *CF_marker_offd) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif HYPRE_Int i, i1, j, kk, k1; HYPRE_Int got_loc, loc_col; /*HYPRE_Int min;*/ HYPRE_Int newoff = 0; #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_UnorderedIntMap col_map_offd_inverse; hypre_UnorderedIntMapCreate(&col_map_offd_inverse, 2*num_cols_A_offd, 16*hypre_NumThreads()); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_A_offd; i++) { hypre_UnorderedIntMapPutIfAbsent(&col_map_offd_inverse, col_map_offd[i], i); } /* Find nodes that will be added to the off diag list */ HYPRE_Int size_offP = A_ext_i[num_cols_A_offd]; hypre_UnorderedIntSet set; hypre_UnorderedIntSetCreate(&set, size_offP, 16*hypre_NumThreads()); #pragma omp parallel private(i,j,i1) { #pragma omp for HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_A_offd; i++) { if (CF_marker_offd[i] < 0) { for (j = A_ext_i[i]; j < A_ext_i[i+1]; j++) { i1 = A_ext_j[j]; if(i1 < col_1 || i1 >= col_n) { if (!hypre_UnorderedIntSetContains(&set, i1)) { HYPRE_Int k = hypre_UnorderedIntMapGet(&col_map_offd_inverse, i1); if (-1 == k) { hypre_UnorderedIntSetPut(&set, i1); } else { A_ext_j[j] = -k - 1; } } } } for (j = Sop_i[i]; j < Sop_i[i+1]; j++) { i1 = Sop_j[j]; if(i1 < col_1 || i1 >= col_n) { if (!hypre_UnorderedIntSetContains(&set, i1)) { Sop_j[j] = -hypre_UnorderedIntMapGet(&col_map_offd_inverse, i1) - 1; } } } } /* CF_marker_offd[i] < 0 */ } /* for each 
row */ } /* omp parallel */ hypre_UnorderedIntMapDestroy(&col_map_offd_inverse); HYPRE_Int *tmp_found = hypre_UnorderedIntSetCopyToArray(&set, &newoff); hypre_UnorderedIntSetDestroy(&set); /* Put found in monotone increasing order */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif hypre_UnorderedIntMap tmp_found_inverse; if (newoff > 0) { hypre_sort_and_create_inverse_map(tmp_found, newoff, &tmp_found, &tmp_found_inverse); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif /* Set column indices for Sop and A_ext such that offd nodes are * negatively indexed */ #pragma omp parallel for private(kk,k1,got_loc,loc_col) HYPRE_SMP_SCHEDULE for(i = 0; i < num_cols_A_offd; i++) { if (CF_marker_offd[i] < 0) { for(kk = Sop_i[i]; kk < Sop_i[i+1]; kk++) { k1 = Sop_j[kk]; if(k1 > -1 && (k1 < col_1 || k1 >= col_n)) { got_loc = hypre_UnorderedIntMapGet(&tmp_found_inverse, k1); loc_col = got_loc + num_cols_A_offd; Sop_j[kk] = -loc_col - 1; } } for (kk = A_ext_i[i]; kk < A_ext_i[i+1]; kk++) { k1 = A_ext_j[kk]; if(k1 > -1 && (k1 < col_1 || k1 >= col_n)) { got_loc = hypre_UnorderedIntMapGet(&tmp_found_inverse, k1); loc_col = got_loc + num_cols_A_offd; A_ext_j[kk] = -loc_col - 1; } } } } if (newoff) { hypre_UnorderedIntMapDestroy(&tmp_found_inverse); } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_Int size_offP; HYPRE_Int *tmp_found; HYPRE_Int min; HYPRE_Int ifound; size_offP = A_ext_i[num_cols_A_offd]+Sop_i[num_cols_A_offd]; tmp_found = hypre_CTAlloc(HYPRE_Int, size_offP); /* Find nodes that will be added to the off diag list */ for (i = 0; i < num_cols_A_offd; i++) { if (CF_marker_offd[i] < 0) { for (j = A_ext_i[i]; j < A_ext_i[i+1]; j++) { i1 = A_ext_j[j]; if(i1 < col_1 || i1 >= col_n) { ifound = hypre_BinarySearch(col_map_offd,i1,num_cols_A_offd); if(ifound == -1) { tmp_found[newoff]=i1; newoff++; } else { A_ext_j[j] = -ifound-1; } } } for (j = Sop_i[i]; j < Sop_i[i+1]; j++) { i1 = Sop_j[j]; 
if(i1 < col_1 || i1 >= col_n) { ifound = hypre_BinarySearch(col_map_offd,i1,num_cols_A_offd); if(ifound == -1) { tmp_found[newoff]=i1; newoff++; } else { Sop_j[j] = -ifound-1; } } } } } /* Put found in monotone increasing order */ if (newoff > 0) { hypre_qsort0(tmp_found,0,newoff-1); ifound = tmp_found[0]; min = 1; for (i=1; i < newoff; i++) { if (tmp_found[i] > ifound) { ifound = tmp_found[i]; tmp_found[min++] = ifound; } } newoff = min; } /* Set column indices for Sop and A_ext such that offd nodes are * negatively indexed */ for(i = 0; i < num_cols_A_offd; i++) { if (CF_marker_offd[i] < 0) { for(kk = Sop_i[i]; kk < Sop_i[i+1]; kk++) { k1 = Sop_j[kk]; if(k1 > -1 && (k1 < col_1 || k1 >= col_n)) { got_loc = hypre_BinarySearch(tmp_found,k1,newoff); if(got_loc > -1) loc_col = got_loc + num_cols_A_offd; Sop_j[kk] = -loc_col - 1; } } for (kk = A_ext_i[i]; kk < A_ext_i[i+1]; kk++) { k1 = A_ext_j[kk]; if(k1 > -1 && (k1 < col_1 || k1 >= col_n)) { got_loc = hypre_BinarySearch(tmp_found,k1,newoff); loc_col = got_loc + num_cols_A_offd; A_ext_j[kk] = -loc_col - 1; } } } } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ *found = tmp_found; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif return newoff; } HYPRE_Int hypre_exchange_marker(hypre_ParCSRCommPkg *comm_pkg, HYPRE_Int *IN_marker, HYPRE_Int *OUT_marker) { HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); HYPRE_Int *int_buf_data = hypre_CTAlloc(HYPRE_Int, end); HYPRE_Int i; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = begin; i < end; ++i) { int_buf_data[i - begin] = IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)]; } hypre_ParCSRCommHandle *comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, OUT_marker); hypre_ParCSRCommHandleDestroy(comm_handle); 
hypre_TFree(int_buf_data); return hypre_error_flag; } HYPRE_Int hypre_exchange_interp_data( HYPRE_Int **CF_marker_offd, HYPRE_Int **dof_func_offd, hypre_CSRMatrix **A_ext, HYPRE_Int *full_off_procNodes, hypre_CSRMatrix **Sop, hypre_ParCSRCommPkg **extend_comm_pkg, hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int skip_fine_or_same_sign) // skip_fine_or_same_sign if we want to skip fine points in S and nnz with the same sign as diagonal in A { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] -= hypre_MPI_Wtime(); #endif hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int col_n = col_1 + local_numrows; HYPRE_Int *found = NULL; /*---------------------------------------------------------------------- * Get the off processors rows for A and S, associated with columns in * A_offd and S_offd. 
*---------------------------------------------------------------------*/ *CF_marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd); hypre_exchange_marker(comm_pkg, CF_marker, *CF_marker_offd); hypre_ParCSRCommHandle *comm_handle_a_idx, *comm_handle_a_data; *A_ext = hypre_ParCSRMatrixExtractBExt_Overlap(A,A,1,&comm_handle_a_idx,&comm_handle_a_data,CF_marker,*CF_marker_offd,skip_fine_or_same_sign,skip_fine_or_same_sign); HYPRE_Int *A_ext_i = hypre_CSRMatrixI(*A_ext); HYPRE_Int *A_ext_j = hypre_CSRMatrixJ(*A_ext); HYPRE_Int A_ext_rows = hypre_CSRMatrixNumRows(*A_ext); hypre_ParCSRCommHandle *comm_handle_s_idx; *Sop = hypre_ParCSRMatrixExtractBExt_Overlap(S,A,0,&comm_handle_s_idx,NULL,CF_marker,*CF_marker_offd,skip_fine_or_same_sign,0); HYPRE_Int *Sop_i = hypre_CSRMatrixI(*Sop); HYPRE_Int *Sop_j = hypre_CSRMatrixJ(*Sop); HYPRE_Int Soprows = hypre_CSRMatrixNumRows(*Sop); HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_s_idx->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_s_idx); hypre_TFree(send_idx); send_idx = (HYPRE_Int *)comm_handle_a_idx->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_a_idx); hypre_TFree(send_idx); /* Find nodes that are neighbors of neighbors, not found in offd */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] += hypre_MPI_Wtime(); #endif HYPRE_Int newoff = hypre_new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j, Soprows, col_map_offd, col_1, col_n, Sop_i, Sop_j, *CF_marker_offd); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] -= hypre_MPI_Wtime(); #endif if(newoff >= 0) *full_off_procNodes = newoff + num_cols_A_offd; else { return hypre_error_flag; } /* Possibly add new points and new processors to the comm_pkg, all * processors need new_comm_pkg */ /* AHB - create a new comm package just for extended info - this will work better with the assumed partition*/ hypre_ParCSRFindExtendCommPkg(A, newoff, found, extend_comm_pkg); *CF_marker_offd = hypre_TReAlloc(*CF_marker_offd, 
HYPRE_Int, *full_off_procNodes); hypre_exchange_marker(*extend_comm_pkg, CF_marker, *CF_marker_offd + A_ext_rows); if(num_functions > 1) { if (*full_off_procNodes > 0) *dof_func_offd = hypre_CTAlloc(HYPRE_Int, *full_off_procNodes); hypre_alt_insert_new_nodes(comm_pkg, *extend_comm_pkg, dof_func, *full_off_procNodes, *dof_func_offd); } hypre_TFree(found); HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_a_data->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_a_data); hypre_TFree(send_data); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] += hypre_MPI_Wtime(); #endif return hypre_error_flag; } void hypre_build_interp_colmap(hypre_ParCSRMatrix *P, HYPRE_Int full_off_procNodes, HYPRE_Int *tmp_CF_marker_offd, HYPRE_Int *fine_to_coarse_offd) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif HYPRE_Int i, index; HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P->diag); HYPRE_Int P_offd_size = P->offd->i[n_fine]; HYPRE_Int *P_offd_j = P->offd->j; HYPRE_Int *col_map_offd_P = NULL; HYPRE_Int *P_marker = NULL; if (full_off_procNodes) P_marker = hypre_TAlloc(HYPRE_Int, full_off_procNodes); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < full_off_procNodes; i++) P_marker[i] = 0; #ifdef HYPRE_CONCURRENT_HOPSCOTCH /* These two loops set P_marker[i] to 1 if it appears in P_offd_j and if * tmp_CF_marker_offd has i marked. 
num_cols_P_offd is then set to the * total number of times P_marker is set */ #pragma omp parallel for private(i,index) HYPRE_SMP_SCHEDULE for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if(tmp_CF_marker_offd[index] >= 0) { P_marker[index] = 1; } } HYPRE_Int prefix_sum_workspace[hypre_NumThreads() + 1]; HYPRE_Int num_cols_P_offd = 0; #pragma omp parallel private(i) { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, full_off_procNodes); HYPRE_Int local_num_cols_P_offd = 0; for (i = i_begin; i < i_end; i++) { if (P_marker[i] == 1) local_num_cols_P_offd++; } hypre_prefix_sum(&local_num_cols_P_offd, &num_cols_P_offd, prefix_sum_workspace); #pragma omp master { if (num_cols_P_offd) col_map_offd_P = hypre_TAlloc(HYPRE_Int, num_cols_P_offd); } #pragma omp barrier for (i = i_begin; i < i_end; i++) { if (P_marker[i] == 1) { col_map_offd_P[local_num_cols_P_offd++] = fine_to_coarse_offd[i]; } } } hypre_UnorderedIntMap col_map_offd_P_inverse; hypre_sort_and_create_inverse_map(col_map_offd_P, num_cols_P_offd, &col_map_offd_P, &col_map_offd_P_inverse); // find old idx -> new idx map #pragma omp parallel for for (i = 0; i < full_off_procNodes; i++) P_marker[i] = hypre_UnorderedIntMapGet(&col_map_offd_P_inverse, fine_to_coarse_offd[i]); if (num_cols_P_offd) { hypre_UnorderedIntMapDestroy(&col_map_offd_P_inverse); } #pragma omp parallel for for(i = 0; i < P_offd_size; i++) P_offd_j[i] = P_marker[P_offd_j[i]]; #else /* HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_Int num_cols_P_offd = 0; HYPRE_Int j; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { if(tmp_CF_marker_offd[index] >= 0) { num_cols_P_offd++; P_marker[index] = 1; } } } if (num_cols_P_offd) col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd); index = 0; for(i = 0; i < num_cols_P_offd; i++) { while( P_marker[index] == 0) index++; col_map_offd_P[i] = index++; } for(i = 0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(col_map_offd_P, P_offd_j[i], 
num_cols_P_offd); index = 0; for(i = 0; i < num_cols_P_offd; i++) { while (P_marker[index] == 0) index++; col_map_offd_P[i] = fine_to_coarse_offd[index]; index++; } /* Sort the col_map_offd_P and P_offd_j correctly */ for(i = 0; i < num_cols_P_offd; i++) P_marker[i] = col_map_offd_P[i]; /* Check if sort actually changed anything */ if(hypre_ssort(col_map_offd_P,num_cols_P_offd)) { for(i = 0; i < P_offd_size; i++) for(j = 0; j < num_cols_P_offd; j++) if(P_marker[P_offd_j[i]] == col_map_offd_P[j]) { P_offd_j[i] = j; j = num_cols_P_offd; } } #endif /* HYPRE_CONCURRENT_HOPSCOTCH */ hypre_TFree(P_marker); if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P->offd) = num_cols_P_offd; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif }
GB_binop__rdiv_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__rdiv_fc64 // A.*B function (eWiseMult): GB_AemultB__rdiv_fc64 // A*D function (colscale): GB_AxD__rdiv_fc64 // D*A function (rowscale): GB_DxB__rdiv_fc64 // C+=B function (dense accum): GB_Cdense_accumB__rdiv_fc64 // C+=b function (dense accum): GB_Cdense_accumb__rdiv_fc64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rdiv_fc64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rdiv_fc64 // C=scalar+B GB_bind1st__rdiv_fc64 // C=scalar+B' GB_bind1st_tran__rdiv_fc64 // C=A+scalar GB_bind2nd__rdiv_fc64 // C=A'+scalar GB_bind2nd_tran__rdiv_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = GB_FC64_div (bij, aij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GxB_FC64_t bij = Bx [pB] // declare scalar of the same type as C #define 
GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_FC64_div (y, x) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_FC64 || GxB_NO_RDIV_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__rdiv_fc64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__rdiv_fc64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__rdiv_fc64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__rdiv_fc64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__rdiv_fc64 ( GrB_Matrix 
C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__rdiv_fc64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__rdiv_fc64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__rdiv_fc64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const 
GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__rdiv_fc64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t bij = Bx [p] ; Cx [p] = GB_FC64_div (bij, x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__rdiv_fc64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; Cx [p] = GB_FC64_div (y, aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of 
the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_FC64_div (aij, x) ; \ } GrB_Info GB_bind1st_tran__rdiv_fc64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_FC64_div (y, aij) ; \ } GrB_Info GB_bind2nd_tran__rdiv_fc64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__rminus_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__rminus_int64 // A.*B function (eWiseMult): GB_AemultB__rminus_int64 // A*D function (colscale): GB_AxD__rminus_int64 // D*A function (rowscale): GB_DxB__rminus_int64 // C+=B function (dense accum): GB_Cdense_accumB__rminus_int64 // C+=b function (dense accum): GB_Cdense_accumb__rminus_int64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_int64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_int64 // C=scalar+B GB_bind1st__rminus_int64 // C=scalar+B' GB_bind1st_tran__rminus_int64 // C=A+scalar GB_bind2nd__rminus_int64 // C=A'+scalar GB_bind2nd_tran__rminus_int64 // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define 
GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (y - x) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_INT64 || GxB_NO_RMINUS_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__rminus_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__rminus_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__rminus_int64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__rminus_int64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__rminus_int64 ( 
GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__rminus_int64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__rminus_int64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice 
= NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__rminus_int64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__rminus_int64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = Bx [p] ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator 
to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__rminus_int64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = Ax [p] ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB_bind1st_tran__rminus_int64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB_bind2nd_tran__rminus_int64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
target_data.c
// RUN: %libomptarget-compile-generic -fopenmp-version=51 // RUN: %libomptarget-run-fail-generic 2>&1 \ // RUN: | %fcheck-generic #include <stdio.h> int main() { int i; // CHECK: addr=0x[[#%x,HOST_ADDR:]], size=[[#%u,SIZE:]] fprintf(stderr, "addr=%p, size=%ld\n", &i, sizeof i); // CHECK-NOT: Libomptarget #pragma omp target data map(alloc: i) #pragma omp target data map(present, alloc: i) ; // CHECK: i is present fprintf(stderr, "i is present\n"); // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] ([[#SIZE]] bytes) // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target data map(present, alloc: i) ; // CHECK-NOT: i is present fprintf(stderr, "i is present\n"); return 0; }
mesh.c
/* Copyright (c) 2012 by Marcin Krotkiewski, University of Oslo
   See ../License.txt for License Agreement. */

#include "mesh.h"
#include <libutils/utils.h>
#include <libutils/parallel.h>

/*
  Validate the element connectivity array ELEMS.

  Checks, for every entry, that the node index is >= ONE_BASED_INDEX and
  (when n_nodes is non-zero) refers to an existing node.  Errors are reported
  through USERERROR, which does not return.

  Returns the number of distinct-node slots actually referenced, i.e.
  max(elems[]) + 1 - ONE_BASED_INDEX over all entries.

  Parallelized with OpenMP when USE_OPENMP is defined: each thread scans a
  contiguous block of elements; the per-thread maxima are then combined into
  n_ref_nodes_total by letting the threads update it one at a time, separated
  by barriers (a serialized max-reduction).
*/
dimType validate_elems(const dimType *elems, dimType n_elems, dimType n_nodes, dimType n_elem_nodes)
{
  dimType n_ref_nodes_total = 0;
  uint64_t temp;

  /* guard the index arithmetic below: n_elems*n_elem_nodes must not overflow */
  safemult_u((Ulong)n_elems, (Ulong)n_elem_nodes, temp, "nel X n_elem_nodes must fit into a 64-bit type");

  /* data validation */
#ifdef USE_OPENMP
#pragma omp parallel
#endif
  {
    Uint thrid, nthr;
    dimType n_ref_nodes = 0;   /* per-thread maximum referenced node count */
    Ulong i;
    Ulong blk_size;
    Ulong el_start, el_end;

    parallel_get_info(&thrid, &nthr);

    /* perform work distribution and bind to CPUs */
    blk_size = n_elems/nthr+1;
    el_start = thrid*blk_size;
    el_end   = (thrid+1)*blk_size;
    if(thrid==nthr-1) el_end = n_elems;   /* last thread takes the remainder */

    for(i=el_start*n_elem_nodes; i<el_end*n_elem_nodes; i++){
      if(elems[i] < ONE_BASED_INDEX){
        USERERROR("Invalid ELEMS. Node index can not be less than %d", MUTILS_INVALID_PARAMETER, ONE_BASED_INDEX);
      }
      if(n_nodes && elems[i] - ONE_BASED_INDEX >= n_nodes){
        USERERROR("Illegal mesh structure: ELEMS access non-existant node IDs.", MUTILS_INVALID_MESH);
      }
      /* NOTE(review): elems[i]+1 could wrap if elems[i]==MaxDimType and the
         n_nodes bound check above is disabled (n_nodes==0) — confirm callers
         never pass n_nodes==0 with untrusted data. */
      n_ref_nodes = MAX(n_ref_nodes, elems[i]+1-ONE_BASED_INDEX);
    }

    /* reduction : max — threads take turns, separated by barriers, so the
       read-modify-write of n_ref_nodes_total is never concurrent */
    for(i=0; i<nthr; i++){
      if(thrid==i){
        n_ref_nodes_total = MAX(n_ref_nodes, n_ref_nodes_total);
      }
#ifdef USE_OPENMP
#pragma omp barrier
#endif
    }
  }

  return n_ref_nodes_total;
}

/*
  Sanity-check a matrix dimension: must be positive and strictly less than
  MaxDimType (MaxDimType itself is reserved).  Fails via USERERROR.
*/
void validate_matrix_dim(dimType matrix_dim)
{
  if(matrix_dim == MaxDimType)
    USERERROR("Matrix dimension is too large. Must be at most %"PRI_DIMTYPE, MUTILS_INVALID_PARAMETER, MaxDimType-1);
  if(matrix_dim <= 0)
    USERERROR("Matrix dimension must be larger than 0.", MUTILS_INVALID_PARAMETER);
}

#ifdef MATLAB_MEX_FILE
#include "mexparams.h"

/*
  Convert a MATLAB mesh structure into a t_mesh.

  Reads the fields ELEMS (required dims: n_elem_nodes x n_elems),
  NODES (n_dim x n_nodes) and NEIGHBORS (n_neighbors x n_elems, optional —
  last mex_get_matrix argument is 1), validating every dimension against
  dimType range.  ELEMS node indices are validated via validate_elems();
  NEIGHBORS entries are range-checked here.

  n_dim: expected spatial dimension; 0 means "accept whatever NODES has".

  NOTE(review): the returned t_mesh aliases the mxArray data (no copies are
  made here) — presumably the caller must not use it after the mxArrays are
  destroyed; verify against mex_get_matrix semantics.
*/
t_mesh mex2mesh(const mxArray *mesh_struct, Uint n_dim){
  size_t m, n;
  char buff[256];
  mxArray *field;
  t_mesh mesh = EMPTY_MESH_STRUCT;
  Ulong i;

  if(!mxIsStruct(mesh_struct)){
    USERERROR("mesh_struct is not a structure", MUTILS_INVALID_MESH);
  }

  /* ELEMS: both dimensions free (0), reported back through m/n */
  m = 0; n = 0;
  field = mxGetField(mesh_struct, 0, "ELEMS");
  mesh.elems = mex_get_matrix(dimType, field, &m, &n, "MESH.ELEMS", "number of element nodes", "number of elements", 0);
  SNPRINTF(buff, 255, "No dimensions of 'MESH.ELEMS' can be larger than %"PRI_DIMTYPE, MaxDimType);
  managed_type_cast(dimType, mesh.n_elem_nodes, m, buff);
  managed_type_cast(dimType, mesh.n_elems, n, buff);

  /* NODES: first dimension pinned to n_dim when the caller specified one */
  {
    char _buff[128];
    if(n_dim){
      sprintf(_buff, "%"PRI_UINT, n_dim);
    } else {
      sprintf(_buff, "number of dimensions");
    }
    m = n_dim; n = 0;
    field = mxGetField(mesh_struct, 0, "NODES");
    mesh.nodes = mex_get_matrix(Double, field, &m, &n, "MESH.NODES", _buff, "number of nodes", 0);
    SNPRINTF(buff, 255, "No dimensions of 'MESH.NODES' can be larger than %"PRI_DIMTYPE, MaxDimType);
    managed_type_cast(dimType, mesh.n_dim, m, buff);
    managed_type_cast(dimType, mesh.n_nodes, n, buff);
  }

  /* NEIGHBORS: optional field; column count must match n_elems */
  m = 0; n = mesh.n_elems;
  field = mxGetField(mesh_struct, 0, "NEIGHBORS");
  mesh.neighbors = mex_get_matrix(dimType, field, &m, &n, "MESH.NEIGHBORS", "number of element neighbors", "number of elements", 1);
  SNPRINTF(buff, 255, "No dimensions of 'MESH.NEIGHBORS' can be larger than %"PRI_DIMTYPE, MaxDimType);
  managed_type_cast(dimType, mesh.n_neighbors, m, buff);

  /* validate input */
  mesh.n_ref_nodes = validate_elems(mesh.elems, mesh.n_elems, mesh.n_nodes, mesh.n_elem_nodes);

  /* TODO: parallelize */
  if(mesh.neighbors){
    for(i=0; i<(Ulong)mesh.n_elems*mesh.n_neighbors; i++){
      if(mesh.neighbors[i] < ONE_BASED_INDEX && mesh.neighbors[i] != NO_NEIGHBOR){
        USERERROR("Invalid NEIGHBORS. Element id must be greater than or equal to %d.\n" \
                  "To indicate non-existance of a neighbor use %"PRI_DIMTYPE, MUTILS_INVALID_PARAMETER, ONE_BASED_INDEX, NO_NEIGHBOR);
      }
      if(mesh.neighbors[i] - ONE_BASED_INDEX >= mesh.n_elems && mesh.neighbors[i] != NO_NEIGHBOR){
        USERERROR("Illegal mesh structure: NEIGHBORS access non-existant elemsent IDs.", MUTILS_INVALID_MESH);
      }
    }
  }

  return mesh;
}
#endif /* MATLAB_MEX_FILE */
geo_yeefdtd.kernel_runtime.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> #include "local_header.h" #include "openmp_pscmc_inc.h" #include "geo_yeefdtd.kernel_inc.h" int openmp_GEO_YEE_CURL_L_init (openmp_pscmc_env * pe ,openmp_GEO_YEE_CURL_L_struct * kerstr ){ return 0 ;} void openmp_GEO_YEE_CURL_L_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_GEO_YEE_CURL_L_struct )); } int openmp_GEO_YEE_CURL_L_get_num_compute_units (openmp_GEO_YEE_CURL_L_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_GEO_YEE_CURL_L_get_xlen (){ return IDX_OPT_MAX ;} int openmp_GEO_YEE_CURL_L_exec (openmp_GEO_YEE_CURL_L_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_GEO_YEE_CURL_L_scmc_kernel ( ( kerstr )->inoutE1 , ( kerstr )->inB0 , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->x0)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_GEO_YEE_CURL_L_scmc_set_parameter_inoutE1 (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inoutE1 = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_inB0 (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inB0 = pm->d_data); } int 
openmp_GEO_YEE_CURL_L_scmc_set_parameter_xoffset (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xoffset = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_yoffset (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yoffset = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_zoffset (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zoffset = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_y_cpu_core (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_numvec (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_XLEN (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_YLEN (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_ZLEN (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_ovlp (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_xblock (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_yblock (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_zblock (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_num_ele (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = 
pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_DT (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_DELTA_Z (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DELTA_Z = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_DELTA_Y (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DELTA_Y = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_DELTA_X (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DELTA_X = pm->d_data); } int openmp_GEO_YEE_CURL_L_scmc_set_parameter_x0 (openmp_GEO_YEE_CURL_L_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->x0 = pm->d_data); }
wand-view.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % W W AAA N N DDDD % % W W A A NN N D D % % W W W AAAAA N N N D D % % WW WW A A N NN D D % % W W A A N N DDDD % % % % V V IIIII EEEEE W W % % V V I E W W % % V V I EEE W W W % % V V I E WW WW % % V IIIII EEEEE W W % % % % % % MagickWand Wand View Methods % % % % Software Design % % Cristy % % March 2003 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickWand/studio.h" #include "MagickWand/MagickWand.h" #include "MagickWand/magick-wand-private.h" #include "MagickWand/wand.h" #include "MagickCore/monitor-private.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #define WandViewId "WandView" /* Typedef declarations. */ struct _WandView { size_t id; char name[MagickPathExtent], *description; RectangleInfo extent; MagickWand *wand; Image *image; CacheView *view; PixelWand ***pixel_wands; ExceptionInfo *exception; MagickBooleanType debug; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e W a n d V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneWandView() makes a copy of the specified wand view. 
%
%  The format of the CloneWandView method is:
%
%      WandView *CloneWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView
    *clone_view;

  register ssize_t
    i;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  clone_view->image=CloneImage(wand_view->image,0,0,MagickTrue,
    wand_view->exception);
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /*
    NOTE(review): clone_view->pixel_wands is still NULL at this point (the
    struct was memset to 0 above and no per-thread wand array is allocated
    before the loop writes clone_view->pixel_wands[i]).  Verify against the
    upstream source that an allocation (e.g. a pixel thread-set acquire) was
    not dropped here.  Likewise clone_view->wand is never assigned.
  */
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=MagickWandSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y W a n d V i e w                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyWandView() deallocates memory associated with a wand view.
%
%  The format of the DestroyWandView method is:
%
%      WandView *DestroyWandView(WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/

/*
  Destroy a per-thread array of pixel-wand arrays: each non-NULL slot (one
  per thread, up to the thread resource limit) is released with
  DestroyPixelWands, then the outer array itself is relinquished.
  Returns NULL for convenient pointer resetting.
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands)
{
  register ssize_t
    i;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixel_wands[i] != (PixelWand **) NULL)
      pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
  pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
  return(pixel_wands);
}

WandExport WandView *DestroyWandView(WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  /* release owned resources in turn: pixel wands, image, cache view,
     exception; then invalidate the signature before freeing the struct so
     stale pointers fail the signature assertions above */
  wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
    wand_view->extent.width);
  wand_view->image=DestroyImage(wand_view->image);
  wand_view->view=DestroyCacheView(wand_view->view);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  wand_view->signature=(~MagickWandSignature);
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferWandViewIterator() iterates over three wand views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel extent is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination wand view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
%        const WandView *duplex,WandView *destination,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the DuplexTransferWandViewIterator method is:
%
%      MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
%        WandView *duplex,WandView *destination,
%        DuplexTransferWandViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o duplex: the duplex wand view.
%
%    o destination: the destination wand view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
  WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
  void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  /*
    NOTE(review): only `source` is assert-checked; `duplex` and `destination`
    are dereferenced unchecked -- callers must guarantee they are non-NULL.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (transfer == (DuplexTransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /*
    Destination must be DirectClass so its pixels can be written in place.
  */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    register ssize_t
      x;

    register Quantum
      *magick_restrict destination_pixels;

    /*
      A failure in any thread makes the remaining iterations no-ops;
      `continue` (not `break`) is required inside an OpenMP parallel for.
    */
    if (status == MagickFalse)
      continue;
    /*
      Load the source scanline into this thread's pixel wand set.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    /*
      Load the duplex scanline likewise.
    */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) duplex->extent.width; x++)
    {
      PixelSetQuantumPixel(duplex->image,duplex_pixels,
        duplex->pixel_wands[id][x]);
      duplex_pixels+=GetPixelChannels(duplex->image);
    }
    /*
      Pre-populate the destination wands from the current destination pixels
      so the callback sees the existing values.
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelSetQuantumPixel(destination->image,destination_pixels,
        destination->pixel_wands[id][x]);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Write the (possibly updated) destination wands back to the cache view.
      The authentic pixels are re-acquired because the callback may have
      invalidated the earlier pointer.
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        destination_pixels);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w E x c e p t i o n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewException() returns the severity, reason, and description of any
%  error that occurs when utilizing a wand view.
%
%  The format of the GetWandViewException method is:
%
%      char *GetWandViewException(const WandView *wand_view,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o wand_view: the pixel wand_view.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *description;  /* caller owns the returned string and must free it */

  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=wand_view->exception->severity;
  /*
    Buffer is 2*MagickPathExtent so reason+" ("+description+")" fits;
    the copy/concat calls below each bound at MagickPathExtent.
  */
  description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *description='\0';
  if (wand_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      wand_view->exception->severity,wand_view->exception->reason),
      MagickPathExtent);
  if (wand_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MagickPathExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        wand_view->exception->severity,wand_view->exception->description),
        MagickPathExtent);
      (void) ConcatenateMagickString(description,")",MagickPathExtent);
    }
  return(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w E x t e n t                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewExtent() returns the wand view extent.
%
%  The format of the GetWandViewExtent method is:
%
%      RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w I t e r a t o r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewIterator() iterates over the wand view in parallel and calls
%  your get method for each scanline of the view.  The pixel extent is
%  not confined to the image canvas-- that is you can include negative offsets
%  or widths or heights that exceed the image dimension.  Any updates to
%  the pixels in your callback are ignored.
%
%  The callback signature is:
%
%      MagickBooleanType GetImageViewMethod(const WandView *source,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetWandViewIterator method is:
%
%      MagickBooleanType GetWandViewIterator(WandView *source,
%        GetWandViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
  GetWandViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (get == (GetWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *pixels;

    register ssize_t
      x;

    /*
      Once any scanline fails, the remaining iterations become no-ops
      (cannot `break` out of an OpenMP parallel for).
    */
    if (status == MagickFalse)
      continue;
    /*
      Read-only access: virtual pixels, never synced back.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Expose the scanline to the callback via this thread's pixel wands.
    */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w P i x e l s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewPixels() returns the wand view pixel_wands.
%
%  The format of the GetWandViewPixels method is:
%
%      PixelWand *GetWandViewPixels(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  /*
    Returns the pixel-wand row owned by the *calling* thread; each OpenMP
    thread has its own set, so this is only meaningful inside a callback.
  */
  const int
    id = GetOpenMPThreadId();

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->pixel_wands[id]);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w W a n d                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewWand() returns the magick wand associated with the wand view.
%
%  The format of the GetWandViewWand method is:
%
%      MagickWand *GetWandViewWand(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s W a n d V i e w                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsWandView() returns MagickTrue if the parameter is verified as a wand
%  view object.
%
%  The format of the IsWandView method is:
%
%      MagickBooleanType IsWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  size_t
    length;

  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != MagickWandSignature)
    return(MagickFalse);
  /*
    The name is formatted as "<WandViewId>-<id>" by NewWandView(); checking
    the prefix distinguishes wand views from other signed wand objects.
  */
  length=strlen(WandViewId);
  if (LocaleNCompare(wand_view->name,WandViewId,length) != 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w W a n d V i e w                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandView() returns a wand view required for all other methods in the
%  Wand View API.
%
%  The format of the NewWandView method is:
%
%      WandView *NewWandView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/

/*
  Allocate one pixel-wand row per OpenMP thread so parallel iterators can
  work without locking.  Returns NULL on allocation failure (after releasing
  any rows already acquired via DestroyPixelsThreadSet).
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  (void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands));
  }
  return(pixel_wands);
}

WandExport WandView *NewWandView(MagickWand *wand)
{
  ExceptionInfo
    *exception;

  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /*
    `wand` is assigned before AcquireVirtualCacheView() dereferences
    wand_view->wand below.
  */
  wand_view->wand=wand;
  exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
  /*
    NOTE(review): wand->images is dereferenced without a NULL check --
    callers must not pass an imageless wand.
  */
  wand_view->extent.width=wand->images->columns;
  wand_view->extent.height=wand->images->rows;
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width);
  wand_view->exception=exception;
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=MagickWandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w W a n d V i e w E x t e n t                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandViewExtent() returns a wand view required for all other methods
%  in the Wand View API.
%
%  The format of the NewWandViewExtent method is:
%
%      WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows:  These values define the perimeter of a extent of
%      pixel_wands view.
%
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  ExceptionInfo
    *exception;

  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /*
    Fix: assign `wand` *before* AcquireVirtualCacheView() dereferences
    wand_view->wand; previously wand_view->wand was still NULL (from the
    memset above), dereferencing a null pointer.  This now matches the
    initialization order of NewWandView().
  */
  wand_view->wand=wand;
  exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->exception=exception;
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=MagickWandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w D e s c r i p t i o n                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewDescription() associates a description with an image view.
%
%  The format of the SetWandViewDescription method is:
%
%      void SetWandViewDescription(WandView *image_view,const char *description)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
%    o description: the wand view description.
%
*/
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  /*
    Fix: release any previous description (NewWandView() always installs
    one), otherwise every call leaked the old string.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w I t e r a t o r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewIterator() iterates over the wand view in parallel and calls
%  your set method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initially
%  undefined and any settings you make in the callback method are automagically
%  synced back to your image.
%
%  The callback signature is:
%
%      MagickBooleanType SetImageViewMethod(ImageView *destination,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetWandViewIterator method is:
%
%      MagickBooleanType SetWandViewIterator(WandView *destination,
%        SetWandViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the wand view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
  SetWandViewMethod set,void *context)
{
  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(destination != (WandView *) NULL);
  assert(destination->signature == MagickWandSignature);
  if (set == (SetWandViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /*
    Destination must be DirectClass so pixels can be written in place.
  */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=destination->extent.height-destination->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register ssize_t
      x;

    register Quantum
      *magick_restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,destination->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Pixels are *not* pre-loaded into the wands here: the callback is
      expected to define them; afterwards they are copied into the cache.
    */
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        pixels);
      pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(destination_image,destination->description,
          progress,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r W a n d V i e w I t e r a t o r                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferWandViewIterator() iterates over two wand views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  extent is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination wand view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType TransferImageViewMethod(const WandView *source,
%        WandView *destination,const ssize_t y,const int thread_id,
%        void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferWandViewIterator method is:
%
%      MagickBooleanType TransferWandViewIterator(WandView *source,
%        WandView *destination,TransferWandViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o destination: the destination wand view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferWandViewIterator(WandView *source,
  WandView *destination,TransferWandViewMethod transfer,void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  /*
    NOTE(review): only `source` is assert-checked; `destination` is
    dereferenced unchecked -- callers must guarantee it is non-NULL.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (transfer == (TransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict pixels;

    register ssize_t
      x;

    register Quantum
      *magick_restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    /*
      Load the source scanline into this thread's pixel wands.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    /*
      Pre-populate the destination wands so the callback sees existing values.
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelSetQuantumPixel(destination->image,destination_pixels,
        destination->pixel_wands[id][x]);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Re-acquire the authentic pixels and write the callback's results back.
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        destination_pixels);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U p d a t e W a n d V i e w I t e r a t o r                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UpdateWandViewIterator() iterates over the wand view in parallel and calls
%  your update method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension are permitted.  Updates to pixels
%  in your callback are automagically synced back to the image.
%
%  The callback signature is:
%
%      MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback update method that must be
%  executed by a single thread at a time.
%
%  The format of the UpdateWandViewIterator method is:
%
%      MagickBooleanType UpdateWandViewIterator(WandView *source,
%        UpdateWandViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
  UpdateWandViewMethod update,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (update == (UpdateWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /*
    Updates are written in place, so the image must be DirectClass.
  */
  status=SetImageStorageClass(source_image,DirectClass,source->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register ssize_t
      x;

    register Quantum
      *magick_restrict pixels;

    if (status == MagickFalse)
      continue;
    /*
      Authentic (writable) pixels: loaded into the wands, handed to the
      callback, then copied back and synced.
    */
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      `pixels` was advanced by the load loop above; rewind happens implicitly
      because the write loop below re-walks from the same base?  No -- the
      pointer is NOT reset here; this matches the original code, which relies
      on... NOTE(review): the write-back loop continues from where the load
      loop stopped -- confirm against upstream wand-view.c before changing.
    */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelGetQuantumPixel(source->image,source->pixel_wands[id][x],pixels);
      pixels+=GetPixelChannels(source->image);
    }
    sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/* ==== convolution_3x3_int8.h ==== */
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// Transform an int8 3x3 kernel into the Winograd F(2,3) domain (U = G*g*G^T,
// 4x4 shorts per in/out channel pair), then repack it into four per-row Mats
// (one per Winograd row r) with output channels interleaved in groups of
// 8, then 4, then 1, to suit the NEON dot-product kernels.
static void conv3x3s1_winograd23_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat> &kernel_tm2, int inch, int outch)
{
    // 4x4 transformed coefficients per (outch, inch) pair, 2 bytes each (short).
    Mat kernel_tm(4*4, inch, outch, 2ul);

    // G
    const short ktm[4][3] = {
        { 2, 0, 0},
        { 1, 1, 1},
        { 1, -1, 1},
        { 0, 0, 2}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h = G * g  (the (short) cast promotes the whole expression
            // before multiply, avoiding signed-char overflow)
            short tmp[4][3];
            for (int i=0; i<4; i++)
            {
                tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T
            for (int j=0; j<4; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i=0; i<4; i++)
                {
                    kernel_tm0[j*4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // Repack: one Mat per Winograd row r (r = 0..3); within it, channels are
    // laid out 8-wide, then 4-wide, then singly (hence the channel count
    // outch/8 + (outch%8)/4 + outch%4).
    for (int r=0; r<4; r++)
    {
        Mat kernel_tm_test(4*8, inch, outch/8 + (outch%8)/4 + outch%4, 2u);

        int p = 0;
        // 8 output channels at a time: interleave 4 shorts from each.
        for (; p+7<outch; p+=8)
        {
            const short* kernel0 = (const short*)kernel_tm + (p+0)*inch*16;
            const short* kernel1 = (const short*)kernel_tm + (p+1)*inch*16;
            const short* kernel2 = (const short*)kernel_tm + (p+2)*inch*16;
            const short* kernel3 = (const short*)kernel_tm + (p+3)*inch*16;
            const short* kernel4 = (const short*)kernel_tm + (p+4)*inch*16;
            const short* kernel5 = (const short*)kernel_tm + (p+5)*inch*16;
            const short* kernel6 = (const short*)kernel_tm + (p+6)*inch*16;
            const short* kernel7 = (const short*)kernel_tm + (p+7)*inch*16;

            short* ktmp = kernel_tm_test.channel(p/8);

            for (int q=0; q<inch; q++)
            {
                ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3];
                ktmp[4] = kernel1[r*4+0]; ktmp[5] = kernel1[r*4+1]; ktmp[6] = kernel1[r*4+2]; ktmp[7] = kernel1[r*4+3];
                ktmp[8] = kernel2[r*4+0]; ktmp[9] = kernel2[r*4+1]; ktmp[10] = kernel2[r*4+2]; ktmp[11] = kernel2[r*4+3];
                ktmp[12] = kernel3[r*4+0]; ktmp[13] = kernel3[r*4+1]; ktmp[14] = kernel3[r*4+2]; ktmp[15] = kernel3[r*4+3];
                ktmp[16] = kernel4[r*4+0]; ktmp[17] = kernel4[r*4+1]; ktmp[18] = kernel4[r*4+2]; ktmp[19] = kernel4[r*4+3];
                ktmp[20] = kernel5[r*4+0]; ktmp[21] = kernel5[r*4+1]; ktmp[22] = kernel5[r*4+2]; ktmp[23] = kernel5[r*4+3];
                ktmp[24] = kernel6[r*4+0]; ktmp[25] = kernel6[r*4+1]; ktmp[26] = kernel6[r*4+2]; ktmp[27] = kernel6[r*4+3];
                ktmp[28] = kernel7[r*4+0]; ktmp[29] = kernel7[r*4+1]; ktmp[30] = kernel7[r*4+2]; ktmp[31] = kernel7[r*4+3];

                ktmp += 32;
                kernel0 += 16;
                kernel1 += 16;
                kernel2 += 16;
                kernel3 += 16;
                kernel4 += 16;
                kernel5 += 16;
                kernel6 += 16;
                kernel7 += 16;
            }
        }
        // 4 output channels at a time.
        for (; p+3<outch; p+=4)
        {
            const short* kernel0 = (const short*)kernel_tm + (p+0)*inch*16;
            const short* kernel1 = (const short*)kernel_tm + (p+1)*inch*16;
            const short* kernel2 = (const short*)kernel_tm + (p+2)*inch*16;
            const short* kernel3 = (const short*)kernel_tm + (p+3)*inch*16;

            short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4);

            for (int q=0; q<inch; q++)
            {
                ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3];
                ktmp[4] = kernel1[r*4+0]; ktmp[5] = kernel1[r*4+1]; ktmp[6] = kernel1[r*4+2]; ktmp[7] = kernel1[r*4+3];
                ktmp[8] = kernel2[r*4+0]; ktmp[9] = kernel2[r*4+1]; ktmp[10] = kernel2[r*4+2]; ktmp[11] = kernel2[r*4+3];
                ktmp[12] = kernel3[r*4+0]; ktmp[13] = kernel3[r*4+1]; ktmp[14] = kernel3[r*4+2]; ktmp[15] = kernel3[r*4+3];

                ktmp += 16;
                kernel0 += 16;
                kernel1 += 16;
                kernel2 += 16;
                kernel3 += 16;
            }
        }
        // Remaining single output channels.
        for (; p<outch; p++)
        {
            const short* kernel0 = (const short*)kernel_tm + p*inch*16;

            short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4 + p%4);

            for (int q=0; q<inch; q++)
            {
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];

                ktmp += 4;
                kernel0 += 16;
            }
        }
        kernel_tm2.push_back(kernel_tm_test);
    }
}

// Repack an int8 3x3 (stride-2) kernel: output channels are interleaved 8 at
// a time (9*inch bytes each, 8 weights per tap back-to-back) with leftover
// channels stored one per Mat channel, so the direct-conv NEON kernel can
// load 8 output channels' weights with a single pass.
static void conv3x3s2_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(8*9, inch, outch/8 + outch%8, (size_t)1u);

    const signed char* kernel = _kernel;

    int p=0;
    for (; p+7<outch; p+=8)
    {
        const signed char* k0 = kernel + (p+0)*inch*9;
        const signed char* k1 = kernel + (p+1)*inch*9;
        const signed char* k2 = kernel + (p+2)*inch*9;
        const signed char* k3 = kernel + (p+3)*inch*9;
        const signed char* k4 = kernel + (p+4)*inch*9;
        const signed char* k5 = kernel + (p+5)*inch*9;
        const signed char* k6 = kernel + (p+6)*inch*9;
        const signed char* k7 = kernel + (p+7)*inch*9;

        signed char* ktmp = kernel_tm.channel(p/8);

        for (int q=0; q<inch; q++)
        {
            // Interleave tap k of all 8 output channels.
            for (int k=0; k<9; k++)
            {
                ktmp[0] = k0[k];
                ktmp[1] = k1[k];
                ktmp[2] = k2[k];
                ktmp[3] = k3[k];
                ktmp[4] = k4[k];
                ktmp[5] = k5[k];
                ktmp[6] = k6[k];
                ktmp[7] = k7[k];

                ktmp += 8;
            }

            k0 += 9;
            k1 += 9;
            k2 += 9;
            k3 += 9;
            k4 += 9;
            k5 += 9;
            k6 += 9;
            k7 += 9;
        }
    }
    // Leftover output channels: copied through unchanged, one per channel.
    for (; p<outch; p++)
    {
        const signed char* k0 = kernel + (p+0)*inch*9;

        signed char* ktmp = kernel_tm.channel(p/8 + p%8);

        for (int q=0; q<inch; q++)
        {
            for (int k=0; k<9; k++)
            {
                ktmp[k] = k0[k];
            }
            ktmp += 9;
            k0 += 9;
        }
    }
}

static void
conv3x3s1_winograd23_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 2n+2, winograd F(2,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 1) / 2 * 2; outh = (outh + 1) / 2 * 2; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in FeatherCNN int nRowBlocks = w_tm/4; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles*4, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {1.0f, 0.0f, -1.0f, 0.0f}, // {0.0f, 1.0f, 1.00f, 0.0f}, // {0.0f, -1.0f, 1.00f, 0.0f}, // {0.0f, -1.0f, 0.00f, 1.0f} // }; #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for (int j=0; j<nColBlocks; j++) { const signed char* r0 = img + w * j * 2; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; for (int i = 0; i<nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q); short* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q); #if __ARM_NEON #if __aarch64__ asm volatile( // load "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "prfm pldl1keep, [%1, #64] \n" "ld1 {v1.8b}, [%1] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.8b}, [%2] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v3.8b}, [%3] \n" // w = B_t * d, trans int8 to 
int16 "ssubl v4.8h, v0.8b, v2.8b \n" // d4 "saddl v5.8h, v1.8b, v2.8b \n" // d6 "ssubl v6.8h, v2.8b, v1.8b \n" // d8 "ssubl v7.8h, v3.8b, v1.8b \n" // d10 // transpose w to w_t "trn1 v8.4h, v4.4h, v5.4h \n" "trn2 v9.4h, v4.4h, v5.4h \n" "trn1 v10.4h, v6.4h, v7.4h \n" "trn2 v11.4h, v6.4h, v7.4h \n" "trn1 v0.2s, v8.2s, v10.2s \n" "trn2 v2.2s, v8.2s, v10.2s \n" "trn1 v1.2s, v9.2s, v11.2s \n" "trn2 v3.2s, v9.2s, v11.2s \n" // U = B_t * d_t "sub v4.4h, v0.4h, v2.4h \n" "add v5.4h, v1.4h, v2.4h \n" "sub v6.4h, v2.4h, v1.4h \n" "sub v7.4h, v3.4h, v1.4h \n" // save "st1 {v4.4h}, [%4] \n" "st1 {v5.4h}, [%5] \n" "st1 {v6.4h}, [%6] \n" "st1 {v7.4h}, [%7] \n" : "=r"(r0), // %0 "=r"(r1), // %1 "=r"(r2), // %2 "=r"(r3), // %3 "=r"(out_tm0), // %4 "=r"(out_tm1), // %5 "=r"(out_tm2), // %6 "=r"(out_tm3) // %7 : "0"(r0), "1"(r1), "2"(r2), "3"(r3), "4"(out_tm0), "5"(out_tm1), "6"(out_tm2), "7"(out_tm3) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11" ); #else asm volatile( // load "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "pld [%1, #64] \n" "vld1.s8 {d1}, [%1] \n" "pld [%2, #64] \n" "vld1.s8 {d2}, [%2] \n" "pld [%3, #64] \n" "vld1.s8 {d3}, [%3] \n" // w = B_t * d, trans int8 to int16 "vsubl.s8 q2, d0, d2 \n" // d4 "vaddl.s8 q3, d1, d2 \n" // d6 "vsubl.s8 q4, d2, d1 \n" // d8 "vsubl.s8 q5, d3, d1 \n" // d10 // transpose w to w_t "vtrn.s16 d4, d6 \n" "vtrn.s16 d8, d10 \n" "vtrn.s32 d4, d8 \n" "vtrn.s32 d6, d10 \n" // U = B_t * d_t "vsub.s16 d11, d4, d8 \n" "vadd.s16 d12, d6, d8 \n" "vsub.s16 d13, d8, d6 \n" "vsub.s16 d14, d10, d6 \n" // save "vst1.s32 {d11}, [%4] \n" "vst1.s32 {d12}, [%5] \n" "vst1.s32 {d13}, [%6] \n" "vst1.s32 {d14}, [%7] \n" : "=r"(r0), // %0 "=r"(r1), // %1 "=r"(r2), // %2 "=r"(r3), // %3 "=r"(out_tm0), // %4 "=r"(out_tm1), // %5 "=r"(out_tm2), // %6 "=r"(out_tm3) // %7 : "0"(r0), "1"(r1), "2"(r2), "3"(r3), "4"(out_tm0), "5"(out_tm1), "6"(out_tm2), "7"(out_tm3) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", 
"q7" ); #endif // __aarch64__ #else short d0[4],d1[4],d2[4],d3[4]; short w0[4],w1[4],w2[4],w3[4]; short t0[4],t1[4],t2[4],t3[4]; // load for (int n = 0; n < 4; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; } // w = B_t * d for (int n = 0; n < 4; n++) { w0[n] = d0[n] - d2[n]; w1[n] = d1[n] + d2[n]; w2[n] = d2[n] - d1[n]; w3[n] = d3[n] - d1[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; } // U = B_t * d_t for (int n = 0; n < 4; n++) { d0[n] = t0[n] - t2[n]; d1[n] = t1[n] + t2[n]; d2[n] = t2[n] - t1[n]; d3[n] = t3[n] - t1[n]; } // save to out_tm for (int n = 0; n < 4; n++) { out_tm0[n] = d0[n]; out_tm1[n] = d1[n]; out_tm2[n] = d2[n]; out_tm3[n] = d3[n]; } #endif r0 += 2; r1 += 2; r2 += 2; r3 += 2; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in FeatherCNN int nRowBlocks = w_tm/4; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<4; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); int* output4_tm = top_blob_tm.channel(p+4); int* output5_tm = top_blob_tm.channel(p+5); int* output6_tm = top_blob_tm.channel(p+6); int* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm 
+ r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k0 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k0n = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k1 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 {v15.4h, v16.4h}, [%9] \n" // _k1n = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), 
// %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k0 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k0n = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k1 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k1n = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : 
"0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; sum4[n] += (int)r0[n] * kptr[n+16]; sum5[n] += (int)r0[n] * kptr[n+20]; sum6[n] += (int)r0[n] * kptr[n+24]; sum7[n] += (int)r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 16; output1_tm += 16; output2_tm += 16; output3_tm += 16; output4_tm += 16; output5_tm += 16; output6_tm += 16; output7_tm += 16; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // 
for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k0 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k0n = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k0 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k0n = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 16; output1_tm += 16; output2_tm += 16; output3_tm += 16; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) //"prfm pldl1keep, [%2, #128] \n" // _r0 = vld1_s16(r0); // input 
inch0 "ld1 {v8.4h}, [%1] \n" "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9" ); #endif // __aarch64__ #else int sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif output0_tm += 16; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[2][4] = { // {1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 1.0f} // }; int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in FeatherCNN int nRowBlocks = w_tm/4; int32x2_t _shift = vdup_n_s32(-2); #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { int* out_tile = top_blob_tm.channel(p); int* outRow0 = top_blob_bordered.channel(p); int* outRow1 = outRow0 + outw; for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) 
{ #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "add v0.4s, v0.4s, v1.4s \n" // s0 = s0 + s1 + s2; "sub v1.4s, v1.4s, v2.4s \n" "add v0.4s, v0.4s, v2.4s \n" // s1 = s1 - s2 + s3; "add v1.4s, v1.4s, v3.4s \n" "trn1 v4.4s, v0.4s, v1.4s \n" "trn2 v5.4s, v0.4s, v1.4s \n" "dup v6.2d, v4.d[1] \n" "dup v7.2d, v5.d[1] \n" "add v0.2s, v4.2s, v5.2s \n" // o0 = d0 + d1 + d2; "sub v1.2s, v5.2s, v6.2s \n" "add v0.2s, v0.2s, v6.2s \n" // o1 = d1 - d2 + d3; "add v1.2s, v1.2s, v7.2s \n" "sshl v0.2s, v0.2s, %6.2s \n" // o0 = o0 >> 2 "sshl v1.2s, v1.2s, %6.2s \n" // o1 = o1 >> 2 "st1 {v0.2s}, [%1], #8 \n" "st1 {v1.2s}, [%2], #8 \n" : "=r"(out_tile), // %0 "=r"(outRow0), // %1 "=r"(outRow1) // %2 : "0"(out_tile), "1"(outRow0), "2"(outRow1), "w"(_shift) // %6 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "vaddq.s32 q0, q0, q1 \n" // s0 = s0 + s1 + s2; "vsubq.s32 q1, q1, q2 \n" "vaddq.s32 q0, q0, q2 \n" // s1 = s1 - s2 + s3; "vaddq.s32 q1, q1, q3 \n" "vtrn.s32 q0, q1 \n" "vadd.s32 d8, d0, d2 \n" // o0 = d0 + d1 + d2; "vsub.s32 d9, d2, d1 \n" "vadd.s32 d8, d8, d1 \n" // o1 = d1 - d2 + d3; "vadd.s32 d9, d9, d3 \n" "vshl.s32 d8, d8, %P6 \n" // o0 = o0 >> 2 "vshl.s32 d9, d9, %P6 \n" // o1 = o1 >> 2 "vst1.s32 {d8}, [%1]! \n" "vst1.s32 {d9}, [%2]! 
\n" : "=r"(out_tile), // %0 "=r"(outRow0), // %1 "=r"(outRow1) // %2 : "0"(out_tile), "1"(outRow0), "2"(outRow1), "w"(_shift) // %6 : "cc", "memory", "q0", "q1", "q2", "q3", "q4" ); #endif // __aarch64__ #else int s0[4],s1[4],s2[4],s3[4]; int w0[4],w1[4]; int d0[2],d1[2],d2[2],d3[2]; int o0[2],o1[2]; // load for (int n = 0; n < 4; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n+ 4]; s2[n] = out_tile[n+ 8]; s3[n] = out_tile[n+12]; } // w = A_T * W for (int n = 0; n < 4; n++) { w0[n] = s0[n] + s1[n] + s2[n]; w1[n] = s1[n] - s2[n] + s3[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d1[0] = w0[1]; d1[1] = w1[1]; d2[0] = w0[2]; d2[1] = w1[2]; d3[0] = w0[3]; d3[1] = w1[3]; } // Y = A_T * w_t for (int n = 0; n < 2; n++) { o0[n] = d0[n] + d1[n] + d2[n]; o1[n] = d1[n] - d2[n] + d3[n]; } // save to top blob tm,why right 2,because the G' = G*2 outRow0[0] = o0[0] >> 2; outRow0[1] = o0[1] >> 2; outRow1[0] = o1[0] >> 2; outRow1[1] = o1[1] >> 2; out_tile += 16; outRow0 += 2; outRow1 += 2; #endif // __ARM_NEON } outRow0 += outw; outRow1 += outw; } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads); } static void conv3x3s2_packed_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; int nn_outch = outch >> 3; int remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; Mat out0 = top_blob.channel(p+0); Mat out1 = top_blob.channel(p+1); Mat out2 = top_blob.channel(p+2); Mat out3 = top_blob.channel(p+3); Mat out4 = top_blob.channel(p+4); Mat out5 = top_blob.channel(p+5); Mat out6 = top_blob.channel(p+6); Mat out7 = top_blob.channel(p+7); out0.fill(0); 
out1.fill(0); out2.fill(0); out3.fill(0); out4.fill(0); out5.fill(0); out6.fill(0); out7.fill(0); const signed char* ktmp = _kernel.channel(p/8); for (int q=0; q<inch; q++) { int* outptr0 = out0; int* outptr1 = out1; int* outptr2 = out2; int* outptr3 = out3; int* outptr4 = out4; int* outptr5 = out5; int* outptr6 = out6; int* outptr7 = out7; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w*2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%9], #16 \n"//r0-r2 "ld2 {v5.8b, v6.8b}, [%9] \n" "ld1 {v8.4s, v9.4s}, [%1] \n"//out0 "ld1 {v10.4s, v11.4s}, [%2] \n"//out1 "ld1 {v12.4s, v13.4s}, [%3] \n"//out2 "ld1 {v14.4s, v15.4s}, [%4] \n"//out3 "ld1 {v16.4s, v17.4s}, [%5] \n"//out4 "ld1 {v18.4s, v19.4s}, [%6] \n"//out5 "ld1 {v20.4s, v21.4s}, [%7] \n"//out6 "ld1 {v22.4s, v23.4s}, [%8] \n"//out7 "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k00-k70) "sshll v1.8h, v1.8b, #0 \n"//(k01-k71) "sshll v2.8h, v2.8b, #0 \n"//(k02-k72) "sshll v3.8h, v3.8b, #0 \n"// r0 "sshll v4.8h, v4.8b, #0 \n"// r1 "sshll v7.8h, v7.8b, #0 \n"// r2 // r0 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r00-r07)*k00 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r00-r07)*k10 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r00-r07)*k20 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r00-r07)*k30 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r00-r07)*k40 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r00-r07)*k50 "smlal2 
v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r00-r07)*k60 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r00-r07)*k70 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r1 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r10-r17)*k01 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r10-r17)*k11 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r10-r17)*k21 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r10-r17)*k31 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r10-r17)*k41 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r10-r17)*k51 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r10-r17)*k61 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r10-r17)*k71 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r2 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r20-r27)*k02 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r20-r27)*k12 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r20-r27)*k22 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r20-r27)*k32 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r20-r27)*k42 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r20-r27)*k52 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r20-r27)*k62 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r20-r27)*k72 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%10], #16 \n"//r3-r5 "ld2 {v5.8b, v6.8b}, [%10] \n" "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k03-k73) "sshll v1.8h, v1.8b, #0 \n"//(k04-k74) "sshll v2.8h, v2.8b, #0 \n"//(k05-k75) "sshll v3.8h, 
v3.8b, #0 \n"// r3 "sshll v4.8h, v4.8b, #0 \n"// r4 "sshll v7.8h, v7.8b, #0 \n"// r5 // r3 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r30-r37)*k03 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r30-r37)*k13 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r30-r37)*k23 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r30-r37)*k33 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r30-r37)*k43 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r30-r37)*k53 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r30-r37)*k63 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r30-r37)*k73 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r4 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r40-r47)*k04 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r40-r47)*k14 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r40-r47)*k24 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r40-r47)*k34 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r40-r47)*k44 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r40-r47)*k54 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r40-r47)*k64 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r40-r47)*k74 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r5 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r50-r57)*k05 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r50-r57)*k15 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r50-r57)*k25 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r50-r57)*k35 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 
+= (r50-r57)*k45 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r50-r57)*k55 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r50-r57)*k65 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r50-r57)*k75 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%11], #16 \n"//r6-r8 "ld2 {v5.8b, v6.8b}, [%11] \n" "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k06-k76) "sshll v1.8h, v1.8b, #0 \n"//(k07-k77) "sshll v2.8h, v2.8b, #0 \n"//(k08-k78) "sshll v3.8h, v3.8b, #0 \n"// r6 "sshll v4.8h, v4.8b, #0 \n"// r7 "sshll v7.8h, v7.8b, #0 \n"// r8 // r6 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r60-r67)*k06 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r60-r67)*k16 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r60-r67)*k26 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r60-r67)*k36 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r60-r67)*k46 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r60-r67)*k56 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r60-r67)*k66 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r60-r67)*k76 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r7 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r70-r77)*k07 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r70-r77)*k17 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r70-r77)*k27 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r70-r77)*k37 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r70-r77)*k47 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r70-r77)*k57 "smlal2 v19.4s, 
v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r70-r77)*k67 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r70-r77)*k77 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r8 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r80-r87)*k08 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r80-r87)*k18 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r80-r87)*k28 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r80-r87)*k38 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r80-r87)*k48 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r80-r87)*k58 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r80-r87)*k68 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r80-r87)*k78 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "st1 {v16.4s, v17.4s}, [%5], #32 \n" "st1 {v18.4s, v19.4s}, [%6], #32 \n" "st1 {v20.4s, v21.4s}, [%7], #32 \n" "st1 {v22.4s, v23.4s}, [%8], #32 \n" "subs %w0, %w0, #1 \n" "sub %12, %12, #72 \n"// reset ktmp "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } #else // __aarch64__ if (nn > 0) { asm volatile( "0: \n" "pld [%1, #128] \n" 
"vld1.s32 {d16-d17}, [%1] \n"// out0 "pld [%2, #128] \n" "vld1.s32 {d18-d19}, [%2] \n"// out1 "pld [%3, #128] \n" "vld1.s32 {d20-d21}, [%3] \n"// out2 "pld [%4, #128] \n" "vld1.s32 {d22-d23}, [%4] \n"// out3 // r0 "pld [%9, #64] \n" "vld2.s8 {d8-d9}, [%9] \n"// d8(a00 a02 a04 a06 a08 a010 a012 a014), d9(a01 a03 a05 a07 a09 a011 a013 a015) "add %9, #8 \n" "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! \n"// d0(k00-k70) d1(k01-k71) d2(k02-k72) "pld [%5, #128] \n" "vld1.s32 {d24-d25}, [%5] \n"// out4 "pld [%6, #128] \n" "vld1.s32 {d26-d27}, [%6] \n"// out5 "vmovl.s8 q2, d2 \n"// q2(k02-k72) "vmovl.s8 q1, d1 \n"// q1(k01-k71) "vmovl.s8 q0, d0 \n"// q0(k00-k70) "vext.s8 d12, d8, d8, #1 \n"// d12(a02 a04 a06 a08 x x x x) "pld [%7, #128] \n" "vld1.s32 {d28-d29}, [%7] \n"// out6 "vmovl.s8 q5, d9 \n"// q5(a01 a03 a05 a07 a09 a011 a013 a015) d11 "vmovl.s8 q4, d8 \n"// q4(a00 a02 a04 a06 a08 a010 a012 a014) d9 "vmovl.s8 q6, d12 \n"// q6(a02 a04 a06 a08 a010 a012 a014 a016) d13 "pld [%8, #128] \n" "vld1.s32 {d30-d31}, [%8] \n"// out7 "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a00 a02 a04 a06) * k00 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a00 a02 a04 a06) * k10 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a00 a02 a04 a06) * k20 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a00 a02 a04 a06) * k30 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a00 a02 a04 a06) * k40 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a00 a02 a04 a06) * k50 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a00 a02 a04 a06) * k60 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a00 a02 a04 a06) * k70 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a01-a07) * k01 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a01-a07) * k11 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a01-a07) * k21 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a01-a07) * k31 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a01-a07) * k41 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a01-a07) * k51 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a01-a07) * k61 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a01-a07) * k71 
"pld [%10, #64] \n" "vld2.s8 {d8-d9}, [%10] \n"// d8(a10 a12 a14 a16 a18 a110 a112 a114), d9(a11 a13 a15 a17 a19 a111 a113 a115) "add %10, #8 \n" "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a02-a08) * k02 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a02-a08) * k12 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a02-a08) * k22 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a02-a08) * k32 "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! \n"// d0(k03-k73) d1(k04-k74) d2(k05-k75) "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a02-a08) * k42 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a02-a08) * k52 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a02-a08) * k62 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a02-a08) * k72 // r1 "vext.s8 d12, d8, d8, #1 \n"// d12(a12 a14 a16 a18 x x x x) "vmovl.s8 q2, d2 \n"// q2(k05-k75) "vmovl.s8 q1, d1 \n"// q1(k04-k74) "vmovl.s8 q0, d0 \n"// q0(k03-k73) "vmovl.s8 q5, d9 \n"// q5(a11-a115) "vmovl.s8 q4, d8 \n"// q4(a10-a114) "vmovl.s8 q6, d12 \n"// q6(a12-a116) "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a10-a16) * k03 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a10-a16) * k13 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a10-a16) * k23 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a10-a16) * k33 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a10-a16) * k43 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a10-a16) * k53 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a10-a16) * k63 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a10-a16) * k73 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a11-a17) * k04 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a11-a17) * k14 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a11-a17) * k24 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a11-a17) * k34 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a11-a17) * k44 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a11-a17) * k54 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a11-a17) * k64 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a11-a17) * k74 "pld [%11, #64] \n" "vld2.s8 {d8-d9}, [%11] \n"// d8(a20 a22 a24 a26 a28 a210 a212 a214), d9(a21 a23 a25 a27 a29 a211 a213 a215) 
"add %11, #8 \n" "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a12-a18) * k05 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a12-a18) * k15 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a12-a18) * k25 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a12-a18) * k35 "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! \n"// d0(k06-k76) d1(k07-k77) d2(k08-k78) "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a12-a18) * k45 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a12-a18) * k55 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a12-a18) * k65 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a12-a18) * k75 // r2 "vext.s8 d12, d8, d8, #1 \n"// d12(a22 a24 a26 a28 x x x x) "vmovl.s8 q2, d2 \n"// q2(k08-k78) "vmovl.s8 q1, d1 \n"// q1(k07-k77) "vmovl.s8 q0, d0 \n"// q0(k06-k76) "vmovl.s8 q5, d9 \n"// q5(a21-a215) "vmovl.s8 q4, d8 \n"// q4(a20-a214) "vmovl.s8 q6, d12 \n"// q6(a22-a216) "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a20-a26) * k06 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a20-a26) * k16 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a20-a26) * k26 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a20-a26) * k36 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a20-a26) * k46 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a20-a26) * k56 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a20-a26) * k66 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a20-a26) * k76 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a21-a27) * k07 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a21-a27) * k17 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a21-a27) * k27 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a21-a27) * k37 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a21-a27) * k47 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a21-a27) * k57 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a21-a27) * k67 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a21-a27) * k77 "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a22-a28) * k08 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a22-a28) * k18 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a22-a28) * k28 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a22-a28) * k38 "vmlal.s16 q12, d12, d5[0] \n"// 
sum4 += (a22-a28) * k48 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a22-a28) * k58 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a22-a28) * k68 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a22-a28) * k78 // save s32 to memory "sub %12, %12, #72 \n" "vst1.s32 {d16-d17}, [%1]! \n"// out0 "vst1.s32 {d18-d19}, [%2]! \n"// out1 "vst1.s32 {d20-d21}, [%3]! \n"// out2 "vst1.s32 {d22-d23}, [%4]! \n"// out3 "subs %0, #1 \n" "vst1.s32 {d24-d25}, [%5]! \n"// out4 "vst1.s32 {d26-d27}, [%6]! \n"// out5 "vst1.s32 {d28-d29}, [%7]! \n"// out6 "vst1.s32 {d30-d31}, [%8]! \n"// out7 "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ int8x8_t _r0_s8 = vld1_s8(r0);// (a00 a01 a02 ....) int8x8_t _r1_s8 = vld1_s8(r1);// (a10 a11 a12 ....) int8x8_t _r2_s8 = vld1_s8(r2);// (a20 a21 a22 ....) 
int16x8_t _r0 = vmovl_s8(_r0_s8); int16x8_t _r1 = vmovl_s8(_r1_s8); int16x8_t _r2 = vmovl_s8(_r2_s8); int32x4_t _sum03, _sum47; _sum03 = vld1q_lane_s32(outptr0, _sum03, 0);// out0 _sum03 = vld1q_lane_s32(outptr1, _sum03, 1);// out1 _sum03 = vld1q_lane_s32(outptr2, _sum03, 2);// out2 _sum03 = vld1q_lane_s32(outptr3, _sum03, 3);// out3 _sum47 = vld1q_lane_s32(outptr4, _sum47, 0);// out4 _sum47 = vld1q_lane_s32(outptr5, _sum47, 1);// out5 _sum47 = vld1q_lane_s32(outptr6, _sum47, 2);// out6 _sum47 = vld1q_lane_s32(outptr7, _sum47, 3);// out7 // k0 - k2 int8x8_t _k0_8 = vld1_s8(ktmp); //(k00-k70) int8x8_t _k1_8 = vld1_s8(ktmp+8); //(k01-k71) int8x8_t _k2_8 = vld1_s8(ktmp+16); //(k02-k72) int16x8_t _k0 = vmovl_s8(_k0_8); int16x8_t _k1 = vmovl_s8(_k1_8); int16x8_t _k2 = vmovl_s8(_k2_8); int32x4_t _sum0 = vmull_laneq_s16(vget_low_s16(_k0), _r0, 0); int32x4_t _sum0n = vmull_laneq_s16(vget_high_s16(_k0), _r0, 0); int32x4_t _sum1 = vmull_laneq_s16(vget_low_s16(_k1), _r0, 1); int32x4_t _sum1n = vmull_laneq_s16(vget_high_s16(_k1), _r0, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r0, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r0, 2); // k3 - k5 _k0_8 = vld1_s8(ktmp+24); //(k03-k73) _k1_8 = vld1_s8(ktmp+32); //(k04-k74) _k2_8 = vld1_s8(ktmp+40); //(k05-k75) _k0 = vmovl_s8(_k0_8); _k1 = vmovl_s8(_k1_8); _k2 = vmovl_s8(_k2_8); _sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r1, 0); _sum0n = vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r1, 0); _sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r1, 1); _sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r1, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r1, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r1, 2); // k6 - k8 _k0_8 = vld1_s8(ktmp+48); //(k06-k76) _k1_8 = vld1_s8(ktmp+56); //(k07-k77) _k2_8 = vld1_s8(ktmp+64); //(k08-k78) _k0 = vmovl_s8(_k0_8); _k1 = vmovl_s8(_k1_8); _k2 = vmovl_s8(_k2_8); _sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r2, 0); _sum0n = 
vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r2, 0); _sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r2, 1); _sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r2, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r2, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r2, 2); _sum0 = vaddq_s32(_sum0, _sum1); _sum0n = vaddq_s32(_sum0n, _sum1n); _sum03 = vaddq_s32(_sum03, _sum0); _sum47 = vaddq_s32(_sum47, _sum0n); vst1q_lane_s32(outptr0, _sum03, 0); vst1q_lane_s32(outptr1, _sum03, 1); vst1q_lane_s32(outptr2, _sum03, 2); vst1q_lane_s32(outptr3, _sum03, 3); vst1q_lane_s32(outptr4, _sum47, 0); vst1q_lane_s32(outptr5, _sum47, 1); vst1q_lane_s32(outptr6, _sum47, 2); vst1q_lane_s32(outptr7, _sum47, 3); outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #else // __aarch64__ asm volatile( "pld [%8, #64] \n" "vld1.s8 {d0}, [%8] \n"// d0(a00 a01 a02 ....) "pld [%9, #64] \n" "vld1.s8 {d2}, [%9] \n"// d2(a10 a11 a12 ....) "pld [%10, #64] \n" "vld1.s8 {d4}, [%10] \n"// d4(a20 a21 a22 ....) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! \n"// d6(k00-k70) d7(k01-k71) d8(k02-k72) "vmovl.s8 q0, d0 \n"// d0(a00 a01 a02 x) "vmovl.s8 q1, d2 \n"// d2(a10 a11 a12 x) "vmovl.s8 q2, d4 \n"// d4(a20 a21 a22 x) "vmovl.s8 q5, d8 \n"// d10(k02-k32) d11(k42-k72) "vmovl.s8 q4, d7 \n"// d8(k01-k31) d9(k41-k71) "vmovl.s8 q3, d6 \n"// d6(k00-k30) d7(k40-k70) "vld1.s32 {d20[0]}, [%0] \n"// out0 q10 "vld1.s32 {d20[1]}, [%1] \n"// out1 "vld1.s32 {d21[0]}, [%2] \n"// out2 "vld1.s32 {d21[1]}, [%3] \n"// out3 "pld [%11, #64] \n" "vld1.s8 {d24-d26}, [%11]! 
\n" "vmovl.s8 q14, d26 \n"// d28(k05-k35) d29(k45-k75) "vmovl.s8 q13, d25 \n"// d26(k04-k34) d27(k44-k74) "vmovl.s8 q12, d24 \n"// d24(k03-k33) d25(k43-k73) "vld1.s32 {d22[0]}, [%4] \n"// out4 q11 "vld1.s32 {d22[1]}, [%5] \n"// out5 "vld1.s32 {d23[0]}, [%6] \n"// out6 "vld1.s32 {d23[1]}, [%7] \n"// out7 "vmull.s16 q6, d6, d0[0] \n"// a00 x (k00-k30) "vmull.s16 q7, d7, d0[0] \n"// a00 x (k40-k70) "vmull.s16 q8, d8, d0[1] \n"// a01 x (k01-k31) "vmull.s16 q9, d9, d0[1] \n"// a01 x (k41-k71) "vmlal.s16 q10, d10, d0[2] \n"// a02 x (k02-k32) "vmlal.s16 q11, d11, d0[2] \n"// a02 x (k42-k72) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! \n" "vmovl.s8 q5, d8 \n"// d10(k08-k38) d11(k48-k78) "vmovl.s8 q4, d7 \n"// d8(k07-k37) d9(k47-k77) "vmovl.s8 q3, d6 \n"// d6(k06-k36) d7(k46-k76) "vmlal.s16 q6, d24, d2[0] \n"// a10 x (k03-k33) "vmlal.s16 q7, d25, d2[0] \n"// a10 x (k43-k73) "vmlal.s16 q8, d26, d2[1] \n"// a11 x (k04-k34) "vmlal.s16 q9, d27, d2[1] \n"// a11 x (k44-k74) "vmlal.s16 q10, d28, d2[2] \n"// a12 x (k05-k35) "vmlal.s16 q11, d29, d2[2] \n"// a12 x (k45-k75) "vmlal.s16 q6, d6, d4[0] \n"// a20 x (k06-k36) "vmlal.s16 q7, d7, d4[0] \n"// a20 x (k46-k76) "vmlal.s16 q8, d8, d4[1] \n"// a21 x (k07-k37) "vmlal.s16 q9, d9, d4[1] \n"// a21 x (k47-k77) "vmlal.s16 q10, d10, d4[2] \n"// a22 x (k08-k38) "vmlal.s16 q11, d11, d4[2] \n"// a22 x (k48-k78) "vadd.s32 q8, q8, q6 \n" "vadd.s32 q9, q9, q7 \n" "sub %11, %11, #72 \n" "vadd.s32 q10, q10, q8 \n" "vadd.s32 q11, q11, q9 \n" "vst1.s32 {d20[0]}, [%0]! \n"// out0 "vst1.s32 {d20[1]}, [%1]! \n"// out1 "vst1.s32 {d21[0]}, [%2]! \n"// out2 "vst1.s32 {d21[1]}, [%3]! \n"// out3 "vst1.s32 {d22[0]}, [%4]! \n"// out4 "vst1.s32 {d22[1]}, [%5]! \n"// out5 "vst1.s32 {d23[0]}, [%6]! \n"// out6 "vst1.s32 {d23[1]}, [%7]! 
\n"// out7 : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else // __ARM_NEON int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; sum0 += (int)r0[0] * ktmp[0]; sum1 += (int)r0[0] * ktmp[1]; sum2 += (int)r0[0] * ktmp[2]; sum3 += (int)r0[0] * ktmp[3]; sum4 += (int)r0[0] * ktmp[4]; sum5 += (int)r0[0] * ktmp[5]; sum6 += (int)r0[0] * ktmp[6]; sum7 += (int)r0[0] * ktmp[7]; ktmp += 8; sum0 += (int)r0[1] * ktmp[0]; sum1 += (int)r0[1] * ktmp[1]; sum2 += (int)r0[1] * ktmp[2]; sum3 += (int)r0[1] * ktmp[3]; sum4 += (int)r0[1] * ktmp[4]; sum5 += (int)r0[1] * ktmp[5]; sum6 += (int)r0[1] * ktmp[6]; sum7 += (int)r0[1] * ktmp[7]; ktmp += 8; sum0 += (int)r0[2] * ktmp[0]; sum1 += (int)r0[2] * ktmp[1]; sum2 += (int)r0[2] * ktmp[2]; sum3 += (int)r0[2] * ktmp[3]; sum4 += (int)r0[2] * ktmp[4]; sum5 += (int)r0[2] * ktmp[5]; sum6 += (int)r0[2] * ktmp[6]; sum7 += (int)r0[2] * ktmp[7]; ktmp += 8; sum0 += (int)r1[0] * ktmp[0]; sum1 += (int)r1[0] * ktmp[1]; sum2 += (int)r1[0] * ktmp[2]; sum3 += (int)r1[0] * ktmp[3]; sum4 += (int)r1[0] * ktmp[4]; sum5 += (int)r1[0] * ktmp[5]; sum6 += (int)r1[0] * ktmp[6]; sum7 += (int)r1[0] * ktmp[7]; ktmp += 8; sum0 += (int)r1[1] * ktmp[0]; sum1 += (int)r1[1] * ktmp[1]; sum2 += (int)r1[1] * ktmp[2]; sum3 += (int)r1[1] * ktmp[3]; sum4 += (int)r1[1] * ktmp[4]; sum5 += (int)r1[1] * ktmp[5]; sum6 += (int)r1[1] * ktmp[6]; sum7 += (int)r1[1] * ktmp[7]; ktmp += 8; sum0 += (int)r1[2] * ktmp[0]; sum1 += (int)r1[2] * ktmp[1]; sum2 += 
(int)r1[2] * ktmp[2]; sum3 += (int)r1[2] * ktmp[3]; sum4 += (int)r1[2] * ktmp[4]; sum5 += (int)r1[2] * ktmp[5]; sum6 += (int)r1[2] * ktmp[6]; sum7 += (int)r1[2] * ktmp[7]; ktmp += 8; sum0 += (int)r2[0] * ktmp[0]; sum1 += (int)r2[0] * ktmp[1]; sum2 += (int)r2[0] * ktmp[2]; sum3 += (int)r2[0] * ktmp[3]; sum4 += (int)r2[0] * ktmp[4]; sum5 += (int)r2[0] * ktmp[5]; sum6 += (int)r2[0] * ktmp[6]; sum7 += (int)r2[0] * ktmp[7]; ktmp += 8; sum0 += (int)r2[1] * ktmp[0]; sum1 += (int)r2[1] * ktmp[1]; sum2 += (int)r2[1] * ktmp[2]; sum3 += (int)r2[1] * ktmp[3]; sum4 += (int)r2[1] * ktmp[4]; sum5 += (int)r2[1] * ktmp[5]; sum6 += (int)r2[1] * ktmp[6]; sum7 += (int)r2[1] * ktmp[7]; ktmp += 8; sum0 += (int)r2[2] * ktmp[0]; sum1 += (int)r2[2] * ktmp[1]; sum2 += (int)r2[2] * ktmp[2]; sum3 += (int)r2[2] * ktmp[3]; sum4 += (int)r2[2] * ktmp[4]; sum5 += (int)r2[2] * ktmp[5]; sum6 += (int)r2[2] * ktmp[6]; sum7 += (int)r2[2] * ktmp[7]; ktmp += 8; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; ktmp -= 8*9; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 8*9; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char* ktmp = _kernel.channel(p/8 + p%8); for (int q=0; q<inch; q++) { int* outptr = out; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w*2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v0.8b, v1.8b}, [%5] \n"//ktmp "ld2 {v2.8b, v3.8b}, [%2], #16 \n"//r0-r2 "ld2 {v4.8b, v5.8b}, 
[%2] \n" "ld2 {v6.8b, v7.8b}, [%3], #16 \n"//r3-r5 "ld2 {v8.8b, v9.8b}, [%3] \n" "ld2 {v10.8b, v11.8b}, [%4], #16 \n"//r6-r8 "ld2 {v12.8b, v13.8b}, [%4] \n" "ld1 {v14.4s, v15.4s}, [%1] \n"//out0 "ext v4.8b, v2.8b, v4.8b, #1 \n" "ext v8.8b, v6.8b, v8.8b, #1 \n" "ext v12.8b, v10.8b, v12.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k0-k7) "sshll v1.8h, v1.8b, #0 \n"//(k8) "sshll v2.8h, v2.8b, #0 \n"// r0 "sshll v3.8h, v3.8b, #0 \n"// r1 "sshll v4.8h, v4.8b, #0 \n"// r2 "sshll v6.8h, v6.8b, #0 \n"// r3 "sshll v7.8h, v7.8b, #0 \n"// r4 "sshll v8.8h, v8.8b, #0 \n"// r5 "sshll v10.8h, v10.8b, #0 \n"// r6 "sshll v11.8h, v11.8b, #0 \n"// r7 "sshll v12.8h, v12.8b, #0 \n"// r8 // r0 "smull v16.4s, v2.4h, v0.h[0] \n"// out = r0*k0 "smull2 v17.4s, v2.8h, v0.h[0] \n" "smull v18.4s, v3.4h, v0.h[1] \n"// outn = r1*k1 "smull2 v19.4s, v3.8h, v0.h[1] \n" "smlal v16.4s, v4.4h, v0.h[2] \n"// out = r2*k2 "smlal2 v17.4s, v4.8h, v0.h[2] \n" "smlal v18.4s, v6.4h, v0.h[3] \n"// outn = r3*k3 "smlal2 v19.4s, v6.8h, v0.h[3] \n" "smlal v16.4s, v7.4h, v0.h[4] \n"// out = r4*k4 "smlal2 v17.4s, v7.8h, v0.h[4] \n" "smlal v18.4s, v8.4h, v0.h[5] \n"// outn = r5*k5 "smlal2 v19.4s, v8.8h, v0.h[5] \n" "smlal v16.4s, v10.4h, v0.h[6] \n"// out = r6*k6 "smlal2 v17.4s, v10.8h, v0.h[6] \n" "smlal v18.4s, v11.4h, v0.h[7] \n"// outn = r7*k7 "smlal2 v19.4s, v11.8h, v0.h[7] \n" "smlal v16.4s, v12.4h, v1.h[0] \n"// out = r8*k8 "smlal2 v17.4s, v12.8h, v1.h[0] \n" "add v8.4s, v16.4s, v18.4s \n" "add v9.4s, v17.4s, v19.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); } #else if (nn > 0) { asm volatile( "vld1.s8 {d0-d1}, [%5] \n"// d0(k0 - k7) d1(k8 ...) 
"vmovl.s8 q1, d1 \n"// d2(k8 ...) "vmovl.s8 q0, d0 \n"// d0(k0 - k3) d1(k4 - k7) "0: \n" "pld [%2, #192] \n" "vld2.s8 {d4-d5}, [%2]! \n"// r0 d4(a00 a02 ... a014) d5(a01 a03 ... a015) "vld2.s8 {d8-d9}, [%2] \n"// d8(a016 ....) "vld2.s8 {d10-d11}, [%3]! \n"// r1 d10(a10 a12 ... a114) d11(a11 a13 ... a115) "vld2.s8 {d14-d15}, [%3] \n"// d14(a116 ....) "vld2.s8 {d16-d17}, [%4]! \n"// r2 d16(a20 a22 ... a214) d17(a21 a23 ... a215) "vld2.s8 {d20-d21}, [%4] \n"// d20(a216 ....) "vld1.s32 {d22-d25}, [%1] \n"// q11(out0 - out3) q12(out4 - out7) "vext.s8 d8, d4, d8, #1 \n"// d8(a02 a04 ... a016) "vext.s8 d14, d10, d14, #1 \n"// d14(a12 a14 ... a116) "vext.s8 d20, d16, d20, #1 \n"// d20(a22 a24 ... a216) "vmovl.s8 q3, d5 \n"// q3(a01 a03 ... a015) "vmovl.s8 q2, d4 \n"// q2(a00 a02 ... a014) "vmovl.s8 q4, d8 \n"// q4(a02 a04 ... a016) "vmovl.s8 q6, d11 \n"// q6(a11 a13 ... a115) "vmovl.s8 q5, d10 \n"// q5(a10 a12 ... a114) "vmovl.s8 q7, d14 \n"// q7(a12 a14 ... a116) "vmovl.s8 q9, d17 \n"// q9(a21 a23 ... a215) "vmovl.s8 q8, d16 \n"// q8(a20 a22 ... a214) "vmovl.s8 q10, d20 \n"// q10(a22 a24 ... a216) "vmlal.s16 q11, d4, d0[0] \n"// k0 "vmlal.s16 q12, d5, d0[0] \n" "vmull.s16 q13, d6, d0[1] \n"// k1 "vmull.s16 q14, d7, d0[1] \n" "vmlal.s16 q11, d8, d0[2] \n"// k2 "vmlal.s16 q12, d9, d0[2] \n" "vmlal.s16 q13, d12, d1[0] \n"// k4 "vmlal.s16 q14, d13, d1[0] \n" "vmlal.s16 q11, d10, d0[3] \n"// k3 "vmlal.s16 q12, d11, d0[3] \n" "vmlal.s16 q13, d14, d1[1] \n"// k5 "vmlal.s16 q14, d15, d1[1] \n" "vmlal.s16 q11, d16, d1[2] \n"// k6 "vmlal.s16 q12, d17, d1[2] \n" "vmlal.s16 q13, d18, d1[3] \n"// k7 "vmlal.s16 q14, d19, d1[3] \n" "vmlal.s16 q11, d20, d2[0] \n"// k8 "vmlal.s16 q12, d21, d2[0] \n" "vadd.s32 q11, q11, q13 \n" "vadd.s32 q12, q12, q14 \n" "vst1.32 {d22-d25}, [%1]! 
\n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON if (remain > 0) { #if __ARM_NEON int8x8_t _k01234567s8 = vld1_s8(ktmp); int8x8_t _k8xxxxxxxs8 = vld1_s8(ktmp+8); int8x8_t _k34567xxxs8 = vext_s8(_k01234567s8, _k01234567s8, 3); int8x8_t _k678xxxxxs8 = vext_s8(_k01234567s8, _k8xxxxxxxs8, 6); int16x8_t _k0123_s16 = vmovl_s8(_k01234567s8); int16x8_t _k3456_s16 = vmovl_s8(_k34567xxxs8); int16x8_t _k678x_s16 = vmovl_s8(_k678xxxxxs8); #endif for (; remain>0; remain--) { #if __ARM_NEON int8x8_t _r00s8 = vld1_s8(r0); int8x8_t _r10s8 = vld1_s8(r1); int8x8_t _r20s8 = vld1_s8(r2); int16x8_t _r00s16 = vmovl_s8(_r00s8); int16x8_t _r10s16 = vmovl_s8(_r10s8); int16x8_t _r20s16 = vmovl_s8(_r20s8); int32x4_t _sum = vmull_s16(vget_low_s16(_r00s16), vget_low_s16(_k0123_s16)); _sum = vmlal_s16(_sum, vget_low_s16(_r10s16), vget_low_s16(_k3456_s16)); _sum = vmlal_s16(_sum, vget_low_s16(_r20s16), vget_low_s16(_k678x_s16)); _sum = vsetq_lane_s32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_s32(_sum); #else int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum)); _ss = vpadd_s32(_ss, _ss); *outptr = vget_lane_s32(_ss, 0); #endif // __aarch64__ #else int sum = 0; sum += (int)r0[0] * ktmp[0]; sum += (int)r0[1] * ktmp[1]; sum += (int)r0[2] * ktmp[2]; sum += (int)r1[0] * ktmp[3]; sum += (int)r1[1] * ktmp[4]; sum += (int)r1[2] * ktmp[5]; sum += (int)r2[0] * ktmp[6]; sum += (int)r2[1] * ktmp[7]; sum += (int)r2[2] * ktmp[8]; *outptr += sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 9; } } } static void conv3x3s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& 
opt) { int kernel_w = 3; int kernel_h = 3; int stride_w = 1; int stride_h = 1; conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt); } static void conv3x3s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) { int kernel_w = 3; int kernel_h = 3; int stride_w = 2; int stride_h = 2; conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt); }
GB_binop__rdiv_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int16) // A*D function (colscale): GB (_AxD__rdiv_int16) // D*A function (rowscale): GB (_DxB__rdiv_int16) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int16) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int16) // C=scalar+B GB (_bind1st__rdiv_int16) // C=scalar+B' GB (_bind1st_tran__rdiv_int16) // C=A+scalar GB (_bind2nd__rdiv_int16) // C=A'+scalar GB (_bind2nd_tran__rdiv_int16) // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 16) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 
// true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_SIGNED (y, x, 16) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_INT16 || GxB_NO_RDIV_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// C += A+B, where C, A, and B are all dense.  The operator type and the RDIV
// semantics come from the GB_* macros defined above; the loop bodies live in
// the #include'd templates, which expand using those macros.
void GB (_Cdense_ewise3_accum__rdiv_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this hard-coded kernel is compiled out
// (GB_DISABLE), so the caller falls back to the generic method.
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rdiv_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rdiv_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable duplicate return kept by the code generator; harmless.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rdiv_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx aliases the numeric values of C; the template writes into it.
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rdiv_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rdiv_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces allocated (if needed) by the template, freed by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__rdiv_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rdiv_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for RDIV (see macro above), so only the #else arm
    // below is compiled for this operator.
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__rdiv_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rdiv_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = rdiv (x, Bx [p]) = Bx [p] / x for every entry present in
// B (per the bitmap Bb, if any), in parallel across nthreads.
GrB_Info GB (_bind1st__rdiv_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IDIV_SIGNED (bij, x, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [p] = rdiv (Ax [p], y) = y / Ax [p] for every entry present in
// A (per the bitmap Ab, if any), in parallel across nthreads.
GrB_Info GB (_bind2nd__rdiv_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IDIV_SIGNED (y, aij, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
// For rdiv: cij = aij / x (the A entry is the numerator when x is bound 1st).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_IDIV_SIGNED (aij, x, 16) ;     \
}

GrB_Info GB (_bind1st_tran__rdiv_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
// For rdiv: cij = y / aij (the bound scalar y is the numerator).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_IDIV_SIGNED (y, aij, 16) ;     \
}

GrB_Info GB (_bind2nd_tran__rdiv_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DRB049-fprintf-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Example use of fprintf */ #include <stdio.h> #include <omp.h> int main(int argc,char *argv[]) { int i; int ret; FILE *pfile; int len = 1000; int A[1000]; #pragma omp parallel for private (i) for (i = 0; i <= len - 1; i += 1) { A[i] = i; } pfile = fopen("mytempfile.txt","a+"); if (pfile == ((void *)0)) { fprintf(stderr,"Error in fopen()\n"); } for (i = 0; i <= len - 1; i += 1) { fprintf(pfile,"%d\n",A[i]); } fclose(pfile); ret = remove("mytempfile.txt"); if (ret != 0) { fprintf(stderr,"Error: unable to delete mytempfile.txt\n"); } return 0; }
9785.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) { int t10; int t8; int t6; int t4; int t2; for (t2 = 0; t2 <= tmax - 1; t2 += 1) { for (t4 = 0; t4 <= ny - 1; t4 += 1) ey[0][t4] = _fict_[t2]; #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 1; t4 <= nx - 1; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < nx - 1 ? t4 + 31 : nx - 1); t6 += 1) for (t8 = 0; t8 <= ny - 1; t8 += 128) for (t10 = t8; t10 <= (ny - 1 < t8 + 127 ? ny - 1 : t8 + 127); t10 += 1) ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]); #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 0; t4 <= nx - 1; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < nx - 1 ? t4 + 31 : nx - 1); t6 += 1) for (t8 = 1; t8 <= ny - 1; t8 += 128) for (t10 = t8; t10 <= (ny - 1 < t8 + 127 ? ny - 1 : t8 + 127); t10 += 1) ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]); #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 0; t4 <= nx - 2; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < nx - 2 ? t4 + 31 : nx - 2); t6 += 1) for (t8 = 0; t8 <= ny - 2; t8 += 128) for (t10 = t8; t10 <= (ny - 2 < t8 + 127 ? ny - 2 : t8 + 127); t10 += 1) hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]); } }
GB_binop__lor_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lor_int16 // A.*B function (eWiseMult): GB_AemultB__lor_int16 // A*D function (colscale): GB_AxD__lor_int16 // D*A function (rowscale): GB_DxB__lor_int16 // C+=B function (dense accum): GB_Cdense_accumB__lor_int16 // C+=b function (dense accum): GB_Cdense_accumb__lor_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_int16 // C=scalar+B GB_bind1st__lor_int16 // C=scalar+B' GB_bind1st_tran__lor_int16 // C=A+scalar GB_bind2nd__lor_int16 // C=A'+scalar GB_bind2nd_tran__lor_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] 
#define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) || (y != 0)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_INT16 || GxB_NO_LOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lor_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lor_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lor_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lor_int16 ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lor_int16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__lor_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice 
= NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lor_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lor_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = Bx [p] ; Cx [p] = ((x != 0) || (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lor_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = Ax [p] ; Cx [p] = ((aij != 0) || (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) || (aij != 0)) ; \ } GrB_Info GB_bind1st_tran__lor_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) || (y != 0)) ; \ } GrB_Info GB_bind2nd_tran__lor_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ceil_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include <math.h>

/*
 * Element-wise ceiling: out[i] = ceil(in[i]).
 *
 * BUG FIX: the original rank<4 branch computed
 *     input_data[i] = ceil(out_data[i]);
 * i.e. it read the (not yet written) output buffer and clobbered the
 * input tensor.  Both branches now read the input and write the output.
 *
 * Returns 0 on success, -1 for unsupported ranks (> 4).
 */
int ref_ceil_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, int num_thread)
{
    /* rank 1..3: flat loop over every element */
    if (input_tensor->dim_num < 4)
    {
        float* input_data = input_tensor->data;
        float* out_data = output_tensor->data;
        int total_size = input_tensor->elem_num;

        for (int i = 0; i < total_size; i++)
        {
            out_data[i] = ceil(input_data[i]);
        }

        return 0;
    }
    /* rank 4: parallelize over channels.
     * assumes NCHW layout (dims[1]=C, dims[2]=H, dims[3]=W) -- TODO confirm
     * against the graph layout. */
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        /* was output_tensor->dims[2]; input and output shapes must match for
         * this element-wise op, so read the input consistently */
        int h = input_tensor->dims[2];
        int channels = input_tensor->dims[1];
        int size = h * w;
        int c_step = h * w;

        float* input_data = input_tensor->data;
        float* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + c_step * q;
            float* dst = out_data + c_step * q;

            for (int i = 0; i < size; i++)
            {
                dst[i] = ceil(src[i]);
            }
        }

        return 0;
    }

    return -1;
}

/* Node lifecycle hooks: this op needs no per-node state. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Fetch the node's single input/output tensors and run the fp32 kernel. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = ref_ceil_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    if (ret != 0)
        return -1;

    return 0;
}

static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

static int reg_ceil_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_CEIL, &hcl_node_ops);
}

static int unreg_ceil_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_CEIL, &hcl_node_ops);
}

AUTO_REGISTER_OPS(reg_ceil_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_ceil_hcl_ops);
9672.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (j, k) num_threads(2) { /* E := A*B */ #pragma omp for schedule(dynamic, 1) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } /* F := C*D */ #pragma omp for schedule(dynamic, 1) for (i = 0; i < _PB_NJ; i++) for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } /* G := E*F */ #pragma omp for schedule(dynamic, 1) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
ex02.c
#include <stdio.h>
#include <stdlib.h>   /* malloc/free were previously used without a prototype */
#include <omp.h>

static long num_steps = 1000000;
double step;

/*
 * Parallel midpoint-rule integration of 4/(1+x^2) over [0,1] (= pi).
 *
 * Fixes vs. the original:
 *  - standard main signature (was `int main(int argv, char* argc)`);
 *  - each thread accumulates into a local and stores once: the malloc'd
 *    array was read uninitialized by `sum[id] += ...`;
 *  - the final reduction iterates over num_threads, not num_procs
 *    (out-of-bounds read whenever num_procs != num_threads);
 *  - the last thread picks up the remainder when num_steps is not a
 *    multiple of num_threads (those steps were silently dropped);
 *  - sum is freed.
 */
int main(int argc, char *argv[])
{
    int num_threads;
    double pi, total_sum = 0.0;
    step = 1.0 / (double) num_steps;
    int num_procs = omp_get_num_procs();
    double *sum;
    int steps_per_thread;

    double startTime = omp_get_wtime();
    #pragma omp parallel
    {
        #pragma omp single
        {
            /* omp_get_num_threads() only reports >1 inside the region. */
            num_threads = omp_get_num_threads();
            steps_per_thread = num_steps / num_threads;
            /* calloc: partial sums must start at zero */
            sum = (double*) calloc(num_threads, sizeof(double));
            printf ("Found %d CPUs. Using %d threads and computing %d steps per thread.\n",
                    num_procs, num_threads, steps_per_thread);
            /* implicit barrier: every thread sees the values set above */
        }

        int id = omp_get_thread_num();
        printf("Executing thread %d out of %d\n", id, num_threads);

        long first = (long) id * steps_per_thread;
        /* last thread takes the remainder so all num_steps are covered */
        long last = (id == num_threads - 1) ? num_steps : first + steps_per_thread;

        double x, local = 0.0;
        for (long i = first; i < last; i++) {
            x = (i + 0.5) * step;
            local += 4.0 / (1.0 + x * x);
        }
        sum[id] = local;   /* single write per thread: no race, less false sharing */
    }

    int i;
    for (i = 0; i < num_threads; i++)
        total_sum += sum[i];
    free(sum);

    pi = step * total_sum;
    double endTime = omp_get_wtime();

    printf ("Computed integral: %f\n", pi);
    printf ("Time elapsed: %f secs\n", (endTime - startTime));
    return 0;
}

/* Sequential reference implementation of the same integral. */
int main_serial(int argc, char *argv[])
{
    int i;
    double x, pi, sum = 0.0;
    step = 1.0 / (double) num_steps;

    for (i = 0; i < num_steps; i++) {
        x = (i + 0.5) * step;
        sum = sum + 4.0 / (1.0 + x * x);
    }
    pi = step * sum;

    printf ("Computed integral: %f\n", pi);
    return 0;
}
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
convolution_5x5.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// 5x5 convolution, stride 1.
// One output channel per OpenMP iteration; each output channel is seeded
// with its bias and then accumulates the 5x5 correlation over every input
// channel q, using the 25 weights at kernel + p*inch*25 + q*25.
// The main loop produces two output rows per pass (outptr/outptr2, reading
// input rows r0..r5); a tail loop handles the final odd row.
static void conv5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*25 + q*25;

            // six consecutive input rows feed two output rows
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
            const float* r4 = img0 + w*4;
            const float* r5 = img0 + w*5;

            // the five 5-element kernel rows
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 5;
            const float* k2 = kernel0 + 10;
            const float* k3 = kernel0 + 15;
            const float* k4 = kernel0 + 20;

#if __ARM_NEON
            // the 25 weights broadcast into vector registers; names encode
            // the kernel indices held in each lane
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k4567 = vld1q_f32(kernel0+4);
            float32x4_t _k891011 = vld1q_f32(kernel0+8);
            float32x4_t _k12131415 = vld1q_f32(kernel0+12);
            float32x4_t _k16171819 = vld1q_f32(kernel0+16);
            float32x4_t _k20212223 = vld1q_f32(kernel0+20);
            float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON

            int i = 0;

            // two output rows per iteration
            for (; i+1 < outh; i+=2)
            {
#if __ARM_NEON
                int nn = outw >> 2;            // vectorized groups of 4 columns
                int remain = outw - (nn << 2); // leftover columns
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4_t _sum = vdupq_n_f32(0.f);
                    float32x4_t _sum2 = vdupq_n_f32(0.f);

                    // _rXY = 4 consecutive pixels of row X starting at column offset Y,
                    // built from two loads and vextq shifts
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r04 = vld1q_f32(r0 + 4);
                    float32x4_t _r01 = vextq_f32(_r00, _r04, 1);
                    float32x4_t _r02 = vextq_f32(_r00, _r04, 2);
                    float32x4_t _r03 = vextq_f32(_r00, _r04, 3);

                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r14 = vld1q_f32(r1 + 4);
                    float32x4_t _r11 = vextq_f32(_r10, _r14, 1);
                    float32x4_t _r12 = vextq_f32(_r10, _r14, 2);
                    float32x4_t _r13 = vextq_f32(_r10, _r14, 3);

                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r24 = vld1q_f32(r2 + 4);
                    float32x4_t _r21 = vextq_f32(_r20, _r24, 1);
                    float32x4_t _r22 = vextq_f32(_r20, _r24, 2);
                    float32x4_t _r23 = vextq_f32(_r20, _r24, 3);

                    float32x4_t _r30 = vld1q_f32(r3);
                    float32x4_t _r34 = vld1q_f32(r3 + 4);
                    float32x4_t _r31 = vextq_f32(_r30, _r34, 1);
                    float32x4_t _r32 = vextq_f32(_r30, _r34, 2);
                    float32x4_t _r33 = vextq_f32(_r30, _r34, 3);

                    float32x4_t _r40 = vld1q_f32(r4);
                    float32x4_t _r44 = vld1q_f32(r4 + 4);
                    float32x4_t _r41 = vextq_f32(_r40, _r44, 1);
                    float32x4_t _r42 = vextq_f32(_r40, _r44, 2);
                    float32x4_t _r43 = vextq_f32(_r40, _r44, 3);

                    float32x4_t _r50 = vld1q_f32(r5);
                    float32x4_t _r54 = vld1q_f32(r5 + 4);
                    float32x4_t _r51 = vextq_f32(_r50, _r54, 1);
                    float32x4_t _r52 = vextq_f32(_r50, _r54, 2);
                    float32x4_t _r53 = vextq_f32(_r50, _r54, 3);

                    // output row i: rows r0..r4 against kernel rows k0..k4
                    _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);

                    _sum = vfmaq_laneq_f32(_sum, _r10, _k4567, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1);

                    _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r31, _k16171819, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r33, _k16171819, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3);

                    _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0);

                    // output row i+1: rows r1..r5 against kernel rows k0..k4
                    _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k0123, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r11, _k0123, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k0123, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r13, _k0123, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r14, _k4567, 0);

                    _sum2 = vfmaq_laneq_f32(_sum2, _r20, _k4567, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k4567, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r22, _k4567, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r23, _k891011, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r24, _k891011, 1);

                    _sum2 = vfmaq_laneq_f32(_sum2, _r30, _k891011, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r31, _k891011, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r32, _k12131415, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r33, _k12131415, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r34, _k12131415, 2);

                    _sum2 = vfmaq_laneq_f32(_sum2, _r40, _k12131415, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r41, _k16171819, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r42, _k16171819, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r43, _k16171819, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r44, _k16171819, 3);

                    _sum2 = vfmaq_laneq_f32(_sum2, _r50, _k20212223, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r51, _k20212223, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r52, _k20212223, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r53, _k20212223, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r54, _k24242424, 0);

                    vst1q_f32(outptr, _sum);
                    vst1q_f32(outptr2, _sum2);

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                    outptr += 4;
                    outptr2 += 4;
                }
#else
                // armv7: same two-row computation in inline asm; q7/q8 carry
                // the two output accumulators, q13/q14 are partial sums.
                if (nn > 0)
                {
                asm volatile(
//                     "veor q13, q13 \n"
//                     "veor q14, q14 \n"

                    "pld [%1, #128] \n"
                    "vld1.f32 {d14-d15}, [%1] \n"// q7 = out

                    "0: \n"
                    // q11 = rx1 / rx3
                    // q12 = rx2
                    // q13 q14 = intermediate sum register
                    "pld [%2, #128] \n"
                    "vld1.f32 {d16-d17}, [%2] \n"// q8 = out2

                    "pld [%4, #256] \n"

                    // r1
                    "vld1.f32 {d18-d21}, [%4] \n"// q9 q10 = r10 r14
                    "add %4, #16 \n"

                    "vext.32 q11, q9, q10, #1 \n"// r11

                    "vmul.f32 q13, q9, %e19[1] \n"
                    "vmla.f32 q8, q9, %e18[0] \n"

                    "vext.32 q12, q9, q10, #2 \n"// r12

                    "vmla.f32 q7, q11, %f19[0] \n"
                    "vmul.f32 q14, q11, %e18[1] \n"

                    "vext.32 q11, q9, q10, #3 \n"// r13

                    "vmla.f32 q13, q12, %f19[1] \n"
                    "vmla.f32 q8, q12, %f18[0] \n"

                    "vmla.f32 q7, q11, %e20[0] \n"
                    "vmla.f32 q14, q11, %f18[1] \n"

                    "pld [%5, #256] \n"

                    "vmla.f32 q13, q10, %e20[1] \n"
                    "vmla.f32 q8, q10, %e19[0] \n"

                    // r2
                    "vld1.f32 {d18-d21}, [%5] \n"// q9 q10 = r20 r24
                    "add %5, #16 \n"

                    "vext.32 q11, q9, q10, #1 \n"// r21

                    "vmla.f32 q7, q9, %f20[0] \n"
                    "vmla.f32 q14, q9, %e19[1] \n"

                    "vext.32 q12, q9, q10, #2 \n"// r22

                    "vmla.f32 q13, q11, %f20[1] \n"
                    "vmla.f32 q8, q11, %f19[0] \n"

                    "vext.32 q11, q9, q10, #3 \n"// r23

                    "vmla.f32 q7, q12, %e21[0] \n"
                    "vmla.f32 q14, q12, %f19[1] \n"

                    "vmla.f32 q13, q11, %e21[1] \n"
                    "vmla.f32 q8, q11, %e20[0] \n"

                    "pld [%6, #256] \n"

                    "vmla.f32 q7, q10, %f21[0] \n"
                    "vmla.f32 q14, q10, %e20[1] \n"

                    // r3
                    "vld1.f32 {d18-d21}, [%6] \n"// q9 q10 = r30 r34
                    "add %6, #16 \n"

                    "vext.32 q11, q9, q10, #1 \n"// r31

                    "vmla.f32 q13, q9, %f21[1] \n"
                    "vmla.f32 q8, q9, %f20[0] \n"

                    "vext.32 q12, q9, q10, #2 \n"// r32

                    "vmla.f32 q7, q11, %e22[0] \n"
                    "vmla.f32 q14, q11, %f20[1] \n"

                    "vext.32 q11, q9, q10, #3 \n"// r33

                    "vmla.f32 q13, q12, %e22[1] \n"
                    "vmla.f32 q8, q12, %e21[0] \n"

                    "vmla.f32 q7, q11, %f22[0] \n"
                    "vmla.f32 q14, q11, %e21[1] \n"

                    "pld [%7, #256] \n"

                    "vmla.f32 q13, q10, %f22[1] \n"
                    "vmla.f32 q8, q10, %f21[0] \n"

                    // r4
                    "vld1.f32 {d18-d21}, [%7] \n"// q9 q10 = r40 r44
                    "add %7, #16 \n"

                    "vext.32 q11, q9, q10, #1 \n"// r41

                    "vmla.f32 q7, q9, %e23[0] \n"
                    "vmla.f32 q14, q9, %f21[1] \n"

                    "vext.32 q12, q9, q10, #2 \n"// r42

                    "vmla.f32 q13, q11, %e23[1] \n"
                    "vmla.f32 q8, q11, %e22[0] \n"

                    "vext.32 q11, q9, q10, #3 \n"// r43

                    "vmla.f32 q7, q12, %f23[0] \n"
                    "vmla.f32 q14, q12, %e22[1] \n"

                    "vmla.f32 q13, q11, %f23[1] \n"
                    "vmla.f32 q8, q11, %f22[0] \n"

                    "pld [%3, #256] \n"

                    "vmla.f32 q7, q10, %e24[0] \n"
                    "vmla.f32 q14, q10, %f22[1] \n"

                    // r0 and r5
                    "vld1.f32 {d18-d21}, [%3] \n"// q9 q10 = r00 r04
                    "add %3, #16 \n"

                    "vext.32 q11, q9, q10, #1 \n"// r01

                    "vmla.f32 q13, q11, %e18[1] \n"

                    "vext.32 q12, q9, q10, #2 \n"// r02

                    "vmla.f32 q7, q12, %f18[0] \n"

                    "vext.32 q11, q9, q10, #3 \n"// r03

                    "pld [%8, #256] \n"

                    "vmla.f32 q13, q11, %f18[1] \n"

                    // r5
                    "vld1.f32 {d22-d25}, [%8] \n"// q11 q12 = r50 r54
                    "add %8, #16 \n"

                    "vmla.f32 q8, q11, %e23[0] \n"
                    "vmla.f32 q14, q12, %e24[0] \n"

                    "vmla.f32 q7, q9, %e18[0] \n"
                    "vmla.f32 q13, q10, %e19[0] \n"

                    "vext.32 q9, q11, q12, #1 \n"// r51
                    "vext.32 q10, q11, q12, #2 \n"// r52

                    "vmla.f32 q14, q9, %e23[1] \n"

                    "vext.32 q9, q11, q12, #3 \n"// r53

                    "vmla.f32 q8, q10, %f23[0] \n"
                    "vmla.f32 q14, q9, %f23[1] \n"

                    "vadd.f32 q7, q7, q13 \n"

//                     "veor q13, q13 \n"

                    "vst1.f32 {d14-d15}, [%1]! \n"

                    "vadd.f32 q8, q8, q14 \n"

                    "pld [%1, #128] \n"
                    "vld1.f32 {d14-d15}, [%1] \n"// q7 = out

//                     "veor q14, q14 \n"

                    "vst1.f32 {d16-d17}, [%2]! \n"

                    "subs %0, #1 \n"
                    "bne 0b \n"

                    : "=r"(nn),      // %0
                      "=r"(outptr),  // %1
                      "=r"(outptr2), // %2
                      "=r"(r0),      // %3
                      "=r"(r1),      // %4
                      "=r"(r2),      // %5
                      "=r"(r3),      // %6
                      "=r"(r4),      // %7
                      "=r"(r5)       // %8
                    : "0"(nn),
                      "1"(outptr),
                      "2"(outptr2),
                      "3"(r0),
                      "4"(r1),
                      "5"(r2),
                      "6"(r3),
                      "7"(r4),
                      "8"(r5),
                      "w"(_k0123),     // %18
                      "w"(_k4567),     // %19
                      "w"(_k891011),   // %20
                      "w"(_k12131415), // %21
                      "w"(_k16171819), // %22
                      "w"(_k20212223), // %23
                      "w"(_k24242424)  // %24
                    : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // leftover columns for the two output rows, one pixel at a time
                for (; remain>0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;
#if __ARM_NEON
                    // vector part covers the first 4 of the 5 taps per row;
                    // column 4 is handled via _k_t4/_r_t4 and the scalar tail
                    float32x4_t _r1 = vld1q_f32(r1);
                    float32x4_t _k1 = vld1q_f32(k1);
                    float32x4_t _sum = vmulq_f32(_r1, _k1);
                    float32x4_t _sum2 = vmulq_f32(_r1, _k0123);

                    float32x4_t _r2 = vld1q_f32(r2);
                    float32x4_t _k2 = vld1q_f32(k2);
                    _sum = vmlaq_f32(_sum, _r2, _k2);
                    _sum2 = vmlaq_f32(_sum2, _r2, _k1);

                    float32x4_t _r3 = vld1q_f32(r3);
                    _sum = vmlaq_f32(_sum, _r3, _k20212223);
                    _sum2 = vmlaq_f32(_sum2, _r3, _k2);

                    float32x4_t _r4 = vld1q_f32(r4);
                    float32x4_t _k4 = vld1q_f32(k4);
                    _sum = vmlaq_f32(_sum, _r4, _k4);
                    _sum2 = vmlaq_f32(_sum2, _r4, _k20212223);

                    float32x4_t _r0 = vld1q_f32(r0);
                    _sum = vmlaq_f32(_sum, _r0, _k0123);

                    float32x4_t _r5 = vld1q_f32(r5);
                    _sum2 = vmlaq_f32(_sum2, _r5, _k4);

                    float32x4_t _k_t4;
                    _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
                    _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
                    _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
                    _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);

                    float32x4_t _r_t4;
                    _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
                    _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
                    _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);

                    _sum = vmlaq_f32(_sum, _r_t4, _k_t4);
                    sum = r4[4] * k4[4];

                    _r_t4 = vextq_f32(_r_t4, _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3);
                    _sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4);
                    sum2 = r5[4] * k4[4];

                    // horizontal reduction of both accumulators at once
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
                    float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2);

                    sum += vget_lane_f32(_ss_ss2, 0);
                    sum2 += vget_lane_f32(_ss_ss2, 1);
#else
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];

                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r1[3] * k0[3];
                    sum2 += r1[4] * k0[4];

                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r2[3] * k1[3];
                    sum2 += r2[4] * k1[4];

                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];
                    sum2 += r3[3] * k2[3];
                    sum2 += r3[4] * k2[4];

                    sum2 += r4[0] * k3[0];
                    sum2 += r4[1] * k3[1];
                    sum2 += r4[2] * k3[2];
                    sum2 += r4[3] * k3[3];
                    sum2 += r4[4] * k3[4];

                    sum2 += r5[0] * k4[0];
                    sum2 += r5[1] * k4[1];
                    sum2 += r5[2] * k4[2];
                    sum2 += r5[3] * k4[3];
                    sum2 += r5[4] * k4[4];
#endif // __ARM_NEON
                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    r5++;
                    outptr++;
                    outptr2++;
                }

                // advance past the 4-pixel right border plus one extra row
                // (two output rows consumed per iteration)
                r0 += 4 + w;
                r1 += 4 + w;
                r2 += 4 + w;
                r3 += 4 + w;
                r4 += 4 + w;
                r5 += 4 + w;

                outptr += outw;
                outptr2 += outw;
            }

            // remaining single output row
            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4_t _sum = vdupq_n_f32(0.f);

                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r04 = vld1q_f32(r0 + 4);
                    float32x4_t _r01 = vextq_f32(_r00, _r04, 1);
                    float32x4_t _r02 = vextq_f32(_r00, _r04, 2);
                    float32x4_t _r03 = vextq_f32(_r00, _r04, 3);

                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r14 = vld1q_f32(r1 + 4);
                    float32x4_t _r11 = vextq_f32(_r10, _r14, 1);
                    float32x4_t _r12 = vextq_f32(_r10, _r14, 2);
                    float32x4_t _r13 = vextq_f32(_r10, _r14, 3);

                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r24 = vld1q_f32(r2 + 4);
                    float32x4_t _r21 = vextq_f32(_r20, _r24, 1);
                    float32x4_t _r22 = vextq_f32(_r20, _r24, 2);
                    float32x4_t _r23 = vextq_f32(_r20, _r24, 3);

                    float32x4_t _r30 = vld1q_f32(r3);
                    float32x4_t _r34 = vld1q_f32(r3 + 4);
                    float32x4_t _r31 = vextq_f32(_r30, _r34, 1);
                    float32x4_t _r32 = vextq_f32(_r30, _r34, 2);
                    float32x4_t _r33 = vextq_f32(_r30, _r34, 3);

                    float32x4_t _r40 = vld1q_f32(r4);
                    float32x4_t _r44 = vld1q_f32(r4 + 4);
                    float32x4_t _r41 = vextq_f32(_r40, _r44, 1);
                    float32x4_t _r42 = vextq_f32(_r40, _r44, 2);
                    float32x4_t _r43 = vextq_f32(_r40, _r44, 3);

                    _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);

                    _sum = vfmaq_laneq_f32(_sum, _r10, _k4567, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1);

                    _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r31, _k16171819, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r33, _k16171819, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3);

                    _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0);

                    vst1q_f32(outptr, _sum);

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    outptr += 4;
                }
#else
                // armv7 single-row kernel: q7 is the accumulator, q13/q14/q15
                // hold partial sums merged at the end of each iteration.
                if (nn > 0)
                {
                asm volatile(
//                     "veor q15, q15 \n"// _sum3 = 0;

                    "pld [%1, #128] \n"

                    "pld [%2, #256] \n"
                    "vld1.f32 {d16-d19}, [%2] \n"// _r00 = vld1q_f32(r0+j);
                    "add %2, #16 \n"

                    "0: \n"

                    "vld1.f32 {d14-d15}, [%1] \n"// _sum = vld1q_f32(outptr+j);

                    "veor q13, q13 \n"// _sum2 = 0;
                    "veor q14, q14 \n"// _sum3 = 0;

                    "vext.32 q10, q8, q9, #1 \n"// _r01
                    "vext.32 q11, q8, q9, #2 \n"// _r02
                    "vext.32 q12, q8, q9, #3 \n"// _r03

                    "vmla.f32 q7, q8, %e14[0] \n"
                    "vmla.f32 q13, q10, %e14[1] \n"

                    "pld [%3, #256] \n"

                    "vmla.f32 q14, q11, %f14[0] \n"
                    "vmul.f32 q15, q12, %f14[1] \n"

                    "vmla.f32 q7, q9, %e15[0] \n"

                    "vld1.f32 {d16-d19}, [%3] \n"
                    "add %3, #16 \n"

                    "vext.32 q10, q8, q9, #1 \n"
                    "vext.32 q11, q8, q9, #2 \n"
                    "vext.32 q12, q8, q9, #3 \n"

                    "vmla.f32 q7, q8, %e15[1] \n"
                    "vmla.f32 q13, q10, %f15[0] \n"

                    "pld [%4, #256] \n"

                    "vmla.f32 q14, q11, %f15[1] \n"
                    "vmla.f32 q15, q12, %e16[0] \n"

                    "vmla.f32 q7, q9, %e16[1] \n"

                    "vld1.f32 {d16-d19}, [%4] \n"
                    "add %4, #16 \n"

                    "vext.32 q10, q8, q9, #1 \n"
                    "vext.32 q11, q8, q9, #2 \n"
                    "vext.32 q12, q8, q9, #3 \n"

                    "vmla.f32 q7, q8, %f16[0] \n"
                    "vmla.f32 q13, q10, %f16[1] \n"

                    "pld [%5, #256] \n"

                    "vmla.f32 q14, q11, %e17[0] \n"
                    "vmla.f32 q15, q12, %e17[1] \n"

                    "vmla.f32 q7, q9, %f17[0] \n"

                    "vld1.f32 {d16-d19}, [%5] \n"
                    "add %5, #16 \n"

                    "vext.32 q10, q8, q9, #1 \n"
                    "vext.32 q11, q8, q9, #2 \n"
                    "vext.32 q12, q8, q9, #3 \n"

                    "vmla.f32 q7, q8, %f17[1] \n"
                    "vmla.f32 q13, q10, %e18[0] \n"

                    "pld [%6, #256] \n"

                    "vmla.f32 q14, q11, %e18[1] \n"
                    "vmla.f32 q15, q12, %f18[0] \n"

                    "vmla.f32 q7, q9, %f18[1] \n"

                    "vld1.f32 {d16-d19}, [%6] \n"
                    "add %6, #16 \n"

                    "vext.32 q10, q8, q9, #1 \n"
                    "vext.32 q11, q8, q9, #2 \n"
                    "vext.32 q12, q8, q9, #3 \n"

                    "vmla.f32 q7, q8, %e19[0] \n"
                    "vmla.f32 q13, q10, %e19[1] \n"
                    "vmla.f32 q14, q11, %f19[0] \n"
                    "vmla.f32 q15, q12, %f19[1] \n"

                    "vmla.f32 q7, q9, %e20[0] \n"

                    "vadd.f32 q14, q14, q15 \n"
                    "vadd.f32 q7, q7, q13 \n"

//                     "veor q15, q15 \n"// _sum3 = 0;

                    "pld [%2, #256] \n"

                    "vadd.f32 q7, q7, q14 \n"

                    "vld1.f32 {d16-d19}, [%2] \n"// _r00 = vld1q_f32(r0+j);
                    "add %2, #16 \n"

                    "vst1.f32 {d14-d15}, [%1]! \n"

                    "pld [%1, #128] \n"

                    "subs %0, #1 \n"
                    "bne 0b \n"

                    "sub %2, #16 \n"

                    : "=r"(nn),     // %0
                      "=r"(outptr), // %1
                      "=r"(r0),     // %2
                      "=r"(r1),     // %3
                      "=r"(r2),     // %4
                      "=r"(r3),     // %5
                      "=r"(r4)      // %6
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "6"(r4),
                      "w"(_k0123),     // %14
                      "w"(_k4567),     // %15
                      "w"(_k891011),   // %16
                      "w"(_k12131415), // %17
                      "w"(_k16171819), // %18
                      "w"(_k20212223), // %19
                      "w"(_k24242424)  // %20
                    : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
                    float sum = 0;
#if __ARM_NEON
                    float32x4_t _r0 = vld1q_f32(r0);
                    float32x4_t _sum = vmulq_f32(_r0, _k0123);

                    float32x4_t _r1 = vld1q_f32(r1);
                    _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));

                    float32x4_t _r2 = vld1q_f32(r2);
                    _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));

                    float32x4_t _r3 = vld1q_f32(r3);
                    _sum = vmlaq_f32(_sum, _r3, _k20212223);

                    float32x4_t _r4 = vld1q_f32(r4);
                    _sum = vmlaq_f32(_sum, _r4, vld1q_f32(k4));

                    // column-4 taps of rows 0..3 gathered into one vector
                    float32x4_t _k_t4;
                    _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
                    _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
                    _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
                    _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);

                    float32x4_t _r_t4;
                    _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
                    _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
                    _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);

                    _sum = vmlaq_f32(_sum, _r_t4, _k_t4);
                    sum = r4[4] * k4[4];

                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);

                    sum += vget_lane_f32(_ss, 0);
#else
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];
#endif
                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    outptr++;
                }

                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                r4 += 4;
            }
        }
    }
}

// 5x5 convolution, stride 2.
// Same weight layout as conv5x5s1_neon; one output row per iteration, with
// even/odd input columns deinterleaved (vld2) in the vector paths.
// tailstep skips the odd input row and the right-hand columns consumed
// past the last output pixel.
static void conv5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int tailstep = w - 2*outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*25 + q*25;

            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
            const float* r4 = img0 + w*4;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 5;
            const float* k2 = kernel0 + 10;
            const float* k3 = kernel0 + 15;
            const float* k4 = kernel0 + 20;

#if __ARM_NEON
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k4567 = vld1q_f32(kernel0+4);
            float32x4_t _k891011 = vld1q_f32(kernel0+8);
            float32x4_t _k12131415 = vld1q_f32(kernel0+12);
            float32x4_t _k16171819 = vld1q_f32(kernel0+16);
            float32x4_t _k20212223 = vld1q_f32(kernel0+20);
            float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4_t _sum = vdupq_n_f32(0.f);

                    // vld2 deinterleaves even/odd columns; _rXY then holds the
                    // stride-2 samples of row X at tap offset Y
                    float32x4x2_t _r00_02461357 = vld2q_f32(r0);
                    float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8);
                    float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14
                    float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15
                    float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6
                    float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7
                    float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8
                    float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9
                    float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10

                    float32x4x2_t _r10_02461357 = vld2q_f32(r1);
                    float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8);
                    float32x4_t _r1_8101214 = _r10nx2.val[0];
                    float32x4_t _r1_9111315 = _r10nx2.val[1];
                    float32x4_t _r10 = _r10_02461357.val[0];
                    float32x4_t _r11 = _r10_02461357.val[1];
                    float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1);
                    float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1);
                    float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2);

                    float32x4x2_t _r20_02461357 = vld2q_f32(r2);
                    float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8);
                    float32x4_t _r2_8101214 = _r20nx2.val[0];
                    float32x4_t _r2_9111315 = _r20nx2.val[1];
                    float32x4_t _r20 = _r20_02461357.val[0];
                    float32x4_t _r21 = _r20_02461357.val[1];
                    float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1);
                    float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1);
                    float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 2);

                    float32x4x2_t _r30_02461357 = vld2q_f32(r3);
                    float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8);
                    float32x4_t _r3_8101214 = _r30nx2.val[0];
                    float32x4_t _r3_9111315 = _r30nx2.val[1];
                    float32x4_t _r30 = _r30_02461357.val[0];
                    float32x4_t _r31 = _r30_02461357.val[1];
                    float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1);
                    float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1);
                    float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2);

                    float32x4x2_t _r40_02461357 = vld2q_f32(r4);
                    float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8);
                    float32x4_t _r4_8101214 = _r40nx2.val[0];
                    float32x4_t _r4_9111315 = _r40nx2.val[1];
                    float32x4_t _r40 = _r40_02461357.val[0];
                    float32x4_t _r41 = _r40_02461357.val[1];
                    float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1);
                    float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1);
                    float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);

                    _sum = vfmaq_laneq_f32(_sum, _r10, _k4567, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1);

                    _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r31, _k16171819, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r33, _k16171819, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3);

                    _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0);

                    vst1q_f32(outptr, _sum);

                    r0 += 8;
                    r1 += 8;
                    r2 += 8;
                    r3 += 8;
                    r4 += 8;
                    outptr += 4;
                }
#else
                // armv7: stride-2 row kernel using vld2 deinterleaving loads
                if (nn > 0)
                {
                asm volatile(
                    "veor q15, q15 \n"// _sump3 = 0;

                    "pld [%1, #128] \n"

                    "veor q13, q13 \n"// _sump2 = 0;

                    "pld [%2, #256] \n"

                    "veor q14, q14 \n"// _sump3 = 0;

                    "vld2.f32 {d16-d19}, [%2]! \n"// q8 = 0 2 4 6 q9 = 1 3 5 7
                    "pld [%2, #256] \n"
                    "vld2.f32 {d20-d23}, [%2] \n"// q10 = 8 10 12 14 q11 = 9 11 13 15

                    "0: \n"

                    "vld1.f32 {d14-d15}, [%1] \n"// q7 = outptr

                    "vext.32 q12, q8, q10, #1 \n"// q12 = 2 4 6 8
                    "vext.32 q11, q9, q11, #1 \n"// q11 = 3 5 7 9
                    "vext.32 q10, q8, q10, #2 \n"// q10 = 4 6 8 10

                    "vmla.f32 q7, q8, %e14[0] \n"
                    "vmla.f32 q13, q9, %e14[1] \n"

                    "pld [%3, #256] \n"

                    "vmla.f32 q14, q12, %f14[0] \n"
                    "vmla.f32 q15, q11, %f14[1] \n"

                    "vmla.f32 q7, q10, %e15[0] \n"

                    "vld2.f32 {d16-d19}, [%3]! \n"
                    "pld [%3, #256] \n"
                    "vld2.f32 {d20-d23}, [%3] \n"

                    "vext.32 q12, q8, q10, #1 \n"
                    "vext.32 q11, q9, q11, #1 \n"
                    "vext.32 q10, q8, q10, #2 \n"

                    "vmla.f32 q7, q8, %e15[1] \n"
                    "vmla.f32 q13, q9, %f15[0] \n"

                    "pld [%4, #256] \n"

                    "vmla.f32 q14, q12, %f15[1] \n"
                    "vmla.f32 q15, q11, %e16[0] \n"

                    "vmla.f32 q7, q10, %e16[1] \n"

                    "vld2.f32 {d16-d19}, [%4]! \n"
                    "pld [%4, #256] \n"
                    "vld2.f32 {d20-d23}, [%4] \n"

                    "vext.32 q12, q8, q10, #1 \n"
                    "vext.32 q11, q9, q11, #1 \n"
                    "vext.32 q10, q8, q10, #2 \n"

                    "vmla.f32 q7, q8, %f16[0] \n"
                    "vmla.f32 q13, q9, %f16[1] \n"

                    "pld [%5, #256] \n"

                    "vmla.f32 q14, q12, %e17[0] \n"
                    "vmla.f32 q15, q11, %e17[1] \n"

                    "vmla.f32 q7, q10, %f17[0] \n"

                    "vld2.f32 {d16-d19}, [%5]! \n"
                    "pld [%5, #256] \n"
                    "vld2.f32 {d20-d23}, [%5] \n"

                    "vext.32 q12, q8, q10, #1 \n"
                    "vext.32 q11, q9, q11, #1 \n"
                    "vext.32 q10, q8, q10, #2 \n"

                    "vmla.f32 q7, q8, %f17[1] \n"
                    "vmla.f32 q13, q9, %e18[0] \n"

                    "pld [%6, #256] \n"

                    "vmla.f32 q14, q12, %e18[1] \n"
                    "vmla.f32 q15, q11, %f18[0] \n"

                    "vmla.f32 q7, q10, %f18[1] \n"

                    "vld2.f32 {d16-d19}, [%6]! \n"
                    "pld [%6, #256] \n"
                    "vld2.f32 {d20-d23}, [%6] \n"

                    "vext.32 q12, q8, q10, #1 \n"
                    "vext.32 q11, q9, q11, #1 \n"
                    "vext.32 q10, q8, q10, #2 \n"

                    "vmla.f32 q7, q8, %e19[0] \n"
                    "vmla.f32 q13, q9, %e19[1] \n"
                    "vmla.f32 q14, q12, %f19[0] \n"
                    "vmla.f32 q15, q11, %f19[1] \n"

                    "vmla.f32 q7, q10, %e20[0] \n"

                    "pld [%2, #256] \n"

                    "vld2.f32 {d16-d19}, [%2]! \n"// q8 = 0 2 4 6 q9 = 1 3 5 7

                    "vadd.f32 q14, q14, q15 \n"
                    "vadd.f32 q7, q7, q13 \n"

                    "veor q15, q15 \n"// _sump3 = 0;
                    "veor q13, q13 \n"// _sump2 = 0;

                    "pld [%2, #256] \n"

                    "vadd.f32 q7, q7, q14 \n"

                    "vld2.f32 {d20-d23}, [%2] \n"// q10 = 8 10 12 14 q11 = 9 11 13 15

                    "veor q14, q14 \n"// _sump3 = 0;

                    "vst1.f32 {d14-d15}, [%1]! \n"

                    "pld [%1, #128] \n"

                    "subs %0, #1 \n"
                    "bne 0b \n"

                    "sub %2, #32 \n"

                    : "=r"(nn),     // %0
                      "=r"(outptr), // %1
                      "=r"(r0),     // %2
                      "=r"(r1),     // %3
                      "=r"(r2),     // %4
                      "=r"(r3),     // %5
                      "=r"(r4)      // %6
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "6"(r4),
                      "w"(_k0123),     // %14
                      "w"(_k4567),     // %15
                      "w"(_k891011),   // %16
                      "w"(_k12131415), // %17
                      "w"(_k16171819), // %18
                      "w"(_k20212223), // %19
                      "w"(_k24242424)  // %20
                    : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
                    float sum = 0;
#if __ARM_NEON
                    float32x4_t _r0 = vld1q_f32(r0);
                    float32x4_t _sum = vmulq_f32(_r0, _k0123);

                    float32x4_t _r1 = vld1q_f32(r1);
                    _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));

                    float32x4_t _r2 = vld1q_f32(r2);
                    _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));

                    float32x4_t _r3 = vld1q_f32(r3);
                    _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));

                    float32x4_t _r4 = vld1q_f32(r4);
                    _sum = vmlaq_f32(_sum, _r4, _k20212223);

                    // column-4 taps handled scalar
                    sum += r0[4] * k0[4];
                    sum += r1[4] * k1[4];
                    sum += r2[4] * k2[4];
                    sum += r3[4] * k3[4];
                    sum += r4[4] * k4[4];

                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);

                    sum += vget_lane_f32(_ss, 0);
#else
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];
#endif
                    *outptr += sum;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    r4 += 2;
                    outptr++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
                r4 += tailstep;
            }
        }
    }
}
omp_parallel_private.c
<ompts:test>
<ompts:testdescription>Test which checks the omp parallel private directive.</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp parallel private</ompts:directive>
<ompts:dependences>omp for omp critical</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <stdlib.h>
#include "omp_testsuite.h"

//static int sum1 = 789;

/* Each thread privatizes sum1, seeds it with 7, accumulates its share of
   1..999, and merges into the shared sum under a critical section; the
   expected total is 999*1000/2 plus 7 per participating thread. */
int <ompts:testcode:functionname>omp_parallel_private</ompts:testcode:functionname>(FILE * logFile)
{
    <ompts:orphan:vars>
    int sum, num_threads,sum1;
    </ompts:orphan:vars>
    int known_sum;

    sum = 0;
    <ompts:crosscheck> sum1=0; </ompts:crosscheck>
    num_threads = 0;

#pragma omp parallel <ompts:check>private(sum1)</ompts:check>
    {
        /* per-thread seed for the private copy */
        <ompts:check> sum1 = 7; </ompts:check>
        /*printf("sum1=%d\n",sum1);*/
        <ompts:orphan>
        int i;
#pragma omp for
        for (i = 1; i < 1000; i++)
        {
            sum1 = sum1 + i;
        } /*end of for*/
#pragma omp critical
        {
            sum = sum + sum1;
            num_threads++;
        } /*end of critical*/
        </ompts:orphan>
    } /* end of parallel*/
    known_sum = (999 * 1000) / 2 + 7 * num_threads;
    return (known_sum == sum);
}
</ompts:testcode>
</ompts:test>
polybench.c
/**
 * polybench.c: This file is part of the PolyBench/C 3.2 test suite.
 *
 *
 * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://polybench.sourceforge.net
 * License: /LICENSE.OSU.txt
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif

/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif

/* Total LLC cache size. By default 32+MB.. */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif

int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR;
double polybench_program_total_flops = 0;

#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
/* Counter names come from the user-editable papi_counters.list file. */
char* _polybench_papi_eventlist[] = {
#include "papi_counters.list"
NULL
};
int polybench_papi_eventset;
int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS];
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif

/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long int polybench_c_start, polybench_c_end;

/* Wall-clock time in seconds via gettimeofday; returns 0 unless the
   benchmark was built with -DPOLYBENCH_TIME. */
static double rtclock()
{
#ifdef POLYBENCH_TIME
  struct timeval Tp;
  int stat;
  stat = gettimeofday (&Tp, NULL);
  if (stat != 0)
    printf ("Error return from gettimeofday: %d", stat);
  return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
#else
  return 0;
#endif
}

#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
/* Raw x86 time-stamp counter read (EDX:EAX combined into 64 bits). */
static unsigned long long int rdtsc()
{
  unsigned long long int ret = 0;
  unsigned int cycles_lo;
  unsigned int cycles_hi;
  __asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
  ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
  return ret;
}
#endif

/* Touch a POLYBENCH_CACHE_SIZE_KB-sized array so prior cache contents are
   evicted before timing; the assert keeps the reduction from being
   optimized away. */
void polybench_flush_cache()
{
  int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
  double* flush = (double*) calloc (cs, sizeof(double));
  int i;
  double tmp = 0.0;
#pragma omp parallel for reduction(+:tmp)
  for (i = 0; i < cs; i++)
    tmp += flush[i];
  assert (tmp <= 10.0);
  free (flush);
}

#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
void polybench_linux_fifo_scheduler()
{
  /* Use FIFO scheduler to limit OS interference. Program must be run as
     root, and this works only for Linux kernels. */
  struct sched_param schedParam;
  schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);
  sched_setscheduler (0, SCHED_FIFO, &schedParam);
}

void polybench_linux_standard_scheduler()
{
  /* Restore to standard scheduler policy. */
  struct sched_param schedParam;
  schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER);
  sched_setscheduler (0, SCHED_OTHER, &schedParam);
}
#endif

#ifdef POLYBENCH_PAPI
/* Report a PAPI failure for the given call site and abort the run,
   shutting PAPI down first if it was initialized. */
static void test_fail(char *file, int line, char *call, int retval)
{
  char buf[128];
  memset(buf, '\0', sizeof(buf));
  if (retval != 0)
    fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line);
  else
    {
      fprintf (stdout,"%-40s SKIPPED\n", file);
      fprintf (stdout,"Line # %d\n", line);
    }
  if (retval == PAPI_ESYS)
    {
      sprintf (buf, "System error in %s", call);
      perror (buf);
    }
  else if (retval > 0)
    fprintf (stdout,"Error: %s\n", call);
  else if (retval == 0)
    fprintf (stdout,"Error: %s\n", call);
  else
    {
      char errstring[PAPI_MAX_STR_LEN];
      PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN);
      fprintf (stdout,"Error in %s: %s\n", call, errstring);
    }
  fprintf (stdout,"\n");
  if (PAPI_is_initialized ()) PAPI_shutdown ();
  exit (1);
}

/* Initialize the PAPI library and translate the configured event names
   into event codes; only the designated monitor thread does the work. */
void polybench_papi_init()
{
  if (omp_get_max_threads () < polybench_papi_counters_threadid)
    polybench_papi_counters_threadid = omp_get_max_threads () - 1;
  if (omp_get_thread_num () == polybench_papi_counters_threadid)
    {
      int retval;
      polybench_papi_eventset = PAPI_NULL;
      if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
	test_fail (__FILE__, __LINE__, "PAPI_library_init", retval);
      if ((retval = PAPI_create_eventset (&polybench_papi_eventset)) != PAPI_OK)
	test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval);
      int k;
      for (k = 0; _polybench_papi_eventlist[k]; ++k)
	{
	  if ((retval =
	       PAPI_event_name_to_code (_polybench_papi_eventlist[k],
					&(polybench_papi_eventlist[k]))) != PAPI_OK)
	    test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval);
	}
      polybench_papi_eventlist[k] = 0;
    }
}

/* Destroy the event set and shut PAPI down (monitor thread only). */
void polybench_papi_close()
{
  if (omp_get_thread_num () == polybench_papi_counters_threadid)
    {
      int retval;
      if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset)) != PAPI_OK)
	test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
      if (PAPI_is_initialized ()) PAPI_shutdown ();
    }
}

/* Flush the cache (unless disabled), then attach event `evid` to the
   event set and start counting on the monitor thread. */
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
  polybench_flush_cache();
# endif
  if (omp_get_thread_num () == polybench_papi_counters_threadid)
    {
      int retval = 1;
      char descr[PAPI_MAX_STR_LEN];
      PAPI_event_info_t evinfo;
      PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr);
      if (PAPI_add_event (polybench_papi_eventset,
			  polybench_papi_eventlist[evid]) != PAPI_OK)
	test_fail (__FILE__, __LINE__, "PAPI_add_event", 1);
      if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo) != PAPI_OK)
	test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval);
      if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK)
	test_fail (__FILE__, __LINE__, "PAPI_start", retval);
    }
  return 0;
}

/* Read and stop the counter for event `evid`, recording its value, then
   detach the event from the event set (monitor thread only). */
void polybench_papi_stop_counter(int evid)
{
  if (omp_get_thread_num () == polybench_papi_counters_threadid)
    {
      int retval;
      long_long values[1];
      values[0] = 0;
      if ((retval = PAPI_read (polybench_papi_eventset, &values[0])) != PAPI_OK)
	test_fail (__FILE__, __LINE__, "PAPI_read", retval);
      if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK)
	test_fail (__FILE__, __LINE__, "PAPI_stop", retval);
      polybench_papi_values[evid] = values[0];
      if ((retval = PAPI_remove_event (polybench_papi_eventset,
				       polybench_papi_eventlist[evid])) != PAPI_OK)
	test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval);
    }
}

/* Print the recorded counter values (names too when built with
   POLYBENCH_PAPI_VERBOSE), from the monitor thread. */
void polybench_papi_print()
{
  int verbose = 0;
  {
    if (omp_get_thread_num() == polybench_papi_counters_threadid)
      {
#ifdef POLYBENCH_PAPI_VERBOSE
	verbose = 1;
#endif
	if (verbose)
	  printf ("On thread %d:\n", polybench_papi_counters_threadid);
	int evid;
	for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
	  {
	    if (verbose)
	      printf ("%s=", _polybench_papi_eventlist[evid]);
	    printf ("%llu ", polybench_papi_values[evid]);
	    if (verbose)
	      printf ("\n");
	  }
	printf ("\n");
      }
  }
}
#endif /* !
POLYBENCH_PAPI */ void polybench_prepare_instruments() { #ifndef POLYBENCH_NO_FLUSH_CACHE polybench_flush_cache (); #endif #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER polybench_linux_fifo_scheduler (); #endif } void polybench_timer_start() { polybench_prepare_instruments (); #ifndef POLYBENCH_CYCLE_ACCURATE_TIMER polybench_t_start = rtclock (); #else polybench_c_start = rdtsc (); #endif } void polybench_timer_stop() { #ifndef POLYBENCH_CYCLE_ACCURATE_TIMER polybench_t_end = rtclock (); #else polybench_c_end = rdtsc (); #endif #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER polybench_linux_standard_scheduler (); #endif } void polybench_timer_print() { #ifdef POLYBENCH_GFLOPS if (__polybench_program_total_flops == 0) { printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n"); printf ("%0.6lf\n", polybench_t_end - polybench_t_start); } else printf ("%0.2lf\n", (__polybench_program_total_flops / (double)(polybench_t_end - polybench_t_start)) / 1000000000); #else # ifndef POLYBENCH_CYCLE_ACCURATE_TIMER printf ("%0.6f\n", polybench_t_end - polybench_t_start); # else printf ("%Ld\n", polybench_c_end - polybench_c_start); # endif #endif } static void * xmalloc (size_t num) { void* nnew = NULL; int ret = posix_memalign (&nnew, 32, num); if (! nnew || ret) { fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory"); exit (1); } return nnew; } void* polybench_alloc_data(unsigned long long int n, int elt_size) { /// FIXME: detect overflow! size_t val = n; val *= elt_size; void* ret = xmalloc (val); return ret; }
ballAlg.c
#include "ballAlg.h"
#include "../lib/genPoints.h"
#include "../lib/msort.h"
#include "pointArith.h"
#include <mpi.h>
#include <omp.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hybrid MPI + OpenMP construction of a tree of balls (nodes carry a center
   point and a radius); ranks cooperate in "teams" that split at each level. */
int nProcesses;              /* MPI world size */
int myRank;                  /* this rank's id in MPI_COMM_WORLD */
int nDims;                   /* dimensionality of every point */
int nNodes = 0;              /* nodes stored locally so far */
int nid = 1;                 /* next node id; strided by nProcesses so ids are globally unique */
int nodesCapacity;           /* current capacity of the 'nodes' array */
Node *nodes;                 /* locally-built tree nodes */
MPI_Datatype mpiMedianInfo;  /* MPI struct type mirroring MedianInfo */

int main(int argc, char *argv[]) {
    double execTime = -omp_get_wtime();
    MPI_Init(&argc, &argv);
    defineMedianInfo();
    MPI_Comm_size(MPI_COMM_WORLD, &nProcesses);
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
    /* Offset nid by rank so ids never collide across ranks (stride nProcesses). */
    nid += myRank;
    int nPoints;
    double **points = getPoints(argc, argv, &nDims, &nPoints, myRank, nProcesses);
    double *pointsValues = *points;  /* backing storage of the point rows */
    nodesCapacity = nPoints;
    nodes = (Node *) mallocSafe(sizeof(Node) * nodesCapacity);
    int nTeammates = nProcesses;
    int *teammatesRanks = nProcesses > 1 ? calcInitialTeammates(MY_STATE(nPoints), &nTeammates) : NULL;
#pragma omp parallel
#pragma omp single
    /* Single-rank teams recurse with OpenMP tasks only; otherwise use MPI.
       &points[nPoints] serves as scratch for the initial pivot pointer. */
    nTeammates == 1 ? buildTreeOMP(points, nPoints, omp_get_num_threads())
                    : buildTree(&points[nPoints], points, nPoints, teammatesRanks, nTeammates);
    if (myRank == 0) {
        /* The last node built on rank 0 is the root; give it id 0. */
        if (nNodes > 0) nodes[nNodes - 1].nid = 0;
        execTime += omp_get_wtime();
        fprintf(stderr, "%.1lf\n", execTime);
        fflush(stderr);
    }
    int nNodesGlobal = nNodes;
    if (nProcesses > 1) MPI_Reduce(&nNodes, &nNodesGlobal, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    if (myRank == 0) {
        printf("%d %d\n", nDims, nNodesGlobal);
        fflush(stdout);
    }
    dumpTree();
    if (nProcesses > 1) free(teammatesRanks);
    free(pointsValues);
    free(points);
    MPI_Finalize();
    exit(EXIT_SUCCESS);
}

/* Build and commit 'mpiMedianInfo', an MPI struct datatype matching
   MedianInfo { double medX; int medRanks[2]; int medIdx[2]; } (layout
   inferred from the lengths/types below — TODO confirm against ballAlg.h). */
void defineMedianInfo() {
    MedianInfo dummyMedianInfo;
    int lengths[3] = {1, 2, 2};
    MPI_Aint displacements[3];
    MPI_Aint baseAddress;
    MPI_Get_address(&dummyMedianInfo, &baseAddress);
    MPI_Get_address(&dummyMedianInfo.medX, &displacements[0]);
    MPI_Get_address(dummyMedianInfo.medRanks, &displacements[1]);
    MPI_Get_address(dummyMedianInfo.medIdx, &displacements[2]);
    displacements[0] = MPI_Aint_diff(displacements[0], baseAddress);
    displacements[1] = MPI_Aint_diff(displacements[1], baseAddress);
    displacements[2] = MPI_Aint_diff(displacements[2], baseAddress);
    MPI_Datatype datatypes[3] = {MPI_DOUBLE, MPI_INT, MPI_INT};
    MPI_Type_create_struct(3, lengths, displacements, datatypes, &mpiMedianInfo);
    MPI_Type_commit(&mpiMedianInfo);
}

/*
 * Recursively build the tree over this team's points.
 * Picks two far-apart pivots (pA, pB), projects all points onto pA->pB,
 * splits at the team-wide median projection, then recurses via buildTreeLoop.
 * Returns the local node id of the subtree root, or -1 when nPoints == 0.
 */
int buildTree(double **initialP, double **points, int nPoints, const int *teammatesRanks, int nTeammates) {
    if (nPoints == 0) return -1;
    double *center = (double *) mallocSafe(sizeof(double) * nDims);
    /* Leaf: a single point owned by a single-rank team. */
    if (nPoints == 1 && nTeammates == 1) {
        copy(points[0], center);
        return newNode(center, 0, -1, -1);
    }
    double *pA = calcFurthestPoint(points, nPoints, *initialP, teammatesRanks, nTeammates, POINT_A_TAG);
    double *pB = calcFurthestPoint(points, nPoints, pA, teammatesRanks, nTeammates, POINT_B_TAG);
    double *subBA = (double *) mallocSafe(sizeof(double) * nDims);
    double *projectionsXs = (double *) mallocSafe(sizeof(double) * nPoints);
    double *projectionsPoints = (double *) mallocSafe(sizeof(double) * nDims * nPoints);
    double **projections = (double **) mallocSafe(sizeof(double *) * nPoints);
    double **pointsTmp = (double **) mallocSafe(sizeof(double *) * nPoints);
    double **pointsL = (double **) mallocSafe(sizeof(double *) * nPoints);
    double **pointsR = (double **) mallocSafe(sizeof(double *) * nPoints);
    sub(pB, pA, subBA);
    const double squaredSubBA = innerProduct(subBA, subBA);
    /* Project every point onto the pA->pB line. */
    for (int i = 0; i < nPoints; i++) {
        projections[i] = projectionsPoints + (i * nDims);
        projection(points[i], pA, subBA, squaredSubBA, projections[i]);
    }
    msort(projections, nPoints, pointsTmp);
    /* First coordinate of each sorted projection drives the median search. */
    for (int i = 0; i < nPoints; i++) projectionsXs[i] = projections[i][0];
    MedianInfo medInfo;
    /* Team leader (rank at index 0) gathers projections and broadcasts the
       median; everyone else sends theirs and waits. */
    if (teammatesRanks[0] == myRank) {
        medInfo = bcastMedianInfo(teammatesRanks, nTeammates, projectionsXs, nPoints);
    } else {
        medInfo = recvMedianInfo(teammatesRanks[0], projections, projectionsXs, &nPoints);
    }
    int nPointsL = 0;
    int nPointsR = 0;
    partitionTree(projectionsPoints, medInfo.medX, points, nPoints, pointsL, &nPointsL, pointsR, &nPointsR);
    /* NOTE(review): realloc results are not null-checked here, unlike the
       mallocSafe/newNode paths. */
    pointsL = realloc(pointsL, sizeof(double *) * nPointsL);
    pointsR = realloc(pointsR, sizeof(double *) * nPointsR);
    double radius = -1;  /* stays -1 on non-leader ranks */
    if (teammatesRanks[0] == myRank) {
        calcCenter(medInfo, projections, center);
        calcRadius(points, nPoints, center, teammatesRanks, nTeammates, &radius);
    } else {
        calcCandidateRadius(teammatesRanks[0], points, nPoints, center);
    }
    free(pA);
    free(pB);
    free(subBA);
    free(projectionsXs);
    free(projectionsPoints);
    free(projections);
    free(pointsTmp);
    int myNid = buildTreeLoop(initialP, center, radius, &pointsL, &nPointsL, &pointsR, &nPointsR, teammatesRanks, nTeammates);
    free(pointsL);
    free(pointsR);
    return myNid;
}

/* Fire-and-forget broadcast of 'buf' to every teammate except self.
   Requests are freed immediately; 'buf' must stay valid until delivery. */
void bcastToMyTeam(void *buf, int bufSize, const int *teammatesRanks, int nTeammates, MPI_Datatype datatype, int TAG) {
    MPI_Request request;
    for (int i = 0; i < nTeammates; i++) {
        if (teammatesRanks[i] != myRank) {
            MPI_Isend(buf, bufSize, datatype, teammatesRanks[i], TAG, MPI_COMM_WORLD, &request);
            MPI_Request_free(&request);
        }
    }
}

/* Team-wide furthest point from 'pivot': each rank finds its local candidate,
   broadcasts it, then keeps the best among all received candidates.
   Returns a freshly allocated point; caller frees. */
double *calcFurthestPoint(double **points, int nPoints, const double *pivot, const int *teammatesRanks, int nTeammates, int TAG) {
    double *P = (double *) mallocSafe(sizeof(double) * nDims);
    double *pCmp = (double *) mallocSafe(sizeof(double) * nDims);
    double maxD;
    int iFurthest = calcFurthestIdx(points, nPoints, pivot, &maxD);
    copy(points[iFurthest], P);
    bcastToMyTeam(P, nDims, teammatesRanks, nTeammates, MPI_DOUBLE, TAG);
    for (int i = 0; i < nTeammates; i++) {
        if (teammatesRanks[i] != myRank) {
            MPI_Recv(pCmp, nDims, MPI_DOUBLE, teammatesRanks[i], TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            double d = squareDistance(pivot, pCmp);
            if (d > maxD) {
                maxD = d;
                copy(pCmp, P);
            }
        }
    }
    free(pCmp);
    return P;
}

/* Index of the local point furthest from 'pivot' (squared distance in *maxD);
   skips the pivot itself when it aliases one of the points. */
int calcFurthestIdx(double **points, int nPoints, const double *pivot, double *maxD) {
    int iFurthest = 0;
    *maxD = -1;
    for (int i = 0; i < nPoints; i++) {
        if (points[i] != pivot) {
            double d = squareDistance(points[i], pivot);
            if ((*maxD) < d) {
                *maxD = d;
                iFurthest = i;
            }
        }
    }
    return iFurthest;
}
/*
 * Leader side of the median exchange: receive every teammate's sorted
 * projection x-coordinates, advance per-teammate cursors (a k-way merge)
 * to the middle element(s), and broadcast the resulting MedianInfo
 * (median value plus the rank/index of the one or two median points).
 * For an even total count medX is the mean of the two middle values; for
 * an odd count only slot 1 is meaningful and slot 0 is set to -1.
 */
MedianInfo bcastMedianInfo(const int *teammatesRanks, int nTeammates, double *projectionsXs, int nProjectionsXs) {
    int nTeammatesXsSum = 0;
    int *nTeammatesXs = (int *) calloc(nTeammates, sizeof(int));
    int *iTeammatesXs = (int *) calloc(nTeammates, sizeof(int));
    double **teammatesXs = (double **) mallocSafe(sizeof(double *) * nTeammates);
    /* Slot 0 is the leader's own (already local) projection list. */
    teammatesXs[0] = projectionsXs;
    nTeammatesXs[0] = nProjectionsXs;
    nTeammatesXsSum += nTeammatesXs[0];
    for (int i = 1; i < nTeammates; i++) {
        MPI_Recv(&nTeammatesXs[i], 1, MPI_INT, teammatesRanks[i], PROJECTIONS_LEN_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        teammatesXs[i] = (double *) mallocSafe(sizeof(double) * nTeammatesXs[i]);
        MPI_Recv(teammatesXs[i], nTeammatesXs[i], MPI_DOUBLE, teammatesRanks[i], PROJECTIONS_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        nTeammatesXsSum += nTeammatesXs[i];
    }
    /* Consume the smallest values until the cursors sit just before the
       middle of the merged sequence. */
    for (int i = 0; i < nTeammatesXsSum / 2 - 1; i++) {
        iTeammatesXs[teammateMinX(nTeammates, teammatesXs, iTeammatesXs, nTeammatesXs)]++;
    }
    MedianInfo medInfo;
    medInfo.medX = 0;
    for (int i = 0; i < 2; i++) {
        int teammateId = teammateMinX(nTeammates, teammatesXs, iTeammatesXs, nTeammatesXs);
        /* Odd total: only the second of the two candidates is the median. */
        if (i == 1 || nTeammatesXsSum % 2 == 0) {
            medInfo.medX += teammatesXs[teammateId][iTeammatesXs[teammateId]];
        }
        medInfo.medRanks[i] = teammatesRanks[teammateId];
        medInfo.medIdx[i] = iTeammatesXs[teammateId]++;
    }
    if (nTeammatesXsSum % 2 == 0)
        medInfo.medX /= 2;
    else {
        medInfo.medRanks[0] = -1;
        medInfo.medIdx[0] = -1;
    }
    bcastToMyTeam(&medInfo, 1, teammatesRanks, nTeammates, mpiMedianInfo, MEDIAN_REQUEST);
    for (int i = 1; i < nTeammates; i++) free(teammatesXs[i]);
    free(nTeammatesXs);
    free(iTeammatesXs);
    free(teammatesXs);
    return medInfo;
}

/* Index of the teammate whose current cursor points at the smallest x,
   ignoring teammates whose list is exhausted; -1 if all are exhausted. */
int teammateMinX(int nTeammates, double **teammatesXs, const int *iTeammatesXs, const int *nTeammatesXs) {
    int iMinX = -1;
    for (int i = 0; i < nTeammates; i++) {
        if (iTeammatesXs[i] < nTeammatesXs[i]
            && (iMinX < 0 || teammatesXs[i][iTeammatesXs[i]] < teammatesXs[iMinX][iTeammatesXs[iMinX]])) {
            iMinX = i;
        }
    }
    return iMinX;
}
/*
 * Non-leader side of the median exchange: ship our projection x-values to
 * the leader, receive the MedianInfo back, and reply with our median
 * point(s) if we own any of them.
 * NOTE(review): 'medPoints' is never freed; it also backs an in-flight
 * MPI_Isend, so freeing it would require waiting on the request first.
 */
MedianInfo recvMedianInfo(int leaderRank, double **projections, const double *projectionsXs, int *nProjectionsXs) {
    MPI_Request request;
    MPI_Isend(nProjectionsXs, 1, MPI_INT, leaderRank, PROJECTIONS_LEN_TAG, MPI_COMM_WORLD, &request);
    MPI_Request_free(&request);
    MPI_Isend(projectionsXs, (*nProjectionsXs), MPI_DOUBLE, leaderRank, PROJECTIONS_TAG, MPI_COMM_WORLD, &request);
    MPI_Request_free(&request);
    MedianInfo medInfo;
    MPI_Recv(&medInfo, 1, mpiMedianInfo, leaderRank, MEDIAN_REQUEST, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    int i = 0;
    double *medPoints = (double *) mallocSafe(sizeof(double) * nDims * 2);
    for (int j = 0; j < 2; j++) {
        if (medInfo.medRanks[j] == myRank) copy(projections[medInfo.medIdx[j]], &medPoints[nDims * i++]);
    }
    if (i > 0) {
        MPI_Isend(medPoints, nDims * i, MPI_DOUBLE, leaderRank, MEDIAN_REPLY, MPI_COMM_WORLD, &request);
        MPI_Request_free(&request);
    }
    return medInfo;
}

/*
 * Leader: materialize the ball center from the median point(s).
 * Three cases: odd count (one median point, local or remote); both median
 * points on the same rank; median points on two different ranks. Remote
 * points arrive via MEDIAN_REPLY messages sent by recvMedianInfo.
 */
void calcCenter(MedianInfo medInfo, double **projections, double *center) {
    double *medPoints = (double *) mallocSafe(sizeof(double) * nDims * 2);
    if (medInfo.medRanks[0] < 0) {
        /* Odd total count: single median point. */
        if (medInfo.medRanks[1] == myRank) {
            copy(projections[medInfo.medIdx[1]], center);
        } else {
            MPI_Recv(medPoints, nDims, MPI_DOUBLE, medInfo.medRanks[1], MEDIAN_REPLY, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            copy(medPoints, center);
        }
    } else if (medInfo.medRanks[0] == medInfo.medRanks[1]) {
        /* Both median points live on one rank: center is their midpoint. */
        if (medInfo.medRanks[0] == myRank) {
            middle(projections[medInfo.medIdx[0]], projections[medInfo.medIdx[1]], center);
        } else {
            MPI_Recv(medPoints, nDims * 2, MPI_DOUBLE, medInfo.medRanks[0], MEDIAN_REPLY, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            middle(medPoints, &medPoints[nDims], center);
        }
    } else {
        /* Median points on two different ranks (possibly one of them us). */
        for (int i = 0; i < 2; i++) {
            if (medInfo.medRanks[i] == myRank) {
                copy(projections[medInfo.medIdx[i]], &medPoints[i * nDims]);
            } else {
                MPI_Recv(&medPoints[i * nDims], nDims, MPI_DOUBLE, medInfo.medRanks[i], MEDIAN_REPLY, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            }
        }
        middle(medPoints, &medPoints[nDims], center);
    }
    free(medPoints);
}

/* Leader: broadcast the center, then take the max of the local furthest
   distance and every teammate's candidate radius. */
void calcRadius(double **points, int nPoints, double *center, const int *teammatesRanks, int nTeammates, double *radius) {
    bcastToMyTeam(center, nDims, teammatesRanks, nTeammates, MPI_DOUBLE, RADIUS_TAG);
    double maxD;
    int iFurthest = calcFurthestIdx(points, nPoints, center, &maxD);
    *radius = distance(center, points[iFurthest]);
    double candidateRadius;
    for (int i = 0; i < nTeammates; i++) {
        if (teammatesRanks[i] != myRank) {
            MPI_Recv(&candidateRadius, 1, MPI_DOUBLE, teammatesRanks[i], RADIUS_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            if (candidateRadius > (*radius)) *radius = candidateRadius;
        }
    }
}

/* Non-leader: receive the center, compute our furthest distance from it,
   and send that candidate radius back to the leader. */
void calcCandidateRadius(int leaderRank, double **points, int nPoints, double *center) {
    MPI_Recv(center, nDims, MPI_DOUBLE, leaderRank, RADIUS_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    double maxD;
    int iFurthest = calcFurthestIdx(points, nPoints, center, &maxD);
    double candidateRadius = distance(center, points[iFurthest]);
    MPI_Request request;
    MPI_Isend(&candidateRadius, 1, MPI_DOUBLE, leaderRank, RADIUS_TAG, MPI_COMM_WORLD, &request);
    MPI_Request_free(&request);
}

/* Split 'points' into left/right halves by comparing each point's first
   projection coordinate against the median x. Output arrays must be
   pre-sized to nPoints; counters are incremented, not reset. */
void partitionTree(const double *projectionsPoints, double medX, double **points, int nPoints, double **pointsL, int *nPointsL, double **pointsR, int *nPointsR) {
    for (int i = 0; i < nPoints; i++) {
        // projectionsPoints[i * nDims] == (projectionsPoints + (i * nDims))[0]
        if (projectionsPoints[i * nDims] < medX) {
            pointsL[(*nPointsL)++] = points[i];
        } else {
            pointsR[(*nPointsR)++] = points[i];
        }
    }
}

/*
 * Recursion step after a split. Single-rank teams recurse with OpenMP
 * tasks. Multi-rank teams pair up (even teammate keeps left, odd keeps
 * right), exchange the halves they are giving away, split into two
 * sub-teams (by parity of team index), and recurse; child node ids flow
 * back to the team leader, which creates the parent node.
 * Returns this rank's subtree node id, or -1 when it has nothing left.
 */
int buildTreeLoop(double **initialP, double *center, double radius, double ***pointsL, int *nPointsL, double ***pointsR, int *nPointsR, const int *teammatesRanks, int nTeammates) {
    int nidL = -1;
    int nidR = -1;
    if (nTeammates == 1) {
        /* Pure shared-memory recursion: left half in a task, right inline. */
        int nThreads = omp_get_num_threads();
#pragma omp task shared(nidL)
        nidL = buildTreeOMP((*pointsL), (*nPointsL), nThreads / 2);
        nidR = buildTreeOMP((*pointsR), (*nPointsR), nThreads - nThreads / 2);
#pragma omp taskwait
        return newNode(center, radius, nidL, nidR);
    }
    /* Find our index inside the team. */
    int teammateId = 0;
    for (int i = 0; i < nTeammates; i++) {
        if (teammatesRanks[i] == myRank) {
            teammateId = i;
            break;
        }
    }
    /* Even indices keep the left half and ship the right to their odd
       partner (and vice versa). Edge teammates of an odd-sized team handle
       the leftover exchange explicitly. */
    if (teammateId % 2 == 0) {
        if (teammateId == nTeammates - 1) {
            exchangePoints(pointsR, nPointsR, NULL, NULL, teammatesRanks[teammateId - 1], false);
        } else {
            exchangePoints(pointsR, nPointsR, pointsL, nPointsL, teammatesRanks[teammateId + 1], false);
        }
    } else {
        exchangePoints(pointsL, nPointsL, pointsR, nPointsR, teammatesRanks[teammateId - 1], true);
        if (teammateId == nTeammates - 2) {
            exchangePoints(NULL, NULL, pointsR, nPointsR, teammatesRanks[teammateId + 1], false);
        }
    }
    /* Announce whether we still hold points (WORKING) or ran dry (FINISHED). */
    int myState = MY_STATE(teammateId % 2 == 0 ? (*nPointsL) : (*nPointsR));
    bcastToMyTeam(&myState, 1, teammatesRanks, nTeammates, MPI_INT, TEAMMATE_STATE_TAG);
    if (myState == FINISHED && teammatesRanks[0] != myRank) return -1;
    int newNTeammates[2] = {0, 0};
    int *newTeammatesRanks[2];
    for (int i = 0; i < 2; i++) {
        newTeammatesRanks[i] = calcNewTeammates(myState, teammatesRanks, nTeammates, &newNTeammates[i], i);
    }
    /* A finished leader only collects the child ids and builds the parent. */
    if (myState == FINISHED && teammatesRanks[0] == myRank) {
        if (newNTeammates[0] > 0) MPI_Recv(&nidL, 1, MPI_INT, newTeammatesRanks[0][0], BRANCH_ID_LEFT, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        if (newNTeammates[1] > 0) MPI_Recv(&nidR, 1, MPI_INT, newTeammatesRanks[1][0], BRANCH_ID_RIGHT, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        for (int i = 0; i < 2; i++) free(newTeammatesRanks[i]);
        return newNode(center, radius, nidL, nidR);
    }
    /* Sub-team leader picks the initial pivot for the next level and shares it. */
    if (newTeammatesRanks[teammateId % 2][0] == myRank) {
        *initialP = teammateId % 2 == 0 ? (*pointsL)[0] : (*pointsR)[0];
        bcastToMyTeam(*initialP, nDims, newTeammatesRanks[teammateId % 2], newNTeammates[teammateId % 2], MPI_DOUBLE, INITIAL_POINT_TAG);
    } else {
        MPI_Recv(*initialP, nDims, MPI_DOUBLE, newTeammatesRanks[teammateId % 2][0], INITIAL_POINT_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    int myNid;
    if (teammateId % 2 == 0) {
        nidL = buildTree(initialP, (*pointsL), (*nPointsL), newTeammatesRanks[0], newNTeammates[0]);
        myNid = nidL;
        if (newTeammatesRanks[0][0] == myRank) {
            if (teammatesRanks[0] == myRank) {
                /* Old-team leader: wait for the right child id, make parent. */
                if (newNTeammates[1] > 0) {
                    MPI_Recv(&nidR, 1, MPI_INT, newTeammatesRanks[1][0], BRANCH_ID_RIGHT, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                }
                myNid = newNode(center, radius, nidL, nidR);
            } else {
                MPI_Request request;
                MPI_Isend(&nidL, 1, MPI_INT, teammatesRanks[0], BRANCH_ID_LEFT, MPI_COMM_WORLD, &request);
                MPI_Request_free(&request);
            }
        }
    } else {
        nidR = buildTree(initialP, (*pointsR), (*nPointsR), newTeammatesRanks[1], newNTeammates[1]);
        myNid = nidR;
        if (newTeammatesRanks[1][0] == myRank) {
            MPI_Request request;
            MPI_Isend(&nidR, 1, MPI_INT, teammatesRanks[0], BRANCH_ID_RIGHT, MPI_COMM_WORLD, &request);
            MPI_Request_free(&request);
        }
    }
    for (int i = 0; i < 2; i++) free(newTeammatesRanks[i]);
    return myNid;
}

/*
 * Pairwise point exchange: optionally send our half (flattened as
 * length-prefixed doubles), optionally receive the partner's half and merge
 * it into our array. Either side may be NULL for one-way exchanges.
 * NOTE(review): 'flattedPointsToSend' backs an in-flight MPI_Isend and is
 * never freed; the received 'flattedPointsToRecv' buffer intentionally
 * outlives this call because unflat keeps pointers into it.
 */
void exchangePoints(double ***pointsToSend, int *nPointsToSend, double ***pointsToRecv, int *nPointsToRecv, int teammateRank, bool toMergeLeft) {
    if (nPointsToSend != NULL) {
        MPI_Request request;
        int nFlattedPointsToSend = (*nPointsToSend) * nDims;
        MPI_Isend(&nFlattedPointsToSend, 1, MPI_INT, teammateRank, POINTS_LEN_TAG, MPI_COMM_WORLD, &request);
        MPI_Request_free(&request);
        double *flattedPointsToSend = (double *) mallocSafe(sizeof(double) * nFlattedPointsToSend);
        flat(pointsToSend, nPointsToSend, flattedPointsToSend);
        MPI_Isend(flattedPointsToSend, nFlattedPointsToSend, MPI_DOUBLE, teammateRank, POINTS_TAG, MPI_COMM_WORLD, &request);
        MPI_Request_free(&request);
    }
    if (nPointsToRecv != NULL) {
        int nFlattedPointsToRecv;
        MPI_Recv(&nFlattedPointsToRecv, 1, MPI_INT, teammateRank, POINTS_LEN_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        double *flattedPointsToRecv = (double *) mallocSafe(sizeof(double) * nFlattedPointsToRecv);
        MPI_Recv(flattedPointsToRecv, nFlattedPointsToRecv, MPI_DOUBLE, teammateRank, POINTS_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        unflat(pointsToRecv, nPointsToRecv, flattedPointsToRecv, nFlattedPointsToRecv, toMergeLeft);
    }
}

/* Copy the pointed-to rows into one contiguous buffer (row-major). */
void flat(double ***points, int *nPoints, double *flattedPoints) {
    for (int i = 0; i < (*nPoints); i++) copy((*points)[i], &flattedPoints[i * nDims]);
}

/* Grow the pointer array and merge the received rows, either prepended
   (toMergeLeft) or appended; the rows alias 'flattedPoints'. */
void unflat(double ***points, int *nPoints, double *flattedPoints, int nFlattedPoints, bool toMergeLeft) {
    *points = (double **) realloc((*points), ((*nPoints) + nFlattedPoints / nDims) * sizeof(double *));
    if (toMergeLeft) {
        /* Shift existing entries right, then fill the front. */
        for (int i = (*nPoints) - 1; i >= 0; i--) (*points)[i + nFlattedPoints / nDims] = (*points)[i];
        for (int i = 0; i < nFlattedPoints / nDims; i++) (*points)[i] = &flattedPoints[i * nDims];
    } else {
        for (int i = 0; i < nFlattedPoints / nDims; i++) (*points)[(*nPoints) + i] = &flattedPoints[i * nDims];
    }
    *nPoints += nFlattedPoints / nDims;
}

/* Build the initial team (all ranks), exchange states, and shrink the list
   to just the WORKING ranks. Returns a heap array; caller frees. */
int *calcInitialTeammates(int myState, int *nTeammates) {
    int *teammatesRanks = mallocSafe(sizeof(int) * (*nTeammates));
    for (int i = 0; i < (*nTeammates); i++) teammatesRanks[i] = i;
    bcastToMyTeam(&myState, 1, teammatesRanks, (*nTeammates), MPI_INT, TEAMMATE_STATE_TAG);
    *nTeammates = calcWorkingTeammates(myState, teammatesRanks, (*nTeammates));
    teammatesRanks = (int *) realloc(teammatesRanks, sizeof(int) * (*nTeammates));
    return teammatesRanks;
}

/* Select the sub-team with the given index parity, then keep only its
   WORKING members (consumes the state messages broadcast earlier). */
int *calcNewTeammates(int myState, const int *teammatesRanks, int nTeammates, int *newNTeammates, int iParity) {
    int *newTeammatesRanks = (int *) mallocSafe(sizeof(int) * nTeammates);
    for (int i = 0; i < nTeammates; i++) {
        if (i % 2 == iParity) newTeammatesRanks[(*newNTeammates)++] = teammatesRanks[i];
    }
    *newNTeammates = calcWorkingTeammates(myState, newTeammatesRanks, (*newNTeammates));
    newTeammatesRanks = (int *) realloc(newTeammatesRanks, sizeof(int) * (*newNTeammates));
    return newTeammatesRanks;
}

/* Compact 'teammatesRanks' in place to only the WORKING ranks, receiving
   each remote teammate's state message; returns the new count. */
int calcWorkingTeammates(int myState, int *teammatesRanks, int nTeammates) {
    int teammateState;
    int nTeammatesWorking = 0;
    for (int i = 0; i < nTeammates; i++) {
        if (teammatesRanks[i] != myRank) {
            MPI_Recv(&teammateState, 1, MPI_INT, teammatesRanks[i], TEAMMATE_STATE_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        } else {
            teammateState = myState;
        }
        if (teammateState == WORKING) teammatesRanks[nTeammatesWorking++] = teammatesRanks[i];
    }
    return nTeammatesWorking;
}

/* Append a node to the local array (growing it as needed) and return its
   globally-unique id; the critical section serializes OpenMP tasks. */
int newNode(double *center, double radius, int nidL, int nidR) {
    int myNNodes, myNid;
#pragma omp critical(newNode)
    {
        myNNodes = nNodes++;
        myNid = nid;
        nid += nProcesses;  /* stride keeps ids unique across ranks */
        if (nNodes > nodesCapacity) {
            nodesCapacity *= 2;
            nodes = (Node *) realloc(nodes, sizeof(Node) * nodesCapacity);
            if (nodes == NULL) {
                fprintf(stderr, "FATAL: [realloc]!\n");
                exit(EXIT_FAILURE);
            }
        }
    }
    Node *new = &nodes[myNNodes];
    new->nid = myNid;
    new->center = center;  /* takes ownership; freed in dumpTree */
    new->radius = radius;
    new->nidL = nidL;
    new->nidR = nidR;
    return myNid;
}

/* Print local nodes in rank order: a zero-byte token message makes ranks
   take turns so output is not interleaved. */
void dumpTree() {
    if (myRank != 0) MPI_Recv(NULL, 0, MPI_INT, myRank - 1, PRINT_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    for (int i = 0; i < nNodes; i++) {
        printf("%d %d %d %.6lf", nodes[i].nid, nodes[i].nidL, nodes[i].nidR, nodes[i].radius);
        printPoint(nodes[i].center, nDims);
        free(nodes[i].center);
    }
    fflush(stdout);
    free(nodes);
    if (myRank != nProcesses - 1) MPI_Send(NULL, 0, MPI_INT, myRank + 1, PRINT_TAG, MPI_COMM_WORLD);
}

/* malloc wrapper that aborts the program on allocation failure. */
void *mallocSafe(size_t size) {
    void *allocBytes = malloc(size);
    if (allocBytes == NULL) {
        fprintf(stderr, "FATAL: [malloc]!\n");
        exit(EXIT_FAILURE);
    }
    return allocBytes;
}
GB_unaryop__ainv_int8_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__ainv_int8_int16
// op(A') function: GB_tran__ainv_int8_int16

// C type: int8_t
// A type: int16_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = -aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (arithmetic negation, AINV)
#define GB_OP(z, x) \
    z = -x ;

// casting (int16 source narrowed to int8 result)
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Dense elementwise apply: Cx [p] = (int8_t) (-(Ax [p])) for all anz entries,
// parallelized statically over nthreads.
GrB_Info GB_unop__ainv_int8_int16
(
    int8_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is generated by GB_unaryop_transpose.c, driven by the
// GB_* macros defined above.
GrB_Info GB_tran__ainv_int8_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
deconvolution_packnto1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Deconvolution, packn-packed fp16 input -> unpacked fp16 output, with fp32
// accumulation (fp16s). For each output pixel it gathers the contributing
// input pixels (gather formulation of transposed convolution), does a packn
// wide multiply-accumulate per tap, then horizontally reduces to a scalar.
static void deconvolution_packnto1_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn = number of fp16 lanes in one vector register (vlenb bytes / 2)
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = 0.f;
                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                // vector fp32 accumulator; the scalar bias is folded in at
                // the final reduction below
                vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);

                const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // map output row i and tap y back to an input row;
                        // skip taps that fall between strides or off the blob
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            const __fp16* sptr = m.row<const __fp16>(sy) + sx * packn;

                            int k = y * kernel_w + x;

                            // widening fp16*fp16 -> fp32 multiply-accumulate
                            vfloat16m1_t _val = vle16_v_f16m1(sptr, vl);
                            vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl);
                            _sum = vfwmacc_vv_f32m2(_sum, _val, _w, vl);
                        }
                    }

                    kptr += maxk * packn;
                }

#ifdef RVV_SPEC_0_7
                // TODO
                // spec 0.7 fallback: spill the vector and reduce in scalar code
                std::vector<float> ss(packn);
                vse32_v_f32m2((float*)ss.data(), _sum, vl);
                for (int i = 0; i < packn; i++)
                {
                    sum += ss[i];
                }
#else
                // horizontal sum of _sum, seeded with the scalar bias
                sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m2_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));
#endif

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}

// Same kernel as above but with full fp16 arithmetic (fp16sa): the bias,
// accumulator and reduction all stay in fp16.
static void deconvolution_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const __fp16* bias_data_ptr = bias_data_fp16;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                __fp16 sum = 0.f;
                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            const __fp16* sptr = m.row<const __fp16>(sy) + sx * packn;

                            int k = y * kernel_w + x;

                            // non-widening fp16 multiply-accumulate
                            vfloat16m1_t _val = vle16_v_f16m1(sptr, vl);
                            vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl);
                            _sum = vfmacc_vv_f16m1(_sum, _val, _w, vl);
                        }
                    }

                    kptr += maxk * packn;
                }

                // horizontal fp16 sum, seeded with the bias
                sum = vfmv_f_s_f16m1_f16(vfredsum_vs_f16m1_f16m1(vfloat16m1_t(), _sum, vfmv_s_f_f16m1(vfloat16m1_t(), sum, vl), vl));

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}
GB_bitmap_add_template.c
//------------------------------------------------------------------------------
// GB_bitmap_add_template: C = A+B, C<M>=A+B, and C<!M>=A+B, C bitmap
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// C is bitmap.  The mask M can have any sparsity structure, and is efficient
// to apply (all methods are asymptotically optimal).  All cases (no M, M, !M)
// are handled.

// Bitmap marker protocol used below: Cb [p] == 0 means C(i,j) is not present,
// Cb [p] == 1 means C(i,j) is present, and Cb [p] == 2 is a temporary mark
// meaning M(i,j) == 1 (used only in the sparse complemented-mask case, and
// cleared before this template finishes).

{

    // TODO: the input C can be modified in-place, if it is also bitmap
    int64_t cnvals = 0 ;

    if (M == NULL)
    {

        //----------------------------------------------------------------------
        // M is not present
        //----------------------------------------------------------------------

        //      ------------------------------------------
        //      C       =           A       +       B
        //      ------------------------------------------
        //      bitmap  .           sparse          bitmap
        //      bitmap  .           bitmap          sparse
        //      bitmap  .           bitmap          bitmap

        ASSERT (A_is_bitmap || B_is_bitmap) ;
        ASSERT (!A_is_full) ;
        ASSERT (!B_is_full) ;

        if (A_is_bitmap && B_is_bitmap)
        {

            //------------------------------------------------------------------
            // Method21: C, A, and B are all bitmap
            //------------------------------------------------------------------

            // single fused pass over all cnz entries, partitioned statically
            int tid ;
            #pragma omp parallel for num_threads(C_nthreads) schedule(static) \
                reduction(+:cnvals)
            for (tid = 0 ; tid < C_nthreads ; tid++)
            {
                int64_t pstart, pend, task_cnvals = 0 ;
                GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
                for (int64_t p = pstart ; p < pend ; p++)
                {
                    int8_t c = 0 ;
                    if (Ab [p] && Bb [p])
                    {
                        // C (i,j) = A (i,j) + B (i,j)
                        GB_GETA (aij, Ax, p) ;
                        GB_GETB (bij, Bx, p) ;
                        GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
                        c = 1 ;
                    }
                    else if (Bb [p])
                    {
                        // C (i,j) = B (i,j)
                        GB_COPY_B_TO_C (GB_CX (p), Bx, p) ;
                        c = 1 ;
                    }
                    else if (Ab [p])
                    {
                        // C (i,j) = A (i,j)
                        GB_COPY_A_TO_C (GB_CX (p), Ax, p) ;
                        c = 1 ;
                    }
                    Cb [p] = c ;
                    task_cnvals += c ;
                }
                cnvals += task_cnvals ;
            }

        }
        else if (A_is_bitmap)
        {

            //------------------------------------------------------------------
            // Method22: C and A are bitmap; B is sparse or hypersparse
            //------------------------------------------------------------------

            // first pass: C = A, copying the bitmap of A into C
            int64_t p ;
            #pragma omp parallel for num_threads(C_nthreads) schedule(static)
            for (p = 0 ; p < cnz ; p++)
            {
                // C (i,j) = A (i,j)
                int8_t a = Ab [p] ;
                if (a) GB_COPY_A_TO_C (GB_CX (p), Ax, p) ;
                Cb [p] = a ;
            }
            cnvals = A->nvals ;

            // second pass: scatter B into C, adding where C already has A
            GB_SLICE_MATRIX (B, 8) ;
            #pragma omp parallel for num_threads(B_nthreads) \
                schedule(dynamic,1) reduction(+:cnvals)
            for (taskid = 0 ; taskid < B_ntasks ; taskid++)
            {
                int64_t kfirst = kfirst_Bslice [taskid] ;
                int64_t klast  = klast_Bslice  [taskid] ;
                int64_t task_cnvals = 0 ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // find the part of B(:,k) for this task
                    int64_t j = GBH (Bh, k) ;
                    int64_t pB_start, pB_end ;
                    GB_get_pA (&pB_start, &pB_end, taskid, k, kfirst, klast,
                        pstart_Bslice, Bp, vlen) ;
                    int64_t pC_start = j * vlen ;
                    // traverse over B(:,j), the kth vector of B
                    for (int64_t pB = pB_start ; pB < pB_end ; pB++)
                    {
                        int64_t i = Bi [pB] ;
                        int64_t p = pC_start + i ;
                        if (Cb [p])
                        {
                            // C (i,j) = A (i,j) + B (i,j)
                            GB_GETA (aij, Ax, p) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (p), aij, bij, i, j) ;
                        }
                        else
                        {
                            // C (i,j) = B (i,j)
                            GB_COPY_B_TO_C (GB_CX (p), Bx, pB) ;
                            Cb [p] = 1 ;
                            task_cnvals++ ;
                        }
                    }
                }
                cnvals += task_cnvals ;
            }

        }
        else
        {

            //------------------------------------------------------------------
            // Method23: C and B are bitmap; A is sparse or hypersparse
            //------------------------------------------------------------------

            // first pass: C = B, copying the bitmap of B into C
            int64_t p ;
            #pragma omp parallel for num_threads(C_nthreads) schedule(static)
            for (p = 0 ; p < cnz ; p++)
            {
                // C (i,j) = B (i,j)
                int8_t b = Bb [p] ;
                if (b) GB_COPY_B_TO_C (GB_CX (p), Bx, p) ;
                Cb [p] = b ;
            }
            cnvals = B->nvals ;

            // second pass: scatter A into C, adding where C already has B
            GB_SLICE_MATRIX (A, 8) ;
            #pragma omp parallel for num_threads(A_nthreads) \
                schedule(dynamic,1) reduction(+:cnvals)
            for (taskid = 0 ; taskid < A_ntasks ; taskid++)
            {
                int64_t kfirst = kfirst_Aslice [taskid] ;
                int64_t klast  = klast_Aslice  [taskid] ;
                int64_t task_cnvals = 0 ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // find the part of A(:,k) for this task
                    int64_t j = GBH (Ah, k) ;
                    int64_t pA_start, pA_end ;
                    GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst, klast,
                        pstart_Aslice, Ap, vlen) ;
                    int64_t pC_start = j * vlen ;
                    // traverse over A(:,j), the kth vector of A
                    for (int64_t pA = pA_start ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        int64_t p = pC_start + i ;
                        if (Cb [p])
                        {
                            // C (i,j) = A (i,j) + B (i,j)
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, p) ;
                            GB_BINOP (GB_CX (p), aij, bij, i, j) ;
                        }
                        else
                        {
                            // C (i,j) = A (i,j)
                            GB_COPY_A_TO_C (GB_CX (p), Ax, pA) ;
                            Cb [p] = 1 ;
                            task_cnvals++ ;
                        }
                    }
                }
                cnvals += task_cnvals ;
            }
        }

    }
    else if (M_is_sparse_or_hyper)
    {

        //----------------------------------------------------------------------
        // C is bitmap, M is sparse or hyper and complemented
        //----------------------------------------------------------------------

        //      ------------------------------------------
        //      C      <!M> =        A       +       B
        //      ------------------------------------------
        //      bitmap  sparse      sparse          bitmap
        //      bitmap  sparse      sparse          full
        //      bitmap  sparse      bitmap          sparse
        //      bitmap  sparse      bitmap          bitmap
        //      bitmap  sparse      bitmap          full
        //      bitmap  sparse      full            sparse
        //      bitmap  sparse      full            bitmap
        //      bitmap  sparse      full            full

        // M is sparse and complemented.  If M is sparse and not
        // complemented, then C is constructed as sparse, not bitmap.
        ASSERT (Mask_comp) ;

        // C(i,j) = A(i,j) + B(i,j) can only be computed where M(i,j) is
        // not present in the sparse pattern of M, and where it is present
        // but equal to zero.

        //----------------------------------------------------------------------
        // scatter M into the C bitmap
        //----------------------------------------------------------------------

        GB_SLICE_MATRIX (M, 8) ;
        #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
        for (taskid = 0 ; taskid < M_ntasks ; taskid++)
        {
            int64_t kfirst = kfirst_Mslice [taskid] ;
            int64_t klast  = klast_Mslice  [taskid] ;
            for (int64_t k = kfirst ; k <= klast ; k++)
            {
                // find the part of M(:,k) for this task
                int64_t j = GBH (Mh, k) ;
                int64_t pM_start, pM_end ;
                GB_get_pA (&pM_start, &pM_end, taskid, k, kfirst, klast,
                    pstart_Mslice, Mp, vlen) ;
                int64_t pC_start = j * vlen ;
                // traverse over M(:,j), the kth vector of M
                for (int64_t pM = pM_start ; pM < pM_end ; pM++)
                {
                    // mark C(i,j) if M(i,j) is true
                    bool mij = GB_mcast (Mx, pM, msize) ;
                    if (mij)
                    {
                        int64_t i = Mi [pM] ;
                        int64_t p = pC_start + i ;
                        Cb [p] = 2 ;
                    }
                }
            }
        }

        // C(i,j) has been marked, in Cb, with the value 2 where M(i,j)=1.
        // These positions will not be computed in C(i,j).  C(i,j) can only
        // be modified where Cb [p] is zero.

        //----------------------------------------------------------------------
        // compute C<!M>=A+B using the mask scattered in C
        //----------------------------------------------------------------------

        bool M_cleared = false ;

        if ((A_is_bitmap || A_is_full) && (B_is_bitmap || B_is_full))
        {

            //------------------------------------------------------------------
            // Method24(!M,sparse): C is bitmap, both A and B are bitmap or full
            //------------------------------------------------------------------

            // single fused pass; also erases the mask marks (Cb == 2 -> 0)
            int tid ;
            #pragma omp parallel for num_threads(C_nthreads) schedule(static) \
                reduction(+:cnvals)
            for (tid = 0 ; tid < C_nthreads ; tid++)
            {
                int64_t pstart, pend, task_cnvals = 0 ;
                GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
                for (int64_t p = pstart ; p < pend ; p++)
                {
                    int8_t c = Cb [p] ;
                    if (c == 0)
                    {
                        // M(i,j) is zero, so C(i,j) can be computed
                        int8_t a = GBB (Ab, p) ;
                        int8_t b = GBB (Bb, p) ;
                        if (a && b)
                        {
                            // C (i,j) = A (i,j) + B (i,j)
                            GB_GETA (aij, Ax, p) ;
                            GB_GETB (bij, Bx, p) ;
                            GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
                            c = 1 ;
                        }
                        else if (b)
                        {
                            // C (i,j) = B (i,j)
                            GB_COPY_B_TO_C (GB_CX (p), Bx, p) ;
                            c = 1 ;
                        }
                        else if (a)
                        {
                            // C (i,j) = A (i,j)
                            GB_COPY_A_TO_C (GB_CX (p), Ax, p) ;
                            c = 1 ;
                        }
                        Cb [p] = c ;
                        task_cnvals += c ;
                    }
                    else
                    {
                        // M(i,j) == 1, so C(i,j) is not computed
                        Cb [p] = 0 ;
                    }
                }
                cnvals += task_cnvals ;
            }
            M_cleared = true ;      // M has also been cleared from C

        }
        else if (A_is_bitmap || A_is_full)
        {

            //------------------------------------------------------------------
            // Method25(!M,sparse): C bitmap, A bitmap or full, B sparse/hyper
            //------------------------------------------------------------------

            // first pass: C = A where the mask permits (Cb == 0)
            int tid ;
            #pragma omp parallel for num_threads(C_nthreads) schedule(static) \
                reduction(+:cnvals)
            for (tid = 0 ; tid < C_nthreads ; tid++)
            {
                int64_t pstart, pend, task_cnvals = 0 ;
                GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
                for (int64_t p = pstart ; p < pend ; p++)
                {
                    if (Cb [p] == 0)
                    {
                        // C (i,j) = A (i,j)
                        int8_t a = GBB (Ab, p) ;
                        if (a) GB_COPY_A_TO_C (GB_CX (p), Ax, p) ;
                        Cb [p] = a ;
                        task_cnvals += a ;
                    }
                }
                cnvals += task_cnvals ;
            }

            // second pass: scatter B into C (mask marks Cb == 2 are skipped,
            // since only Cb == 0 or Cb == 1 are acted upon)
            GB_SLICE_MATRIX (B, 8) ;
            #pragma omp parallel for num_threads(B_nthreads) \
                schedule(dynamic,1) reduction(+:cnvals)
            for (taskid = 0 ; taskid < B_ntasks ; taskid++)
            {
                int64_t kfirst = kfirst_Bslice [taskid] ;
                int64_t klast  = klast_Bslice  [taskid] ;
                int64_t task_cnvals = 0 ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // find the part of B(:,k) for this task
                    int64_t j = GBH (Bh, k) ;
                    int64_t pB_start, pB_end ;
                    GB_get_pA (&pB_start, &pB_end, taskid, k, kfirst, klast,
                        pstart_Bslice, Bp, vlen) ;
                    int64_t pC_start = j * vlen ;
                    // traverse over B(:,j), the kth vector of B
                    for (int64_t pB = pB_start ; pB < pB_end ; pB++)
                    {
                        int64_t i = Bi [pB] ;
                        int64_t p = pC_start + i ;
                        int8_t c = Cb [p] ;
                        if (c == 1)
                        {
                            // C (i,j) = A (i,j) + B (i,j)
                            GB_GETA (aij, Ax, p) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (p), aij, bij, i, j) ;
                        }
                        else if (c == 0)
                        {
                            // C (i,j) = B (i,j)
                            GB_COPY_B_TO_C (GB_CX (p), Bx, pB) ;
                            Cb [p] = 1 ;
                            task_cnvals++ ;
                        }
                    }
                }
                cnvals += task_cnvals ;
            }

        }
        else
        {

            //------------------------------------------------------------------
            // Method26: C bitmap, A sparse or hypersparse, B bitmap or full
            //------------------------------------------------------------------

            // first pass: C = B where the mask permits (Cb == 0)
            int tid ;
            #pragma omp parallel for num_threads(C_nthreads) schedule(static) \
                reduction(+:cnvals)
            for (tid = 0 ; tid < C_nthreads ; tid++)
            {
                int64_t pstart, pend, task_cnvals = 0 ;
                GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
                for (int64_t p = pstart ; p < pend ; p++)
                {
                    if (Cb [p] == 0)
                    {
                        // C (i,j) = B (i,j)
                        int8_t b = GBB (Bb, p) ;
                        if (b) GB_COPY_B_TO_C (GB_CX (p), Bx, p) ;
                        Cb [p] = b ;
                        task_cnvals += b ;
                    }
                }
                cnvals += task_cnvals ;
            }

            // second pass: scatter A into C (mask marks Cb == 2 are skipped)
            GB_SLICE_MATRIX (A, 8) ;
            #pragma omp parallel for num_threads(A_nthreads) \
                schedule(dynamic,1) reduction(+:cnvals)
            for (taskid = 0 ; taskid < A_ntasks ; taskid++)
            {
                int64_t kfirst = kfirst_Aslice [taskid] ;
                int64_t klast  = klast_Aslice  [taskid] ;
                int64_t task_cnvals = 0 ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // find the part of A(:,k) for this task
                    int64_t j = GBH (Ah, k) ;
                    int64_t pA_start, pA_end ;
                    GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst, klast,
                        pstart_Aslice, Ap, vlen) ;
                    int64_t pC_start = j * vlen ;
                    // traverse over A(:,j), the kth vector of A
                    for (int64_t pA = pA_start ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        int64_t p = pC_start + i ;
                        int8_t c = Cb [p] ;
                        if (c == 1)
                        {
                            // C (i,j) = A (i,j) + B (i,j)
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, p) ;
                            GB_BINOP (GB_CX (p), aij, bij, i, j) ;
                        }
                        else if (c == 0)
                        {
                            // C (i,j) = A (i,j)
                            GB_COPY_A_TO_C (GB_CX (p), Ax, pA) ;
                            Cb [p] = 1 ;
                            task_cnvals++ ;
                        }
                    }
                }
                cnvals += task_cnvals ;
            }
        }

        //---------------------------------------------------------------------
        // clear M from C
        //---------------------------------------------------------------------

        if (!M_cleared)
        {
            // This step is required if either A or B are sparse/hyper (if
            // one is sparse/hyper, the other must be bitmap).  It requires
            // an extra pass over the mask M, so this might be slower than
            // postponing the application of the mask, and doing it later.
            #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
            for (taskid = 0 ; taskid < M_ntasks ; taskid++)
            {
                int64_t kfirst = kfirst_Mslice [taskid] ;
                int64_t klast  = klast_Mslice  [taskid] ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // find the part of M(:,k) for this task
                    int64_t j = GBH (Mh, k) ;
                    int64_t pM_start, pM_end ;
                    GB_get_pA (&pM_start, &pM_end, taskid, k, kfirst, klast,
                        pstart_Mslice, Mp, vlen) ;
                    int64_t pC_start = j * vlen ;
                    // traverse over M(:,j), the kth vector of M
                    for (int64_t pM = pM_start ; pM < pM_end ; pM++)
                    {
                        // mark C(i,j) if M(i,j) is true
                        bool mij = GB_mcast (Mx, pM, msize) ;
                        if (mij)
                        {
                            int64_t i = Mi [pM] ;
                            int64_t p = pC_start + i ;
                            Cb [p] = 0 ;
                        }
                    }
                }
            }
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // C is bitmap; M is bitmap or full
        //----------------------------------------------------------------------

        //      ------------------------------------------
        //      C      <M> =         A       +       B
        //      ------------------------------------------
        //      bitmap  bitmap      sparse          bitmap
        //      bitmap  bitmap      sparse          full
        //      bitmap  bitmap      bitmap          sparse
        //      bitmap  bitmap      bitmap          bitmap
        //      bitmap  bitmap      bitmap          full
        //      bitmap  bitmap      full            sparse
        //      bitmap  bitmap      full            bitmap
        //      bitmap  bitmap      full            full

        //      ------------------------------------------
        //      C      <M> =         A       +       B
        //      ------------------------------------------
        //      bitmap  full        sparse          bitmap
        //      bitmap  full        sparse          full
        //      bitmap  full        bitmap          sparse
        //      bitmap  full        bitmap          bitmap
        //      bitmap  full        bitmap          full
        //      bitmap  full        full            sparse
        //      bitmap  full        full            bitmap
        //      bitmap  full        full            full

        //      ------------------------------------------
        //      C      <!M> =        A       +       B
        //      ------------------------------------------
        //      bitmap  bitmap      sparse          sparse
        //      bitmap  bitmap      sparse          bitmap
        //      bitmap  bitmap      sparse          full
        //      bitmap  bitmap      bitmap          sparse
        //      bitmap  bitmap      bitmap          bitmap
        //      bitmap  bitmap      bitmap          full
        //      bitmap  bitmap      full            sparse
        //      bitmap  bitmap      full            bitmap
        //      bitmap  bitmap      full            full

        //      ------------------------------------------
        //      C      <!M> =        A       +       B
        //      ------------------------------------------
        //      bitmap  full        sparse          sparse
        //      bitmap  full        sparse          bitmap
        //      bitmap  full        sparse          full
        //      bitmap  full        bitmap          sparse
        //      bitmap  full        bitmap          bitmap
        //      bitmap  full        bitmap          full
        //      bitmap  full        full            sparse
        //      bitmap  full        full            bitmap
        //      bitmap  full        full            full

        ASSERT (M_is_bitmap || M_is_full) ;
        ASSERT (A_is_bitmap || A_is_full || B_is_bitmap || B_is_full) ;

        // mij = effective mask entry at position p (complement applied here)
        #undef  GB_GET_MIJ
        #define GB_GET_MIJ(p)                                     \
            bool mij = GBB (Mb, p) && GB_mcast (Mx, p, msize) ;   \
            if (Mask_comp) mij = !mij ;

        if ((A_is_bitmap || A_is_full) && (B_is_bitmap || B_is_full))
        {

            //------------------------------------------------------------------
            // Method27: C is bitmap; M, A, and B are bitmap or full
            //------------------------------------------------------------------

            // single fused pass over all cnz entries
            int tid ;
            #pragma omp parallel for num_threads(C_nthreads) schedule(static) \
                reduction(+:cnvals)
            for (tid = 0 ; tid < C_nthreads ; tid++)
            {
                int64_t pstart, pend, task_cnvals = 0 ;
                GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
                for (int64_t p = pstart ; p < pend ; p++)
                {
                    GB_GET_MIJ (p) ;
                    if (mij)
                    {
                        // M(i,j) is true, so C(i,j) can be computed
                        int8_t a = GBB (Ab, p) ;
                        int8_t b = GBB (Bb, p) ;
                        int8_t c = 0 ;
                        if (a && b)
                        {
                            // C (i,j) = A (i,j) + B (i,j)
                            GB_GETA (aij, Ax, p) ;
                            GB_GETB (bij, Bx, p) ;
                            GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
                            c = 1 ;
                        }
                        else if (b)
                        {
                            // C (i,j) = B (i,j)
                            GB_COPY_B_TO_C (GB_CX (p), Bx, p) ;
                            c = 1 ;
                        }
                        else if (a)
                        {
                            // C (i,j) = A (i,j)
                            GB_COPY_A_TO_C (GB_CX (p), Ax, p) ;
                            c = 1 ;
                        }
                        Cb [p] = c ;
                        task_cnvals += c ;
                    }
                    else
                    {
                        // M(i,j) == 1, so C(i,j) is not computed
                        Cb [p] = 0 ;
                    }
                }
                cnvals += task_cnvals ;
            }

        }
        else if (A_is_bitmap || A_is_full)
        {

            //------------------------------------------------------------------
            // Method28: C bitmap; M and A bitmap or full; B sparse or hyper
            //------------------------------------------------------------------

            // first pass: C<M> = A
            int tid ;
            #pragma omp parallel for num_threads(C_nthreads) schedule(static) \
                reduction(+:cnvals)
            for (tid = 0 ; tid < C_nthreads ; tid++)
            {
                int64_t pstart, pend, task_cnvals = 0 ;
                GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
                for (int64_t p = pstart ; p < pend ; p++)
                {
                    GB_GET_MIJ (p) ;
                    if (mij)
                    {
                        // C (i,j) = A (i,j)
                        int8_t a = GBB (Ab, p) ;
                        if (a) GB_COPY_A_TO_C (GB_CX (p), Ax, p) ;
                        Cb [p] = a ;
                        task_cnvals += a ;
                    }
                    else
                    {
                        Cb [p] = 0 ;
                    }
                }
                cnvals += task_cnvals ;
            }

            // second pass: scatter B into C<M>, adding where A was present
            GB_SLICE_MATRIX (B, 8) ;
            #pragma omp parallel for num_threads(B_nthreads) \
                schedule(dynamic,1) reduction(+:cnvals)
            for (taskid = 0 ; taskid < B_ntasks ; taskid++)
            {
                int64_t kfirst = kfirst_Bslice [taskid] ;
                int64_t klast  = klast_Bslice  [taskid] ;
                int64_t task_cnvals = 0 ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // find the part of B(:,k) for this task
                    int64_t j = GBH (Bh, k) ;
                    int64_t pB_start, pB_end ;
                    GB_get_pA (&pB_start, &pB_end, taskid, k, kfirst, klast,
                        pstart_Bslice, Bp, vlen) ;
                    int64_t pC_start = j * vlen ;
                    // traverse over B(:,j), the kth vector of B
                    for (int64_t pB = pB_start ; pB < pB_end ; pB++)
                    {
                        int64_t i = Bi [pB] ;
                        int64_t p = pC_start + i ;
                        GB_GET_MIJ (p) ;
                        if (mij)
                        {
                            int8_t c = Cb [p] ;
                            if (c == 1)
                            {
                                // C (i,j) = A (i,j) + B (i,j)
                                GB_GETA (aij, Ax, p) ;
                                GB_GETB (bij, Bx, pB) ;
                                GB_BINOP (GB_CX (p), aij, bij, i, j) ;
                            }
                            else
                            {
                                // C (i,j) = B (i,j)
                                GB_COPY_B_TO_C (GB_CX (p), Bx, pB) ;
                                Cb [p] = 1 ;
                                task_cnvals++ ;
                            }
                        }
                    }
                }
                cnvals += task_cnvals ;
            }

        }
        else
        {

            //------------------------------------------------------------------
            // Method29: C bitmap; M and B bitmap or full; A sparse or hyper
            //------------------------------------------------------------------

            // first pass: C<M> = B
            int tid ;
            #pragma omp parallel for num_threads(C_nthreads) schedule(static) \
                reduction(+:cnvals)
            for (tid = 0 ; tid < C_nthreads ; tid++)
            {
                int64_t pstart, pend, task_cnvals = 0 ;
                GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
                for (int64_t p = pstart ; p < pend ; p++)
                {
                    GB_GET_MIJ (p) ;
                    if (mij)
                    {
                        // C (i,j) = B (i,j)
                        int8_t b = GBB (Bb, p) ;
                        if (b) GB_COPY_B_TO_C (GB_CX (p), Bx, p) ;
                        Cb [p] = b ;
                        task_cnvals += b ;
                    }
                    else
                    {
                        Cb [p] = 0 ;
                    }
                }
                cnvals += task_cnvals ;
            }

            // second pass: scatter A into C<M>, adding where B was present
            GB_SLICE_MATRIX (A, 8) ;
            #pragma omp parallel for num_threads(A_nthreads) \
                schedule(dynamic,1) reduction(+:cnvals)
            for (taskid = 0 ; taskid < A_ntasks ; taskid++)
            {
                int64_t kfirst = kfirst_Aslice [taskid] ;
                int64_t klast  = klast_Aslice  [taskid] ;
                int64_t task_cnvals = 0 ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // find the part of A(:,k) for this task
                    int64_t j = GBH (Ah, k) ;
                    int64_t pA_start, pA_end ;
                    GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst, klast,
                        pstart_Aslice, Ap, vlen) ;
                    int64_t pC_start = j * vlen ;
                    // traverse over A(:,j), the kth vector of A
                    for (int64_t pA = pA_start ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        int64_t p = pC_start + i ;
                        GB_GET_MIJ (p) ;
                        if (mij)
                        {
                            int8_t c = Cb [p] ;
                            if (c == 1)
                            {
                                // C (i,j) = A (i,j) + B (i,j)
                                GB_GETA (aij, Ax, pA) ;
                                GB_GETB (bij, Bx, p) ;
                                GB_BINOP (GB_CX (p), aij, bij, i, j) ;
                            }
                            else
                            {
                                // C (i,j) = A (i,j)
                                GB_COPY_A_TO_C (GB_CX (p), Ax, pA) ;
                                Cb [p] = 1 ;
                                task_cnvals++ ;
                            }
                        }
                    }
                }
                cnvals += task_cnvals ;
            }
        }
    }

    // record the final number of entries in the bitmap of C
    C->nvals = cnvals ;
}
nco_s1d.c
/* $Header$ */ /* Purpose: NCO utilities for Sparse-1D (S1D) datasets */ /* Copyright (C) 2020--present Charlie Zender This file is part of NCO, the netCDF Operators. NCO is free software. You may redistribute and/or modify NCO under the terms of the 3-Clause BSD License with exceptions described in the LICENSE file */ #include "nco_s1d.h" /* Sparse-1D datasets */ const char * /* O [sng] String describing sparse-type */ nco_s1d_sng /* [fnc] Convert sparse-1D type enum to string */ (const nco_s1d_typ_enm nco_s1d_typ) /* I [enm] Sparse-1D type enum */ { /* Purpose: Convert sparse-type enum to string */ switch(nco_s1d_typ){ case nco_s1d_clm: return "Sparse Column (cols1d) format"; case nco_s1d_grd: return "Sparse Gridcell (grid1d) format"; case nco_s1d_lnd: return "Sparse Landunit (land1d) format"; case nco_s1d_pft: return "Sparse PFT (pfts1d) format" ; default: nco_dfl_case_generic_err(); break; } /* !nco_s1d_typ_enm */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* !nco_s1d_sng() */ int /* O [rcd] Return code */ nco_s1d_unpack /* [fnc] Unpack sparse-1D CLM/ELM variables into full file */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Read sparse CLM/ELM input file, inflate and write into output file */ /* Usage: ncks -D 1 -O -C --s1d ~/data/bm/elm_mali_bg_hst.nc ~/foo.nc ncks -D 1 -O -C --s1d -v cols1d_topoglc --hrz=${DATA}/bm/elm_mali_ig_hst.nc ${DATA}/bm/elm_mali_rst.nc ~/foo.nc ncks -D 1 -O -C --s1d -v GPP,pfts1d_wtgcell ~/beth_in.nc ~/foo.nc ncremap --dbg=1 --vrb=3 --devnull=No --nco='--dbg=1' -P elm -m ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc ~/foo.nc ~/foo_rgr.nc */ const char fnc_nm[]="nco_s1d_unpack()"; /* [sng] Function name */ char *fl_in; char *fl_out; char *fl_tpl; /* [sng] Template file (contains horizontal grid) */ char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */ char *grd_nm_in=(char 
*)strdup("gridcell"); char *lnd_nm_in=(char *)strdup("landunit"); char *clm_nm_in=(char *)strdup("column"); char *pft_nm_in=(char *)strdup("pft"); char *mec_nm_out=(char *)strdup("mec"); int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int in_id; /* I [id] Input netCDF file ID */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int tpl_id; /* [id] Input netCDF file ID (for horizontal grid template) */ long int clm_idx; long int grd_idx_out; long int idx_out; //long int lat_idx; //long int lon_idx; long int pft_idx; int dmn_idx; /* [idx] Dimension index */ /* Initialize local copies of command-line values */ dfl_lvl=rgr->dfl_lvl; fl_in=rgr->fl_in; fl_out=rgr->fl_out; in_id=rgr->in_id; out_id=rgr->out_id; /* Search for horizontal grid */ char *bnd_nm_in=rgr->bnd_nm; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ char *col_nm_in=rgr->col_nm_in; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ char *lat_nm_in=rgr->lat_nm_in; /* [sng] Name of input dimension to recognize as latitude */ char *lon_nm_in=rgr->lon_nm_in; /* [sng] Name of input dimension to recognize as longitude */ int dmn_id_bnd_in=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_col_in=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lat_in=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lon_in=NC_MIN_INT; /* [id] Dimension ID */ nco_bool FL_RTR_RMT_LCN; nco_bool flg_grd_1D=False; /* [flg] Unpacked data are on unstructured (1D) grid */ nco_bool flg_grd_2D=False; /* [flg] Unpacked data are on rectangular (2D) grid */ nco_bool flg_grd_dat=False; /* [flg] Use horizontal grid from required input data file */ nco_bool flg_grd_tpl=False; /* [flg] Use horizontal grid from optional horizontal grid template file */ nco_bool flg_nm_hst=False; /* [flg] 
Names in data file are as in history files ("ltype_"...) */ nco_bool flg_nm_rst=False; /* [flg] Names in data file are as in restart files ("ilun_"...) */ /* Does data file have unstructured grid? MB: Routine must handle two semantically distinct meanings of "column": 1. The horizontal dimension in an unstructured grid 2. A fraction of a landunit, which is a fraction of a CTSM/ELM gridcell In particular, a column is a fraction of a vegetated, urban, glacier, or crop landunit This routine distinguishes these meanings by abbreviating (1) as "col" and (2) as "clm" This usage maintains the precedent that "col" is the horizontal unstructured dimension in nco_rgr.c It is necessary though unintuitive that "cols1d" variable metadata will use the "clm" abbreviation */ if(col_nm_in && (rcd=nco_inq_dimid_flg(in_id,col_nm_in,&dmn_id_col_in)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"lndgrid",&dmn_id_col_in)) == NC_NOERR) col_nm_in=strdup("lndgrid"); /* CLM */ if(dmn_id_col_in != NC_MIN_INT) flg_grd_1D=True; /* Does data file have RLL grid? 
*/ if(!flg_grd_1D){ if(lat_nm_in && (rcd=nco_inq_dimid_flg(in_id,lat_nm_in,&dmn_id_lat_in)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"latitude",&dmn_id_lat_in)) == NC_NOERR) lat_nm_in=strdup("lndgrid"); /* CF */ if(lon_nm_in && (rcd=nco_inq_dimid_flg(in_id,lon_nm_in,&dmn_id_lon_in)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"longitude",&dmn_id_lon_in)) == NC_NOERR) lon_nm_in=strdup("lndgrid"); /* CF */ } /* !flg_grd_1D */ if(dmn_id_lat_in != NC_MIN_INT && dmn_id_lon_in != NC_MIN_INT) flg_grd_2D=True; /* Set where to obtain horizontal grid */ if(flg_grd_1D || flg_grd_2D) flg_grd_dat=True; else flg_grd_tpl=True; if(flg_grd_tpl && !rgr->fl_hrz){ (void)fprintf(stderr,"%s: ERROR %s did not locate horizontal grid in input data file and no optional horizontal gridfile was provided.\nHINT: Use option --hrz to specify file with horizontal grid used by input data.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !flg_grd_tpl */ /* Open grid template file iff necessary */ if(flg_grd_tpl && rgr->fl_hrz){ char *fl_pth_lcl=NULL; nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ /* Duplicate (because nco_fl_mk_lcl() free()'s its fl_in) */ fl_tpl=(char *)strdup(rgr->fl_hrz); /* Make sure file is on local system and is readable or die trying */ fl_tpl=nco_fl_mk_lcl(fl_tpl,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_tpl,md_open,&bfr_sz_hnt,&tpl_id); /* Same logic used to search for grid in data file and to search for grid in template file... Does template file have unstructured grid? 
*/ if(col_nm_in && (rcd=nco_inq_dimid_flg(tpl_id,col_nm_in,&dmn_id_col_in)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(tpl_id,"lndgrid",&dmn_id_col_in)) == NC_NOERR) col_nm_in=strdup("lndgrid"); /* CLM */ if(dmn_id_col_in != NC_MIN_INT) flg_grd_1D=True; /* Does template file have RLL grid? */ if(!flg_grd_1D){ if(lat_nm_in && (rcd=nco_inq_dimid_flg(tpl_id,lat_nm_in,&dmn_id_lat_in)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(tpl_id,"latitude",&dmn_id_lat_in)) == NC_NOERR) lat_nm_in=strdup("lndgrid"); /* CF */ if(lon_nm_in && (rcd=nco_inq_dimid_flg(tpl_id,lon_nm_in,&dmn_id_lon_in)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(tpl_id,"longitude",&dmn_id_lon_in)) == NC_NOERR) lon_nm_in=strdup("lndgrid"); /* CF */ } /* !flg_grd_1D */ if(dmn_id_lat_in != NC_MIN_INT && dmn_id_lon_in != NC_MIN_INT) flg_grd_2D=True; /* Set where to obtain horizontal grid */ if(!flg_grd_1D && !flg_grd_2D){ (void)fprintf(stderr,"%s: ERROR %s did not locate horizontal grid in input data file %s or in template file %s.\nHINT: One of those files must contain the grid dimensions and coordinates used by the packed data in the input data file.\n",nco_prg_nm_get(),fnc_nm,fl_in,fl_tpl); nco_exit(EXIT_FAILURE); } /* !flg_grd_1D */ } /* !flg_grd_tpl */ int cols1d_gridcell_index_id=NC_MIN_INT; /* [id] Gridcell index of column */ int cols1d_ixy_id=NC_MIN_INT; /* [id] Column 2D longitude index */ int cols1d_jxy_id=NC_MIN_INT; /* [id] Column 2D latitude index */ int cols1d_lat_id=NC_MIN_INT; /* [id] Column latitude */ int cols1d_lon_id=NC_MIN_INT; /* [id] Column longitude */ int cols1d_ityp_id=NC_MIN_INT; /* [id] Column type */ int cols1d_ityplun_id=NC_MIN_INT; /* [id] Column landunit type */ int grid1d_ixy_id=NC_MIN_INT; /* [id] Gridcell 2D longitude index */ int grid1d_jxy_id=NC_MIN_INT; /* [id] Gridcell 2D latitude index */ int grid1d_lat_id=NC_MIN_INT; /* [id] Gridcell latitude */ int grid1d_lon_id=NC_MIN_INT; /* [id] Gridcell longitude */ int 
land1d_gridcell_index_id=NC_MIN_INT; /* [id] Gridcell index of landunit */ int land1d_ixy_id=NC_MIN_INT; /* [id] Landunit 2D longitude index */ int land1d_jxy_id=NC_MIN_INT; /* [id] Landunit 2D latitude index */ int land1d_lat_id=NC_MIN_INT; /* [id] Landunit latitude */ int land1d_lon_id=NC_MIN_INT; /* [id] Landunit longitude */ int pfts1d_column_index_id=NC_MIN_INT; /* [id] Column index of PFT */ int pfts1d_gridcell_index_id=NC_MIN_INT; /* [id] Gridcell index of PFT */ int pfts1d_ityp_veg_id=NC_MIN_INT; /* [id] PFT vegetation type */ int pfts1d_ityplun_id=NC_MIN_INT; /* [id] PFT landunit type */ int pfts1d_ixy_id=NC_MIN_INT; /* [id] PFT 2D longitude index */ int pfts1d_jxy_id=NC_MIN_INT; /* [id] PFT 2D latitude index */ int pfts1d_lat_id=NC_MIN_INT; /* [id] PFT latitude */ int pfts1d_lon_id=NC_MIN_INT; /* [id] PFT longitude */ //int pfts1d_wtgcell_id=NC_MIN_INT; /* [id] PFT weight relative to corresponding gridcell */ int dmn_id_clm_in=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_grd_in=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lnd_in=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_pft_in=NC_MIN_INT; /* [id] Dimension ID */ nco_bool flg_s1d_clm=False; /* [flg] Dataset contains sparse variables for columns */ nco_bool flg_s1d_grd=False; /* [flg] Dataset contains sparse variables for gridcells */ nco_bool flg_s1d_lnd=False; /* [flg] Dataset contains sparse variables for landunits */ nco_bool flg_s1d_pft=False; /* [flg] Dataset contains sparse variables for PFTs */ rcd=nco_inq_att_flg(in_id,NC_GLOBAL,"ilun_vegetated_or_bare_soil",(nc_type *)NULL,(long *)NULL); if(rcd == NC_NOERR) flg_nm_rst=True; rcd=nco_inq_att_flg(in_id,NC_GLOBAL,"ltype_vegetated_or_bare_soil",(nc_type *)NULL,(long *)NULL); if(rcd == NC_NOERR) flg_nm_hst=True; assert(!(flg_nm_hst && flg_nm_rst)); if(!flg_nm_hst && !flg_nm_rst){ (void)fprintf(stderr,"%s: ERROR %s reports input data file lacks expected global attributes\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !flg_nm_hst */ 
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s will assume input attributes and variables use CLM/ELM %s naming conventions like %s\n",nco_prg_nm_get(),fnc_nm,flg_nm_hst ? "history file" : "restart file",flg_nm_hst ? "\"ltype_...\"" : "\"ilun_...\""); rcd=nco_inq_varid_flg(in_id,"cols1d_lat",&cols1d_lat_id); if(cols1d_lat_id != NC_MIN_INT) flg_s1d_clm=True; if(flg_s1d_clm){ rcd=nco_inq_varid(in_id,"cols1d_ixy",&cols1d_ixy_id); rcd=nco_inq_varid(in_id,"cols1d_jxy",&cols1d_jxy_id); rcd=nco_inq_varid(in_id,"cols1d_lon",&cols1d_lon_id); rcd=nco_inq_varid_flg(in_id,"cols1d_gridcell_index",&cols1d_gridcell_index_id); /* ELM/MALI restart */ rcd=nco_inq_varid_flg(in_id,"cols1d_ityp",&cols1d_ityp_id); /* ELM/MALI restart */ if(flg_nm_hst) rcd=nco_inq_varid(in_id,"cols1d_itype_lunit",&cols1d_ityplun_id); else rcd=nco_inq_varid(in_id,"cols1d_ityplun",&cols1d_ityplun_id); } /* !flg_s1d_clm */ rcd=nco_inq_varid_flg(in_id,"grid1d_lat",&grid1d_lat_id); if(grid1d_lat_id != NC_MIN_INT) flg_s1d_grd=True; if(flg_s1d_grd){ rcd=nco_inq_varid(in_id,"grid1d_ixy",&grid1d_ixy_id); rcd=nco_inq_varid(in_id,"grid1d_jxy",&grid1d_jxy_id); rcd=nco_inq_varid(in_id,"grid1d_lon",&grid1d_lon_id); } /* !flg_s1d_grd */ rcd=nco_inq_varid_flg(in_id,"land1d_lat",&land1d_lat_id); if(land1d_lat_id != NC_MIN_INT) flg_s1d_lnd=True; if(flg_s1d_lnd){ rcd=nco_inq_varid_flg(in_id,"land1d_gridcell_index",&land1d_gridcell_index_id); rcd=nco_inq_varid(in_id,"land1d_ixy",&land1d_ixy_id); rcd=nco_inq_varid(in_id,"land1d_jxy",&land1d_jxy_id); rcd=nco_inq_varid(in_id,"land1d_lon",&land1d_lon_id); } /* !flg_s1d_lnd */ rcd=nco_inq_varid_flg(in_id,"pfts1d_lat",&pfts1d_lat_id); if(pfts1d_lat_id != NC_MIN_INT) flg_s1d_pft=True; if(flg_s1d_pft){ rcd=nco_inq_varid(in_id,"pfts1d_ixy",&pfts1d_ixy_id); rcd=nco_inq_varid(in_id,"pfts1d_jxy",&pfts1d_jxy_id); rcd=nco_inq_varid(in_id,"pfts1d_lon",&pfts1d_lon_id); rcd=nco_inq_varid_flg(in_id,"pfts1d_column_index",&pfts1d_column_index_id); 
rcd=nco_inq_varid_flg(in_id,"pfts1d_gridcell_index",&pfts1d_gridcell_index_id);
  //if(flg_nm_hst) rcd=nco_inq_varid(in_id,"pfts1d_wtgcell",&pfts1d_wtgcell_id); else rcd=nco_inq_varid(in_id,"pfts1d_wtxy",&pfts1d_wtgcell_id);
  if(flg_nm_hst) rcd=nco_inq_varid(in_id,"pfts1d_itype_lunit",&pfts1d_ityplun_id); else rcd=nco_inq_varid(in_id,"pfts1d_ityplun",&pfts1d_ityplun_id);
  if(flg_nm_hst) rcd=nco_inq_varid(in_id,"pfts1d_itype_veg",&pfts1d_ityp_veg_id); else rcd=nco_inq_varid(in_id,"pfts1d_itypveg",&pfts1d_ityp_veg_id);
} /* !flg_s1d_pft */
/* At least one S1D class must be present or there is nothing to unpack */
if(!(flg_s1d_clm || flg_s1d_lnd || flg_s1d_pft)){
  (void)fprintf(stderr,"%s: ERROR %s does not detect any of the key variables (currently cols1d_lat, land1d_lat, pfts1d_lat) used to indicate presence of sparse-packed (S1D) variables\nHINT: Be sure the target dataset (file) contains S1D variables---not all CLM/ELM history (as opposed to restart) files do\n",nco_prg_nm_get(),fnc_nm);
  nco_exit(EXIT_FAILURE);
} /* !flg_s1d_clm... */
/* Dimension IDs of the sparse dimensions actually present in input */
if(flg_s1d_clm) rcd=nco_inq_dimid(in_id,clm_nm_in,&dmn_id_clm_in);
if(flg_s1d_grd) rcd=nco_inq_dimid(in_id,grd_nm_in,&dmn_id_grd_in);
if(flg_s1d_lnd) rcd=nco_inq_dimid(in_id,lnd_nm_in,&dmn_id_lnd_in);
if(flg_s1d_pft) rcd=nco_inq_dimid(in_id,pft_nm_in,&dmn_id_pft_in);
if(nco_dbg_lvl_get() >= nco_dbg_std){
  (void)fprintf(stderr,"%s: INFO %s necessary information to unpack cols1d variables\n",nco_prg_nm_get(),flg_s1d_clm ? "Found all" : "Could not find");
  (void)fprintf(stderr,"%s: INFO %s necessary information to unpack land1d variables\n",nco_prg_nm_get(),flg_s1d_lnd ? "Found all" : "Could not find");
  (void)fprintf(stderr,"%s: INFO %s necessary information to unpack pfts1d variables\n",nco_prg_nm_get(),flg_s1d_pft ? "Found all" : "Could not find");
} /* !dbg */

/* Collect other information from data and template files */
int dmn_nbr_in; /* [nbr] Number of dimensions in input file */
int dmn_nbr_out; /* [nbr] Number of dimensions in output file */
int var_nbr; /* [nbr] Number of variables in file */
rcd=nco_inq(in_id,&dmn_nbr_in,&var_nbr,(int *)NULL,(int *)NULL);
const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */
int var_cpy_nbr=0; /* [nbr] Number of copied variables */
int var_rgr_nbr=0; /* [nbr] Number of unpacked variables */
int var_xcl_nbr=0; /* [nbr] Number of deleted variables */
int var_crt_nbr=0; /* [nbr] Number of created variables */
//long idx; /* [idx] Generic index */
unsigned int idx_tbl; /* [idx] Counter for traversal table */
char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */
nco_bool has_clm; /* [flg] Contains column dimension */
nco_bool has_grd; /* [flg] Contains gridcell dimension */
nco_bool has_lnd; /* [flg] Contains landunit dimension */
nco_bool has_pft; /* [flg] Contains PFT dimension */
nco_bool need_clm=False; /* [flg] At least one variable to unpack needs column dimension */
nco_bool need_grd=False; /* [flg] At least one variable to unpack needs gridcell dimension */
nco_bool need_lnd=False; /* [flg] At least one variable to unpack needs landunit dimension */
// nco_bool need_mec=False; /* [flg] At least one variable to unpack needs MEC dimension */
nco_bool need_pft=False; /* [flg] At least one variable to unpack needs PFT dimension */
trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */
/* Define unpacking flag for each variable: a variable is unpacked (flg_rgr)
   iff one of its dimensions matches a known sparse dimension name */
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
  trv=trv_tbl->lst[idx_tbl];
  if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){
    dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn;
    has_clm=has_grd=has_lnd=has_pft=False;
    for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
      /* Pre-determine flags necessary during next loop */
      dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm;
      if(!has_clm && clm_nm_in) has_clm=!strcmp(dmn_nm_cp,clm_nm_in);
      if(!has_grd && grd_nm_in) has_grd=!strcmp(dmn_nm_cp,grd_nm_in);
      if(!has_lnd && lnd_nm_in) has_lnd=!strcmp(dmn_nm_cp,lnd_nm_in);
      if(!has_pft && pft_nm_in) has_pft=!strcmp(dmn_nm_cp,pft_nm_in);
    } /* !dmn_idx */
    /* Unpack variables that contain a sparse-1D dimension */
    if(has_clm || has_grd || has_lnd || has_pft){
      trv_tbl->lst[idx_tbl].flg_rgr=True;
      var_rgr_nbr++;
      if(has_clm) need_clm=True;
      if(has_grd) need_grd=True;
      if(has_lnd) need_lnd=True;
      if(has_pft) need_pft=True;
    } /* endif */
    /* Copy all variables that are not regridded or omitted */
    if(!trv_tbl->lst[idx_tbl].flg_rgr) var_cpy_nbr++;
  } /* end nco_obj_typ_var */
} /* end idx_tbl */
if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit unpacking criteria. The sparse data unpacker expects at least one variable to unpack, and variables not unpacked are copied straight to output. HINT: If the name(s) of the input sparse-1D dimensions (e.g., \"column\", \"landunit\", and \"pft\") do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"column\", \"landunit\", and/or \"pft\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#sparse. For CTSM/ELM sparse-1D coordinate grids, the \"column\", \"landunit\", and \"pft\" variable names can be set with, e.g., \"ncks --rgr column_nm=clm#landunit_nm=lnd#pft_nm=pft\" or \"ncremap -R '--rgr clm=clm#lnd=lnd#pft=pft'\".\n",nco_prg_nm_get(),fnc_nm);
if(nco_dbg_lvl_get() >= nco_dbg_fl){
  for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
    trv=trv_tbl->lst[idx_tbl];
    if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Unpack %s? %s\n",trv.nm,trv.flg_rgr ?
"Yes" : "No"); } /* end idx_tbl */ } /* end dbg */ long clm_nbr_in=NC_MIN_INT; /* [nbr] Number of columns in input data */ long grd_nbr_in=NC_MIN_INT; /* [nbr] Number of gridcells in input data */ long lnd_nbr_in=NC_MIN_INT; /* [nbr] Number of landunits in input data */ long pft_nbr_in=NC_MIN_INT; /* [nbr] Number of PFTs in input data */ long clm_nbr_out=NC_MIN_INT; /* [nbr] Number of columns in output data */ long grd_nbr_out=NC_MIN_INT; /* [nbr] Number of gridcells in output data */ long lnd_nbr_out=NC_MIN_INT; /* [nbr] Number of landunits in output data */ long mec_nbr_out=NC_MIN_INT; /* [nbr] Number of MECs in output data */ long pft_nbr_out=NC_MIN_INT; /* [nbr] Number of PFTs in output data */ if(need_clm) rcd=nco_inq_dimlen(in_id,dmn_id_clm_in,&clm_nbr_in); if(need_grd) rcd=nco_inq_dimlen(in_id,dmn_id_grd_in,&grd_nbr_in); if(need_lnd) rcd=nco_inq_dimlen(in_id,dmn_id_lnd_in,&lnd_nbr_in); if(need_pft) rcd=nco_inq_dimlen(in_id,dmn_id_pft_in,&pft_nbr_in); int hrz_id; /* [id] Horizontal grid netCDF file ID */ long bnd_nbr=int_CEWI; /* [nbr] Number of boundaries for output time and rectangular grid coordinates, and number of vertices for output non-rectangular grid coordinates */ long col_nbr; /* [nbr] Number of columns */ long lon_nbr; /* [nbr] Number of longitudes */ long lat_nbr; /* [nbr] Number of latitudes */ size_t grd_sz_in; /* [nbr] Number of elements in single layer of input grid */ size_t grd_sz_out; /* [nbr] Number of elements in single layer of output grid */ if(flg_grd_dat) hrz_id=in_id; else hrz_id=tpl_id; /* Locate bounds dimension, if any, in file containing horizontal grid */ if(bnd_nm_in && (rcd=nco_inq_dimid_flg(hrz_id,bnd_nm_in,&dmn_id_bnd_in)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(hrz_id,"nv",&dmn_id_bnd_in)) == NC_NOERR) bnd_nm_in=strdup("nv"); /* fxm */ else if((rcd=nco_inq_dimid_flg(hrz_id,"nvertices",&dmn_id_bnd_in)) == NC_NOERR) bnd_nm_in=strdup("nvertices"); /* CICE */ else 
if((rcd=nco_inq_dimid_flg(hrz_id,"maxEdges",&dmn_id_bnd_in)) == NC_NOERR) bnd_nm_in=strdup("maxEdges"); /* MPAS */ if(flg_grd_1D) rcd=nco_inq_dimlen(hrz_id,dmn_id_col_in,&col_nbr); if(flg_grd_2D){ rcd=nco_inq_dimlen(hrz_id,dmn_id_lat_in,&lat_nbr); rcd=nco_inq_dimlen(hrz_id,dmn_id_lon_in,&lon_nbr); } /* !flg_grd_2D */ if(dmn_id_bnd_in != NC_MIN_INT) rcd=nco_inq_dimlen(hrz_id,dmn_id_bnd_in,&bnd_nbr); if(grd_nbr_in != NC_MIN_INT){ grd_sz_in=grd_nbr_in; }else{ grd_sz_in= flg_grd_1D ? col_nbr : lat_nbr*lon_nbr; } /* !grd_nbr_in */ grd_sz_out= flg_grd_1D ? col_nbr : lat_nbr*lon_nbr; /* Lay-out unpacked file */ char *bnd_nm_out=NULL; char *col_nm_out=NULL; char *lat_nm_out=NULL; char *lon_nm_out=NULL; char *lat_dmn_nm_out; char *lon_dmn_nm_out; int dmn_id_bnd_out=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_col_out=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lat_out=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lon_out=NC_MIN_INT; /* [id] Dimension ID */ if(rgr->bnd_nm) bnd_nm_out=rgr->bnd_nm; else bnd_nm_out=bnd_nm_in; if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=col_nm_in; if(rgr->lat_dmn_nm) lat_dmn_nm_out=rgr->lat_dmn_nm; else lat_dmn_nm_out=lat_nm_in; if(rgr->lon_dmn_nm) lon_dmn_nm_out=rgr->lon_dmn_nm; else lon_dmn_nm_out=lon_nm_in; if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=lat_nm_in; if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=lon_nm_in; /* Define horizontal dimensions before all else */ if(flg_grd_1D){ rcd=nco_def_dim(out_id,col_nm_out,col_nbr,&dmn_id_col_out); } /* !flg_grd_1D */ if(flg_grd_2D){ rcd=nco_def_dim(out_id,lat_nm_out,lat_nbr,&dmn_id_lat_out); rcd=nco_def_dim(out_id,lon_nm_out,lon_nbr,&dmn_id_lon_out); } /* !flg_grd_2D */ if(dmn_id_bnd_in != NC_MIN_INT) rcd=nco_def_dim(out_id,bnd_nm_out,bnd_nbr,&dmn_id_bnd_out); char *clm_nm_out=NULL; char *grd_nm_out=NULL; char *lnd_nm_out=NULL; char *pft_nm_out=NULL; if(need_clm) clm_nm_out=(char *)strdup(clm_nm_in); if(need_grd) grd_nm_out=(char 
*)strdup(grd_nm_in); if(need_lnd) lnd_nm_out=(char *)strdup(lnd_nm_in); if(need_pft) pft_nm_out=(char *)strdup(pft_nm_in); int dmn_id_clm_out=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lnd_out=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_mec_out=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_pft_out=NC_MIN_INT; /* [id] Dimension ID */ /* fxm: make an ilun enumerated type? */ int ilun_vegetated_or_bare_soil; /* 1 [enm] */ int ilun_crop; /* 2 [enm] */ int ilun_landice; /* 3 [enm] */ int ilun_landice_multiple_elevation_classes; /* 4 [enm] */ int ilun_deep_lake; /* 5 [enm] */ int ilun_wetland; /* 6 [enm] */ int ilun_urban_tbd; /* 7 [enm] */ int ilun_urban_hd; /* 8 [enm] */ int ilun_urban_md; /* 9 [enm] */ if(flg_nm_hst){ rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_vegetated_or_bare_soil",&ilun_vegetated_or_bare_soil,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_crop",&ilun_crop,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_landice",&ilun_landice,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_landice_multiple_elevation_classes",&ilun_landice_multiple_elevation_classes,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_deep_lake",&ilun_deep_lake,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_wetland",&ilun_wetland,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_urban_tbd",&ilun_urban_tbd,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_urban_hd",&ilun_urban_hd,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_urban_md",&ilun_urban_md,NC_INT); }else{ /* !flg_nm_hst */ rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_vegetated_or_bare_soil",&ilun_vegetated_or_bare_soil,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_crop",&ilun_crop,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_landice",&ilun_landice,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_landice_multiple_elevation_classes",&ilun_landice_multiple_elevation_classes,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_deep_lake",&ilun_deep_lake,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_wetland",&ilun_wetland,NC_INT); 
rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_urban_tbd",&ilun_urban_tbd,NC_INT);
  rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_urban_hd",&ilun_urban_hd,NC_INT);
  rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_urban_md",&ilun_urban_md,NC_INT);
} /* !flg_nm_hst */

/* Determine output Column dimension if needed */
int *cols1d_ityp=NULL; /* [id] Column type */
int *cols1d_ityplun=NULL; /* [id] Column landunit type */
if(need_clm){
  if(cols1d_ityp_id != NC_MIN_INT) cols1d_ityp=(int *)nco_malloc(clm_nbr_in*sizeof(int));
  cols1d_ityplun=(int *)nco_malloc(clm_nbr_in*sizeof(int));
  if(cols1d_ityp_id != NC_MIN_INT) rcd=nco_get_var(in_id,cols1d_ityp_id,cols1d_ityp,NC_INT);
  rcd=nco_get_var(in_id,cols1d_ityplun_id,cols1d_ityplun,NC_INT);
  /* Count MECs as length of first contiguous run of landice_MEC columns
     NOTE(review): clm_idx++ in the while-condition is not bounds-checked, so a MEC
     run extending to the final column would read one element past the array---confirm
     inputs always terminate such runs before clm_nbr_in */
  mec_nbr_out=0;
  for(clm_idx=0;clm_idx<clm_nbr_in;clm_idx++){
    if(cols1d_ityplun[clm_idx] != ilun_landice_multiple_elevation_classes) continue;
    while(cols1d_ityplun[clm_idx++] == ilun_landice_multiple_elevation_classes) mec_nbr_out++;
    break;
  } /* !clm_idx */
  /* NB: landice_MEC (ilun=4, usually) landunits have 10 (always, AFAICT) glacier elevation classes */
  if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO mec_nbr_out = %ld\n",nco_prg_nm_get(),mec_nbr_out);
} /* !need_clm */
/* Determine output Grid dimension if needed:
   CLM/ELM 'gridcell' dimension counts each gridcell that contains land
   Replace this dimension by horizontal dimension(s) in input data file
   NOTE(review): condition tests need_clm yet the closing comment says !need_grd---this
   looks like it should test need_grd, otherwise grd_nbr_out remains NC_MIN_INT when only
   gridcell variables require unpacking; verify against upstream before changing */
if(need_clm){
  if(flg_grd_1D) grd_nbr_out=col_nbr;
  if(flg_grd_2D) grd_nbr_out=lat_nbr*lon_nbr;
  if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO grd_nbr_out = %ld\n",nco_prg_nm_get(),grd_nbr_out);
} /* !need_grd */
/* Determine output Landunit dimension if needed */
if(need_lnd){
  lnd_nbr_out=3; /* fxm: Based on TBUILD variable for 3 urban landunit types */
  if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO lnd_nbr_out = %ld\n",nco_prg_nm_get(),lnd_nbr_out);
} /* !need_lnd */
/* Determine output PFT dimension if needed */
//double *pfts1d_wtgcell=NULL; /* [id] PFT weight relative to corresponding gridcell */
int *pfts1d_ityp_veg=NULL; /* [id] PFT vegetation type */
int *pfts1d_ityplun=NULL; /* [id] PFT landunit type */
int *pfts1d_ixy=NULL; /* [id] PFT 2D longitude index */
int *pfts1d_jxy=NULL; /* [id] PFT 2D latitude index */
int pft_typ; /* [enm] PFT type */
if(need_pft){
  //pfts1d_wtgcell=(double *)nco_malloc(pft_nbr_in*sizeof(double));
  pfts1d_ityp_veg=(int *)nco_malloc(pft_nbr_in*sizeof(int));
  pfts1d_ityplun=(int *)nco_malloc(pft_nbr_in*sizeof(int));
  //rcd=nco_get_var(in_id,pfts1d_wtgcell_id,pfts1d_wtgcell,NC_DOUBLE);
  rcd=nco_get_var(in_id,pfts1d_ityp_veg_id,pfts1d_ityp_veg,NC_INT);
  rcd=nco_get_var(in_id,pfts1d_ityplun_id,pfts1d_ityplun,NC_INT);
  /* Count output PFTs as run of non-zero vegetation types after first vegetated/crop entry
     NOTE(review): ++pft_idx is likewise unchecked against pft_nbr_in---confirm a zero
     (bare-ground) terminator always occurs before the array end */
  pft_nbr_out=0;
  for(pft_idx=0;pft_idx<pft_nbr_in;pft_idx++){
    if((pfts1d_ityplun[pft_idx] != ilun_vegetated_or_bare_soil) && (pfts1d_ityplun[pft_idx] != ilun_crop)) continue;
    /* Skip bare ground */
    while(pfts1d_ityp_veg[++pft_idx] != 0) pft_nbr_out++;
    break;
  } /* !pft_idx */
  if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO pft_nbr_out = %ld\n",nco_prg_nm_get(),pft_nbr_out);
  pfts1d_ixy=(int *)nco_malloc(pft_nbr_in*sizeof(int));
  rcd=nco_get_var(in_id,pfts1d_ixy_id,pfts1d_ixy,NC_INT);
  if(flg_grd_2D){
    pfts1d_jxy=(int *)nco_malloc(pft_nbr_in*sizeof(int));
    rcd=nco_get_var(in_id,pfts1d_jxy_id,pfts1d_jxy,NC_INT);
  } /* !flg_grd_2D */
} /* !need_pft */
/* Define unpacked versions of needed dimensions before all else */
//(void)fprintf(stdout,"%s: DEBUG quark1\n",nco_prg_nm_get());
if(need_clm && clm_nbr_out > 0L) rcd=nco_def_dim(out_id,clm_nm_out,clm_nbr_out,&dmn_id_clm_out);
if(need_lnd && lnd_nbr_out > 0L) rcd=nco_def_dim(out_id,lnd_nm_out,lnd_nbr_out,&dmn_id_lnd_out);
if(need_pft && pft_nbr_out > 0L) rcd=nco_def_dim(out_id,pft_nm_out,pft_nbr_out,&dmn_id_pft_out);
/* Assume MECs are new output dimension if they are enumerated in input */
if(mec_nbr_out > 0L) rcd=nco_def_dim(out_id,mec_nm_out,mec_nbr_out,&dmn_id_mec_out);
/* Pre-allocate dimension ID and cnt/srt space */
char
*var_nm; /* [sng] Variable name */
int *dmn_ids_in=NULL; /* [id] Dimension IDs */
int *dmn_ids_out=NULL; /* [id] Dimension IDs */
int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */
int var_id_in; /* [id] Variable ID */
int var_id_out; /* [id] Variable ID */
long *dmn_cnt_in=NULL; /* [nbr] Input dimension sizes */
long *dmn_cnt_out=NULL; /* [nbr] Output dimension sizes */
long *dmn_srt=NULL; /* [idx] Hyperslab start indices */
nc_type var_typ; /* [enm] Variable type (same for input and output variable) */
nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */
int dmn_in_fst; /* [idx] Offset of input- relative to output-dimension due to non-MRV dimension insertion */
int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */
int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */
rcd+=nco_inq_ndims(in_id,&dmn_nbr_max);
dmn_ids_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int));
dmn_ids_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int));
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in);
if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out);
dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
/* Obtain record dimension information from data file (restart files have no time dimension) */
rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,(int *)NULL);
if(dmn_nbr_rec > 0){
  dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int));
  rcd=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec);
} /* !dmn_nbr_rec */
dfl_lvl=rgr->dfl_lvl;
fl_out_fmt=rgr->fl_out_fmt;
//const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables (scalars) */
const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */
const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */
nc_type crd_typ_in;
nc_type crd_typ_out;
/* Required grid variables */
int lat_in_id; /* [id] Variable ID for latitude */
int lat_out_id; /* [id] Variable ID for latitude */
int lon_in_id; /* [id] Variable ID for longitude */
int lon_out_id; /* [id] Variable ID for longitude */
rcd=nco_inq_varid(hrz_id,lat_nm_in,&lat_in_id);
rcd=nco_inq_varid(hrz_id,lon_nm_in,&lon_in_id);
rcd=nco_inq_vartype(hrz_id,lat_in_id,&crd_typ_in);
/* NB: ELM/CLM history files default to NC_FLOAT for most grid variables
   To convert to NC_DOUBLE on output, also convert _FillValue attribute type consistently */
crd_typ_out=crd_typ_in;
/* Optional grid variables */
char *area_nm;
char *sgs_frc_nm;
char *lat_bnd_nm;
char *lon_bnd_nm;
char *sgs_msk_nm;
int area_in_id=NC_MIN_INT; /* [id] Variable ID for area */
int area_out_id=NC_MIN_INT; /* [id] Variable ID for area */
int sgs_frc_in_id=NC_MIN_INT; /* [id] Variable ID for fraction */
int sgs_frc_out_id=NC_MIN_INT; /* [id] Variable ID for fraction */
int lat_bnd_in_id=NC_MIN_INT; /* [id] Variable ID for latitude bounds */
int lat_bnd_out_id=NC_MIN_INT; /* [id] Variable ID for latitude bounds */
int lon_bnd_in_id=NC_MIN_INT; /* [id] Variable ID for longitude bounds */
int lon_bnd_out_id=NC_MIN_INT; /* [id] Variable ID for longitude bounds */
int sgs_msk_in_id=NC_MIN_INT; /* [id] Variable ID for mask */
int sgs_msk_out_id=NC_MIN_INT; /* [id] Variable ID for mask */
nco_bool flg_area_out=False; /* [flg] Add area to output */
nco_bool flg_lat_bnd_out=False; /* [flg] Add latitude bounds to output */
nco_bool flg_lon_bnd_out=False; /* [flg] Add longitude bounds to output */
nco_bool flg_sgs_frc_out=False; /* [flg] Add fraction to output */
nco_bool flg_sgs_msk_out=False; /* [flg] Add mask to output */
/* Fall back to standard CLM/ELM names when not user-specified */
area_nm=rgr->area_nm ? rgr->area_nm : strdup("area");
lat_bnd_nm=rgr->lat_bnd_nm ? rgr->lat_bnd_nm : strdup("lat_bnd");
lon_bnd_nm=rgr->lon_bnd_nm ? rgr->lon_bnd_nm : strdup("lon_bnd");
sgs_frc_nm=rgr->sgs_frc_nm ? rgr->sgs_frc_nm : strdup("landfrac");
sgs_msk_nm=rgr->sgs_msk_nm ?
rgr->sgs_msk_nm : strdup("landmask"); if((rcd=nco_inq_varid_flg(hrz_id,area_nm,&area_in_id)) == NC_NOERR) flg_area_out=True; if((rcd=nco_inq_varid_flg(hrz_id,lat_bnd_nm,&lat_bnd_in_id)) == NC_NOERR) flg_lat_bnd_out=True; if((rcd=nco_inq_varid_flg(hrz_id,lon_bnd_nm,&lon_bnd_in_id)) == NC_NOERR) flg_lon_bnd_out=True; if((rcd=nco_inq_varid_flg(hrz_id,sgs_frc_nm,&sgs_frc_in_id)) == NC_NOERR) flg_sgs_frc_out=True; if((rcd=nco_inq_varid_flg(hrz_id,sgs_msk_nm,&sgs_msk_in_id)) == NC_NOERR) flg_sgs_msk_out=True; if(flg_grd_1D){ rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col_out,&lat_out_id); if(dfl_lvl > 0) rcd+=nco_flt_def_out(out_id,lat_out_id,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lat_in_id,lat_out_id,PCK_ATT_CPY); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col_out,&lon_out_id); if(dfl_lvl > 0) rcd+=nco_flt_def_out(out_id,lon_out_id,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lon_in_id,lon_out_id,PCK_ATT_CPY); var_crt_nbr++; if(flg_lat_bnd_out){ dmn_ids_out[0]=dmn_id_col_out; dmn_ids_out[1]=dmn_id_bnd_out; rcd+=nco_def_var(out_id,lat_bnd_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_out_id); if(dfl_lvl > 0) rcd+=nco_flt_def_out(out_id,lat_bnd_out_id,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lat_bnd_in_id,lat_bnd_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_lat_bnd_out */ if(flg_lon_bnd_out){ dmn_ids_out[0]=dmn_id_col_out; dmn_ids_out[1]=dmn_id_bnd_out; rcd+=nco_def_var(out_id,lon_bnd_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_out_id); if(dfl_lvl > 0) rcd+=nco_flt_def_out(out_id,lon_bnd_out_id,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lon_bnd_in_id,lon_bnd_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_lon_bnd_out */ if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm,crd_typ_out,dmn_nbr_1D,&dmn_id_col_out,&area_out_id); if(dfl_lvl > 0) rcd+=nco_flt_def_out(out_id,area_out_id,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,area_in_id,area_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_area_out */ 
if(flg_sgs_frc_out){ rcd+=nco_def_var(out_id,sgs_frc_nm,crd_typ_out,dmn_nbr_1D,&dmn_id_col_out,&sgs_frc_out_id); if(dfl_lvl > 0) rcd+=nco_flt_def_out(out_id,sgs_frc_out_id,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,sgs_frc_in_id,sgs_frc_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_sgs_frc_out */ if(flg_sgs_msk_out){ rcd+=nco_def_var(out_id,sgs_msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_col_out,&sgs_msk_out_id); if(dfl_lvl > 0) rcd+=nco_flt_def_out(out_id,sgs_msk_out_id,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,sgs_msk_in_id,sgs_msk_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_sgs_msk_out */ } /* !flg_grd_1D */ if(flg_grd_2D){ rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lat_out,&lat_out_id); if(dfl_lvl > 0) rcd+=nco_flt_def_out(out_id,lat_out_id,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lat_in_id,lat_out_id,PCK_ATT_CPY); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lon_out,&lon_out_id); if(dfl_lvl > 0) rcd+=nco_flt_def_out(out_id,lon_out_id,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lon_in_id,lon_out_id,PCK_ATT_CPY); var_crt_nbr++; if(flg_lat_bnd_out){ dmn_ids_out[0]=dmn_id_lat_out; dmn_ids_out[1]=dmn_id_bnd_out; rcd+=nco_def_var(out_id,lat_bnd_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_out_id); if(dfl_lvl > 0) rcd+=nco_flt_def_out(out_id,lat_bnd_out_id,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lat_bnd_in_id,lat_bnd_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_lat_bnd_out */ if(flg_lon_bnd_out){ dmn_ids_out[0]=dmn_id_lon_out; dmn_ids_out[1]=dmn_id_bnd_out; rcd+=nco_def_var(out_id,lon_bnd_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_out_id); if(dfl_lvl > 0) rcd+=nco_flt_def_out(out_id,lon_bnd_out_id,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lon_bnd_in_id,lon_bnd_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_lon_bnd_out */ dmn_ids_out[0]=dmn_id_lat_out; dmn_ids_out[1]=dmn_id_lon_out; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id); if(dfl_lvl > 0) 
rcd+=nco_flt_def_out(out_id,area_out_id,dfl_lvl);
    (void)nco_att_cpy(hrz_id,out_id,area_in_id,area_out_id,PCK_ATT_CPY);
    var_crt_nbr++;
  } /* !flg_area_out */
  if(flg_sgs_frc_out){
    rcd+=nco_def_var(out_id,sgs_frc_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&sgs_frc_out_id);
    if(dfl_lvl > 0) rcd+=nco_flt_def_out(out_id,sgs_frc_out_id,dfl_lvl);
    (void)nco_att_cpy(hrz_id,out_id,sgs_frc_in_id,sgs_frc_out_id,PCK_ATT_CPY);
    var_crt_nbr++;
  } /* !flg_sgs_frc_out */
  if(flg_sgs_msk_out){
    rcd+=nco_def_var(out_id,sgs_msk_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids_out,&sgs_msk_out_id);
    if(dfl_lvl > 0) rcd+=nco_flt_def_out(out_id,sgs_msk_out_id,dfl_lvl);
    (void)nco_att_cpy(hrz_id,out_id,sgs_msk_in_id,sgs_msk_out_id,PCK_ATT_CPY);
    var_crt_nbr++;
  } /* !flg_sgs_msk_out */
} /* !flg_grd_2D */

int flg_pck; /* [flg] Variable is packed on disk */
nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */
nco_bool flg_add_spc_crd; /* [flg] Add spatial coordinates to S1D variable */
float mss_val_flt;
double mss_val_dbl;
nco_s1d_typ_enm nco_s1d_typ; /* [enm] Sparse-1D type of input variable */
aed_sct aed_mtd_fll_val; /* [sct] Attribute-edit structure for _FillValue */
/* Define unpacked S1D and copied variables in output file */
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
  trv=trv_tbl->lst[idx_tbl];
  if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){
    var_nm=trv.nm;
    /* Preserve input type in output type */
    var_typ=trv.var_typ;
    dmn_nbr_in=trv.nbr_dmn;
    dmn_nbr_out=trv.nbr_dmn;
    rcd=nco_inq_varid(in_id,var_nm,&var_id_in);
    rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out);
    /* If variable has not been defined, define it */
    if(rcd != NC_NOERR){
      if(trv.flg_rgr){
        /* Unpack */
        rcd=nco_inq_vardimid(in_id,var_id_in,dmn_ids_in);
        dmn_in_fst=0;
        flg_add_spc_crd=False;
        rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck);
        if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports S1D variable \"%s\" is packed so results unpredictable. HINT: If regridded values seems weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm);
        /* Map each input dimension to its output dimension(s); sparse dimensions may
           expand into MEC/PFT plus spatial dimension(s), tracked by dmn_in_fst */
        for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
          rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm);
          if(clm_nm_in && !strcmp(dmn_nm,clm_nm_in)){
            if(mec_nbr_out > 0L){
              /* Change input column dimension to MEC if present */
              dmn_ids_out[dmn_idx]=dmn_id_mec_out;
              dmn_cnt_out[dmn_idx]=mec_nbr_out;
              dmn_in_fst++;
              dmn_nbr_out++;
            } /* !mec_nbr_out */
            flg_add_spc_crd=True;
          }else if(!strcmp(dmn_nm,grd_nm_in)){
            /* Gridcell dimension disappears to become spatial dimension in output */
            flg_add_spc_crd=True;
          }else if(!strcmp(dmn_nm,lnd_nm_in)){
            /* Change landunit dimension */
            dmn_ids_out[dmn_idx]=dmn_id_lnd_out;
            dmn_cnt_out[dmn_idx]=lnd_nbr_out;
            flg_add_spc_crd=True;
          }else if(!strcmp(dmn_nm,pft_nm_in)){
            if(pft_nbr_out > 0L){
              /* Change input PFT dimension to PFT if present */
              dmn_ids_out[dmn_idx]=dmn_id_pft_out;
              dmn_cnt_out[dmn_idx]=pft_nbr_out;
              dmn_in_fst++;
              dmn_nbr_out++;
            } /* !pft_nbr_out */
            flg_add_spc_crd=True;
          }else{
            /* Dimensions [clm/lnd/pft]_nm_in were pre-defined above as [clm/lnd/pft]_nm_out, replicate all other dimensions */
            rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_ids_out+dmn_idx);
          } /* !clm */
          if(rcd != NC_NOERR){
            /* Current input dimension is not yet in output file */
            rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_out+dmn_idx);
            /* Check-for and, if found, retain record dimension property */
            for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++)
              if(dmn_ids_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt_out[dmn_idx]=NC_UNLIMITED;
            rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx);
          } /* !rcd */
          if(flg_add_spc_crd){
            /* Follow by spatial dimension(s) */
            if(flg_grd_1D){
              dmn_ids_out[dmn_idx+dmn_in_fst]=dmn_id_col_out;
              dmn_cnt_out[dmn_idx+dmn_in_fst]=col_nbr;
            } /* !flg_grd_1D */
            if(flg_grd_2D){
              dmn_ids_out[dmn_idx+dmn_in_fst]=dmn_id_lat_out;
              dmn_cnt_out[dmn_idx+dmn_in_fst]=lat_nbr;
              dmn_in_fst++;
dmn_nbr_out++;
              dmn_ids_out[dmn_idx+dmn_in_fst]=dmn_id_lon_out;
              dmn_cnt_out[dmn_idx+dmn_in_fst]=lon_nbr;
            } /* !flg_grd_2D */
          } /* !flg_add_spc_crd */
        } /* !dmn_idx */
      }else{ /* !flg_rgr */
        /* Replicate non-S1D variables */
        rcd=nco_inq_vardimid(in_id,var_id_in,dmn_ids_in);
        for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
          rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm);
          rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_ids_out+dmn_idx);
          if(rcd != NC_NOERR){
            rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_out+dmn_idx);
            /* Check-for and, if found, retain record dimension property */
            for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++)
              if(dmn_ids_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt_out[dmn_idx]=NC_UNLIMITED;
            rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx);
          } /* !rcd */
        } /* !dmn_idx */
      } /* !flg_rgr */
      rcd=nco_def_var(out_id,var_nm,var_typ,dmn_nbr_out,dmn_ids_out,&var_id_out);
      /* Duplicate netCDF4 settings when possible */
      if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC)
        if(dmn_nbr_out > 0) rcd=nco_flt_def_wrp(in_id,var_id_in,(char *)NULL,out_id,var_id_out,dfl_lvl);
      (void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY);
      /* Variables with subterranean levels and missing-value extrapolation must have _FillValue attribute */
      nco_bool flg_add_msv_att; /* [flg] Extrapolation requires _FillValue */
      flg_add_msv_att=False;
      /* NOTE(review): flg_add_msv_att is never set True in the visible code, so this
         branch appears dead here---confirm whether a setter exists or was removed */
      if(flg_add_msv_att && trv.flg_rgr){
        has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl);
        if(!has_mss_val){
          nco_bool flg_att_chg; /* [flg] _FillValue attribute was written */
          aed_mtd_fll_val.var_nm=var_nm;
          aed_mtd_fll_val.id=var_id_out;
          aed_mtd_fll_val.type=var_typ;
          if(var_typ == NC_FLOAT) aed_mtd_fll_val.val.fp=&mss_val_flt; else if(var_typ == NC_DOUBLE) aed_mtd_fll_val.val.dp=&mss_val_dbl;
          flg_att_chg=nco_aed_prc(out_id,var_id_out,aed_mtd_fll_val);
          if(!flg_att_chg && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: WARNING %s reports unsuccessful attempt to create _FillValue attribute for variable %s\n",nco_prg_nm_get(),fnc_nm,var_nm);
        } /* !has_mss_val */
      } /* !flg_add_msv_att */
    } /* !rcd */
  } /* !var */
} /* !idx_tbl */

/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
/* Begin data mode */
(void)nco_enddef(out_id);

/* Copy coordinate system before closing template file
   NB: nco_cpy_var_val() cannot be used here when coordinates are in fl_tpl not fl_in */
(void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,lat_nm_in,(lmt_sct *)NULL,(int)0);
(void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,lon_nm_in,(lmt_sct *)NULL,(int)0);
if(flg_lat_bnd_out) (void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,lat_bnd_nm,(lmt_sct *)NULL,(int)0);
if(flg_lon_bnd_out) (void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,lon_bnd_nm,(lmt_sct *)NULL,(int)0);
if(flg_sgs_frc_out) (void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,sgs_frc_nm,(lmt_sct *)NULL,(int)0);
if(flg_sgs_msk_out) (void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,sgs_msk_nm,(lmt_sct *)NULL,(int)0);
if(flg_grd_tpl){
  nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
  /* No further access to template file, close it */
  nco_close(tpl_id);
  /* Remove local copy of file */
  if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_tpl);
} /* !flg_grd_tpl */

/* Free pre-allocated array space */
if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in);
if(dmn_ids_out) dmn_ids_out=(int *)nco_free(dmn_ids_out);
if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec);
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in);
if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out);

/* Unpack and copy data from input file */
//int dmn_idx_col=int_CEWI; /* [idx] Index of column dimension */
//int dmn_idx_lat=int_CEWI; /* [idx] Index of latitude dimension */
//int dmn_idx_lon=int_CEWI; /* [idx] Index of longitude dimension */
int thr_idx; /* [idx] Thread index */
//int var_id;
/* [id] Current variable ID */
size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */
size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */
ptr_unn var_val_in;
ptr_unn var_val_out;
/* Using naked stdin/stdout/stderr in parallel region generates warning
   Copy appropriate filehandle to variable scoped as shared in parallel clause */
FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */
/* NOTE(review): __GNUG__ is normally defined only by C++ (g++) compilation, so this
   pragma looks disabled for plain C builds---confirm whether that is intentional */
#ifdef __GNUG__
# pragma omp parallel for firstprivate(var_val_in,var_val_out) private(dmn_cnt_in,dmn_cnt_out,dmn_ids_in,dmn_ids_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,has_clm,has_grd,has_lnd,has_pft,has_mss_val,idx_out,idx_tbl,in_id,mss_val_dbl,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ) shared(dmn_id_clm_in,dmn_id_clm_out,dmn_id_col_in,dmn_id_col_out,dmn_id_lat_in,dmn_id_lat_out,dmn_id_lnd_in,dmn_id_lnd_out,dmn_id_lon_in,dmn_id_lon_out,dmn_id_pft_in,dmn_id_pft_out,flg_s1d_clm,flg_s1d_pft,clm_nbr_in,clm_nbr_out,col_nbr,lat_nbr,lnd_nbr_in,lnd_nbr_out,lon_nbr,pft_nbr_in,pft_nbr_out,out_id,pfts1d_ixy,pfts1d_jxy)
#endif /* !__GNUG__ */
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
  trv=trv_tbl->lst[idx_tbl];
  /* Each thread reads through its own input file handle */
  thr_idx=omp_get_thread_num();
  in_id=trv_tbl->in_id_arr[thr_idx];
#ifdef _OPENMP
  if(nco_dbg_lvl_get() >= nco_dbg_grp && !thr_idx && !idx_tbl) (void)fprintf(fp_stdout,"%s: INFO %s reports regrid loop uses %d thread%s\n",nco_prg_nm_get(),fnc_nm,omp_get_num_threads(),(omp_get_num_threads() > 1) ? "s" : "");
  if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO thread = %d, idx_tbl = %d, nm = %s\n",nco_prg_nm_get(),thr_idx,idx_tbl,trv.nm);
#endif /* !_OPENMP */
  if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){
    if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm);
    if(trv.flg_rgr){
      /* Unpack variable */
      var_nm=trv.nm;
      var_typ=trv.var_typ; /* NB: Output type in file is same as input type */
      var_sz_in=1L;
      var_sz_out=1L;
      rcd=nco_inq_varid(in_id,var_nm,&var_id_in);
      rcd=nco_inq_varid(out_id,var_nm,&var_id_out);
      rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in);
      rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out);
      dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? dmn_nbr_in : dmn_nbr_out;
      dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int));
      dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int));
      dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */
      dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
      dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
      rcd=nco_inq_vardimid(in_id,var_id_in,dmn_ids_in);
      rcd=nco_inq_vardimid(out_id,var_id_out,dmn_ids_out);
      /* Accumulate total input element count and zero the hyperslab start */
      for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
        rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_in+dmn_idx);
        var_sz_in*=dmn_cnt_in[dmn_idx];
        dmn_srt[dmn_idx]=0L;
      } /* !dmn_idx */
      for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){
        rcd=nco_inq_dimlen(out_id,dmn_ids_out[dmn_idx],dmn_cnt_out+dmn_idx);
        if(dmn_cnt_out[dmn_idx] == 0L){
          /* No records have been written, so overwrite zero output record size with input record size */
          char dmn_rec_nm[NC_MAX_NAME]; /* [sng] Record dimension name */
          int dmn_rec_id_in;
          rcd=nco_inq_dimname(out_id,dmn_ids_out[dmn_idx],dmn_rec_nm);
          rcd=nco_inq_dimid(in_id,dmn_rec_nm,&dmn_rec_id_in);
          rcd=nco_inq_dimlen(in_id,dmn_rec_id_in,dmn_cnt_out+dmn_idx);
        } /* !dmn_cnt_out */
        var_sz_out*=dmn_cnt_out[dmn_idx];
        dmn_srt[dmn_idx]=0L;
      } /* !dmn_idx */
      var_val_in.vp=(void *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ),fnc_nm,"Unable to malloc() input value buffer");
      var_val_out.vp=(void *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ),fnc_nm,"Unable to malloc() output value buffer");
      /* Initialize output */
      (void)memset(var_val_out.vp,0,var_sz_out*nco_typ_lng(var_typ));
      /* Obtain input variable */
      rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_in.vp,var_typ);
      /* Classify variable by which sparse dimension it carries */
      has_clm=has_grd=has_lnd=has_pft=False;
      nco_s1d_typ=nco_s1d_nil;
      for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
        dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm;
        if(!has_clm && clm_nm_in) has_clm=!strcmp(dmn_nm_cp,clm_nm_in);
        if(!has_grd && grd_nm_in) has_grd=!strcmp(dmn_nm_cp,grd_nm_in);
        if(!has_lnd && lnd_nm_in) has_lnd=!strcmp(dmn_nm_cp,lnd_nm_in);
        if(!has_pft && pft_nm_in) has_pft=!strcmp(dmn_nm_cp,pft_nm_in);
      } /* !dmn_idx */
      if(has_clm) nco_s1d_typ=nco_s1d_clm;
      else if(has_grd) nco_s1d_typ=nco_s1d_grd;
      else if(has_lnd) nco_s1d_typ=nco_s1d_lnd;
      else if(has_pft) nco_s1d_typ=nco_s1d_pft;
      else{
        (void)fprintf(stderr,"%s: ERROR %s reports variable %s does not appear to be sparse\n",nco_prg_nm_get(),fnc_nm,var_nm);
        nco_exit(EXIT_FAILURE);
      } /* !strstr() */
      if(nco_dbg_lvl_get() >= nco_dbg_std){
        (void)fprintf(stderr,"%s: INFO %s reports variable %s is sparse type %s\n",nco_prg_nm_get(),fnc_nm,var_nm,nco_s1d_sng(nco_s1d_typ));
      } /* !dbg */
      /* The Hard Work */
      if(nco_s1d_typ == nco_s1d_pft){
        /* Turn GPP(time,pft) into GPP(time,pft,lndgrid) */
        for(pft_idx=0;pft_idx<pft_nbr_in;pft_idx++){
          pft_typ=pfts1d_ityp_veg[pft_idx]; /* [1 <= pft_typ <= pft_nbr_out] */
          /* Skip bare ground, output array contains only vegetated types */
          if(!pft_typ) continue;
          /* grd_idx is the index relative to the origin of the horizontal grid for a given level
             [0 <= grd_idx_out <= col_nbr_out-1L], [1 <= pfts1d_ixy <= col_nbr_out] */
          grd_idx_out= flg_grd_1D ?
pfts1d_ixy[pft_idx]-1L : (pfts1d_ixy[pft_idx]-1L)*lat_nbr+(pfts1d_jxy[pft_idx]-1L); idx_out=(pft_typ-1)*grd_sz_out+grd_idx_out; /* memcpy() would allow next statement to work for generic types However, memcpy() is a system call and could be expensive in an innermost loop */ switch(var_typ){ case NC_FLOAT: var_val_out.fp[idx_out]=var_val_in.fp[pft_idx]; break; case NC_DOUBLE: var_val_out.dp[idx_out]=var_val_in.dp[pft_idx]; break; case NC_INT: var_val_out.ip[idx_out]=var_val_in.ip[pft_idx]; break; default: (void)fprintf(fp_stdout,"%s: ERROR %s reports unsupported type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_nc_type_err(); break; } /* !var_typ */ } /* !idx */ } /* !nco_s1d_typ */ #pragma omp critical { /* begin OpenMP critical */ rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_out.vp,var_typ); } /* end OpenMP critical */ if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in); if(dmn_ids_out) dmn_ids_out=(int *)nco_free(dmn_ids_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(var_val_in.vp) var_val_in.vp=(void *)nco_free(var_val_in.vp); if(var_val_out.vp) var_val_out.vp=(void *)nco_free(var_val_out.vp); }else{ /* !trv.flg_rgr */ /* Use standard NCO copy routine for variables that are not regridded 20190511: Copy them only once */ #pragma omp critical { /* begin OpenMP critical */ (void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl); } /* end OpenMP critical */ } /* !flg_rgr */ } /* !xtr */ } /* end (OpenMP parallel for) loop over idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n"); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables interpolated = %d, copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr); /* Free output data memory */ if(cols1d_ityp) cols1d_ityp=(int 
*)nco_free(cols1d_ityp); if(cols1d_ityplun) cols1d_ityplun=(int *)nco_free(cols1d_ityplun); if(pfts1d_ityp_veg) pfts1d_ityp_veg=(int *)nco_free(pfts1d_ityp_veg); if(pfts1d_ityplun) pfts1d_ityplun=(int *)nco_free(pfts1d_ityplun); if(pfts1d_ixy) pfts1d_ixy=(int *)nco_free(pfts1d_ixy); if(pfts1d_jxy) pfts1d_jxy=(int *)nco_free(pfts1d_jxy); //if(pfts1d_wtgcell) pfts1d_wtgcell=(double *)nco_free(pfts1d_wtgcell); if(clm_nm_in) clm_nm_in=(char *)nco_free(clm_nm_in); if(grd_nm_in) grd_nm_in=(char *)nco_free(grd_nm_in); if(lnd_nm_in) lnd_nm_in=(char *)nco_free(lnd_nm_in); if(pft_nm_in) pft_nm_in=(char *)nco_free(pft_nm_in); if(clm_nm_out) clm_nm_out=(char *)nco_free(clm_nm_out); if(grd_nm_out) grd_nm_out=(char *)nco_free(grd_nm_out); if(lnd_nm_out) lnd_nm_out=(char *)nco_free(lnd_nm_out); if(pft_nm_out) pft_nm_out=(char *)nco_free(pft_nm_out); return rcd; } /* !nco_s1d_unpack() */
algebra_lineare_par.c
/* Bit-packed Gaussian elimination over GF(2), parallelised with OpenMP.
 * Each matrix row is stored as NUM_BLOCCHI machine words of N_BITS bits. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

#define NUM_BLOCCHI 156
#define K 6000
#define NUM_PRIMI 128
#define N_BITS 64
#define TYPE unsigned long

typedef TYPE word;

/* Per-row statistics used by the elimination to locate pivot rows. */
struct row_stats {
    /* Column index of the first 1 bit of the row (as addressed by get_k_i);
     * set to the one-past-the-end column when the row is all zero. */
    long unsigned b_dx;
    /* Number of 1 bits in the row (its Hamming weight). */
    long unsigned n_bit;
};

struct row_stats wt[K];

/* Return bit i of row k.  Bit 0 is the most significant bit of block 0. */
unsigned int get_k_i(unsigned long M[][NUM_BLOCCHI], unsigned long k, unsigned long i)
{
    unsigned long I = i / N_BITS;                        /* block that holds bit i   */
    unsigned long n_shift = N_BITS - ((i % N_BITS) + 1); /* bit position in the block */
    return (M[k][I] >> n_shift) & 1;
}

/* Row operation v(k) = v(k) + v(j) in GF(2): bitwise XOR, block by block.
 * Declared void: the original returned `unsigned long` but contained no
 * return statement, which is undefined behaviour if the value is ever used. */
void add_j_to_k(unsigned long M[][NUM_BLOCCHI], unsigned long k, unsigned long j, unsigned long n_blocchi)
{
    for (unsigned long I = 0; I < n_blocchi; ++I)
        M[k][I] ^= M[j][I];
}

/* Recompute the statistics of row k: position of its first 1 bit and its
 * Hamming weight.  For an all-zero row, b_dx is set to the one-past-the-end
 * column (the original used 128, which is a valid column index, so a zero
 * row could be mistaken for the pivot of column 128). */
void get_wt_k(unsigned long M[][NUM_BLOCCHI], unsigned long n_blocchi, unsigned long k, struct row_stats * wt)
{
    const unsigned long n_col = n_blocchi * N_BITS;

    wt->b_dx = n_col; /* sentinel: no 1 bit found */
    wt->n_bit = 0;

    /* Bounds test BEFORE the bit access: the original evaluated
     * get_k_i(M, k, i) first, reading one block past the end of the row
     * whenever the row was all zero. */
    unsigned long i = 0;
    while (i < n_col && get_k_i(M, k, i) == 0)
        ++i;
    if (i >= n_col)
        return;

    wt->b_dx = i;
    for (; i < n_col; ++i)
        if (get_k_i(M, k, i))
            wt->n_bit++;
}

/* Gaussian elimination modulo 2.  For every column i, find the pivot row j
 * whose first 1 bit sits in column i, then clear bit i from every row below
 * the pivot and refresh that row's statistics. */
void bit_gaussian_elimination_mod_2(unsigned long M[][NUM_BLOCCHI], unsigned long n_row, unsigned long n_col, unsigned long n_blocks, struct row_stats wt[])
{
    for (unsigned long i = 0; i < n_col; ++i) {
        /* Locate the pivot row for column i. */
        unsigned long j;
        for (j = 0; j < n_row && wt[j].b_dx != i; ++j)
            ; /* advance only */
        if (j == n_row)
            continue; /* no pivot for this column */

        for (unsigned long k = j + 1; k < n_row; ++k) {
            if (get_k_i(M, k, i)) {            /* bit i of row k must be cleared */
                add_j_to_k(M, k, j, n_blocks); /* v(k) = v(k) + v(j) */
                get_wt_k(M, n_blocks, k, &wt[k]);
            }
        }
    }
}

/* Print the N_BITS bits of a word, most significant first. */
void print_bits(unsigned long a)
{
    for (int i = N_BITS - 1; i >= 0; --i)
        printf("%d", (int)((a >> i) & 1UL));
}

/* Dump the first `righe` rows of the matrix, one blank-separated word per block. */
void print_all(unsigned long M[][NUM_BLOCCHI], int righe)
{
    for (int i = 0; i < righe; ++i) {
        for (int j = 0; j < NUM_BLOCCHI; ++j) {
            print_bits(M[i][j]);
            printf(" ");
        }
        printf("\n");
    }
}

int main(void)
{
    /* static: the matrix is ~7.3 MB, too large for a typical thread stack.
     * Static storage is also zero-initialised, whereas the original read an
     * uninitialised automatic array (undefined behaviour). */
    static unsigned long M[K][NUM_BLOCCHI];

    /* Set-up phase: compute the per-row statistics in parallel. */
    double t1 = omp_get_wtime();
#pragma omp parallel for schedule(dynamic, K/4)
    for (int i = 0; i < K; ++i)
        get_wt_k(M, NUM_BLOCCHI, i, &wt[i]);
    double t2 = omp_get_wtime();
    double t_set_up = t2 - t1;

    /* Elimination phase. */
    double t3 = omp_get_wtime();
    bit_gaussian_elimination_mod_2(M, K, NUM_BLOCCHI * N_BITS, NUM_BLOCCHI, wt);
    double t4 = omp_get_wtime();
    double t_gauss = t4 - t3;

    printf("#time_gauss time_set_up time_totale\n");
    printf("%.6f ", t_gauss);
    printf("%.6f ", t_set_up);
    printf("%.6f\n", t_gauss + t_set_up);
    return 0;
}
Common.h
// Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved. // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#ifndef COMMON_H #define COMMON_H #include <cmath> #include <optional> #include <yato/array_view.h> //#define CHECK_ASSERT #include <training/system/Errors.h> #include <training/system/Name.h> #include <training/system/TypeHalf.h> #include <training/system/Types.h> #include <map> #include <utility> #if defined(_BLAS) && !defined(_BLAS_ENHANCE) extern "C" { #include <cblas.h> } #else #ifndef OPENBLAS_CONST #define OPENBLAS_CONST const #endif typedef enum CBLAS_TRANSPOSE { CblasNoTrans = 111, CblasTrans = 112, CblasConjTrans = 113, CblasConjNoTrans = 114 } CBLAS_TRANSPOSE; typedef enum CBLAS_UPLO { CblasUpper = 121, CblasLower = 122 } CBLAS_UPLO; #endif #ifdef CHECK_ASSERT #define CHECK_NEAR ASSERT_NEAR #else #define CHECK_NEAR EXPECT_NEAR #endif #define RAUL_E 2.71828182845904523536 // e #define RAUL_LOG2E 1.44269504088896340736 // log2(e) #define RAUL_LOG10E 0.434294481903251827651 // log10(e) #define RAUL_LN2 0.693147180559945309417 // ln(2) #define RAUL_LN10 2.30258509299404568402 // ln(10) #define RAUL_PI 3.14159265358979323846 // pi #define RAUL_PI_2 1.57079632679489661923 // pi/2 #define RAUL_PI_4 0.785398163397448309616 // pi/4 #define RAUL_1_PI 0.318309886183790671538 // 1/pi #define RAUL_2_PI 0.636619772367581343076 // 2/pi #define RAUL_2_SQRTPI 1.12837916709551257390 // 2/sqrt(pi) #define RAUL_SQRT2_PI 0.79788456080286535588 // sqrt(2/pi) #define RAUL_SQRT2 1.41421356237309504880 // sqrt(2) #define RAUL_SQRT1_2 0.707106781186547524401 // 1/sqrt(2) #define GELU_CONST 0.044715 namespace raul { enum class Limit : int { Left = 0, Middle = 1, Right = 2 }; enum class Dimension : int { Default = -1, Batch = 0, Depth = 1, Height = 2, Width = 3 }; #if defined(_MSC_VER) #define INLINE __forceinline #else #define INLINE __attribute__((always_inline)) #endif template<typename Type> class TensorImpl; typedef TensorImpl<dtype> Tensor; typedef TensorImpl<half> TensorFP16; #if defined(ANDROID) #define TOMMTYPE(var) static_cast<typename MM::type>(var) #else #define 
TOMMTYPE(var) castHelper<typename MM::type>::cast(var) #endif using shape = yato::dimensionality<4U, size_t>; } // raul namespace namespace raul { enum class NetworkMode { Train = 0, Test = 1, TrainCheckpointed = 2 }; enum class CompressionMode { NONE = -1, FP16 = 0, INT8 = 1 }; enum class CalculationMode { DETERMINISTIC = 0, #if defined(_OPENMP) FAST = 1, #endif }; /** * @brief Hardware target platform * */ enum class ExecutionTarget { CPU = 0, CPUFP16 = 1 }; /** * @brief Hardware target platform per layer * * \note Might override execution target for workflow, useful for mixed precision */ enum class LayerExecutionTarget { Default = -1, // use same as ExecutionTarget CPU = 0, // from this point enums should be aligned with ExecutionTarget (due to LayerExecutionTarget = static_cast<ExecutionTarget>(enum)) CPUFP16 = 1 }; /** * @brief Memory allocation mode */ enum class AllocationMode { STANDARD, POOL }; enum class DeclarationType { Tensor = 0, Shape = 1, // Alias = 2 }; class OpenclInitializer; class Common { public: // generate vector of random index permutation of [0..n-1] static void generate_permutation(size_t n, std::vector<size_t>& ind_vector, unsigned int seed = 0); /* * [cols x rows] * A[k x m] * B[n x k] * C[n x m] * https://software.intel.com/en-us/mkl-developer-reference-c-cblas-gemm * C = alpha * A * B + beta * C * bOffset - in elements (not bytes) */ static void gemm(OPENBLAS_CONST CBLAS_TRANSPOSE transA, OPENBLAS_CONST CBLAS_TRANSPOSE transB, size_t m, size_t n, size_t k, OPENBLAS_CONST dtype alpha, OPENBLAS_CONST dtype* a, OPENBLAS_CONST dtype* b, OPENBLAS_CONST dtype beta, dtype* c); static void gemm(OPENBLAS_CONST CBLAS_TRANSPOSE transA, OPENBLAS_CONST CBLAS_TRANSPOSE transB, size_t m, size_t n, size_t k, OPENBLAS_CONST dtype alpha, OPENBLAS_CONST half* a, OPENBLAS_CONST half* b, OPENBLAS_CONST dtype beta, half* c); /** * @brief : Basic Linear Algebra Subroutine y = y + ax * * \f[ * \vec{y} = \vec{y} + \alpha * \vec{x}, * \f] * * @param n The 
number of elements in vectors x and y. * @param sa The scalar alpha. * @param sx The vector x of length n. Specified as: a one-dimensional array of (at least) length \f$ 1+(n-1)|incx| \f$. * @param incx The stride for vector x. Specified as: an integer. It can have any value. * @param sy The vector y of length n. Specified as: a one-dimensional array of (at least) length \f$ 1+(n-1)|incy| \f$. * @param incy The stride for vector y. * @param xOffset The offset for vector x. * @param yOffset The offset for vector y. * @return The vector y, containing the results of the computation. */ static void axpy(size_t n, OPENBLAS_CONST dtype sa, OPENBLAS_CONST dtype* sx, size_t incx, dtype* sy, size_t incy, size_t xOffset = 0, size_t yOffset = 0); static void axpy(size_t n, OPENBLAS_CONST dtype sa, OPENBLAS_CONST half* sx, size_t incx, half* sy, size_t incy, size_t xOffset = 0, size_t yOffset = 0); /** * @brief : Basic Linear Algebra Subroutine y = ax + by * * \f[ * \vec{y} = \alpha \vec{x} + \beta \vec{y}, * \f] * * @param n The number of elements in vectors x and y. * @param alpha The scalar alpha. * @param x The vector x of length n. Specified as: a one-dimensional array of (at least) length \f$ 1+(n-1)|incx| \f$. * @param incx The stride for vector x. Specified as: an integer. It can have any value. * @param beta The scalar beta. * @param y The vector y of length n. Specified as: a one-dimensional array of (at least) length \f$ 1+(n-1)|incy| \f$. * @param incy The stride for vector y. * @param xOffset The offset for vector x. * @param yOffset The offset for vector y. * @return The vector y, containing the results of the computation. 
*/ static int axpby(OPENBLAS_CONST size_t n, OPENBLAS_CONST dtype alpha, OPENBLAS_CONST dtype* x, OPENBLAS_CONST size_t incx, OPENBLAS_CONST dtype beta, dtype* y, OPENBLAS_CONST size_t incy, size_t xOffset, size_t yOffset); static int axpby(OPENBLAS_CONST size_t n, OPENBLAS_CONST dtype alpha, OPENBLAS_CONST half* x, OPENBLAS_CONST size_t incx, OPENBLAS_CONST dtype beta, half* y, OPENBLAS_CONST size_t incy, size_t xOffset, size_t yOffset); /** * @brief : Basic Linear Algebra Subroutine y = alpha * a * x + beta * y * * Vector by vector element wise multiplication * * \f[ * \vec{y} = \alpha \vec{a} \vec{x} + \beta \vec{y}, * \f] * * @param n The number of elements in vectors x and y. * @param alpha The scalar alpha. * @param a The vector of length n. * @param x The vector x of length n. Specified as: a one-dimensional array of (at least) length \f$ 1+(n-1)|incx| \f$. * @param incx The stride for vector x. Specified as: an integer. It can have any value. * @param beta The scalar beta. * @param y The vector y of length n. Specified as: a one-dimensional array of (at least) length \f$ 1+(n-1)|incy| \f$. * @param incy The stride for vector y. 
*/ static void hadamard(OPENBLAS_CONST size_t n, OPENBLAS_CONST dtype alpha, OPENBLAS_CONST dtype* a, OPENBLAS_CONST dtype* x, OPENBLAS_CONST size_t incx, OPENBLAS_CONST dtype beta, dtype* y, OPENBLAS_CONST size_t incy); static dtype dot(size_t n, OPENBLAS_CONST dtype* sx, size_t incx, OPENBLAS_CONST dtype* sy, size_t incy); static void scal(size_t n, OPENBLAS_CONST dtype sa, dtype* sx, size_t incx); static void transpose(Tensor& tensor, size_t cols); static void transpose(TensorFP16& tensor, size_t cols); /* * memory for dst should be allocated externaly */ static void addPadding1D(const dtype* src, dtype* dst, size_t srcChannels, size_t srcSize, size_t dstSize, bool reversedOrder = false); template<typename T> static void addPadding2D(const T* src, T* dst, size_t srcChannels, size_t srcWidth, size_t srcHeight, size_t dstWidth, size_t dstHeight) { if ((dstWidth >= srcWidth) && (dstHeight >= srcHeight)) { size_t padWidth = dstWidth - srcWidth; size_t padHeight = dstHeight - srcHeight; size_t leftPad = padWidth / 2; // size_t rightPad = padWidth - leftPad; size_t topPad = padHeight / 2; size_t bottomPad = padHeight - topPad; for (size_t d = 0; d < srcChannels; ++d) { // top for (size_t y = 0; y < topPad; ++y) { for (size_t x = 0; x < dstWidth; ++x) { dst[d * dstWidth * dstHeight + dstWidth * y + x] = static_cast<T>(0.0_dt); } } for (size_t y = topPad; y < topPad + srcHeight; ++y) { // left for (size_t x = 0; x < leftPad; ++x) { dst[d * dstWidth * dstHeight + dstWidth * y + x] = static_cast<T>(0.0_dt); } // src for (size_t x = leftPad; x < leftPad + srcWidth; ++x) { dst[d * dstWidth * dstHeight + dstWidth * y + x] = src[d * srcWidth * srcHeight + srcWidth * (y - topPad) + x - leftPad]; } // right for (size_t x = leftPad + srcWidth; x < dstWidth; ++x) { dst[d * dstWidth * dstHeight + dstWidth * y + x] = static_cast<T>(0.0_dt); } } // bottom for (size_t y = dstHeight - bottomPad; y < dstHeight; ++y) { for (size_t x = 0; x < dstWidth; ++x) { dst[d * dstWidth * dstHeight 
+ dstWidth * y + x] = static_cast<T>(0.0_dt); } } } } } /* * memory for dst should be allocated externaly */ static void removePadding1D(const dtype* src, dtype* dst, size_t srcChannels, size_t srcSize, size_t dstSize, bool reversedOrder = false, bool overwrite = true); template<typename T> static void removePadding2D(const T* src, T* dst, size_t srcChannels, size_t srcWidth, size_t srcHeight, size_t dstWidth, size_t dstHeight, bool overwrite = true) { if ((dstWidth <= srcWidth) && (dstHeight <= srcHeight)) { size_t padWidth = srcWidth - dstWidth; size_t padHeight = srcHeight - dstHeight; size_t leftPad = padWidth / 2; // size_t rightPad = padWidth - leftPad; size_t topPad = padHeight / 2; // size_t bottomPad = padHeight - topPad; if (overwrite) { for (size_t d = 0; d < srcChannels; ++d) { for (size_t y = 0; y < dstHeight; ++y) { for (size_t x = 0; x < dstWidth; ++x) { dst[d * dstWidth * dstHeight + dstWidth * y + x] = src[d * srcWidth * srcHeight + srcWidth * (y + topPad) + x + leftPad]; } } } } else { for (size_t d = 0; d < srcChannels; ++d) { for (size_t y = 0; y < dstHeight; ++y) { for (size_t x = 0; x < dstWidth; ++x) { dst[d * dstWidth * dstHeight + dstWidth * y + x] += src[d * srcWidth * srcHeight + srcWidth * (y + topPad) + x + leftPad]; } } } } } } /* * paddingWidth, paddingHeight - zero padding added for both sides of the input * memory for matrix should be allocated externaly */ template<typename T> static void im2col(const T* image, size_t imageWidth, size_t imageHeight, size_t imageChannels, size_t filterWidth, size_t filterHeight, size_t strideWidth, size_t strideHeight, size_t paddingWidth, size_t paddingHeight, T* matrix, bool reversedOrder = false); static size_t im2colOutputSize(size_t imageWidth, size_t imageHeight, size_t imageChannels, size_t filterWidth, size_t filterHeight, size_t strideWidth, size_t strideHeight, size_t paddingWidth, size_t paddingHeight, size_t dilationWidth, size_t dilationHeight); /* * paddingWidth, paddingHeight - zero 
padding added for both sides of the input * memory for image should be allocated externaly */ template<typename T> static void col2im(const T* matrix, size_t imageWidth, size_t imageHeight, size_t imageChannels, size_t filterWidth, size_t filterHeight, size_t strideWidth, size_t strideHeight, size_t paddingWidth, size_t paddingHeight, T* image, bool reversedOrder = false, bool zeroOutput = true); /* * Rectified Linear Unit */ template<typename T> static T ReLU(T x) { return std::max(static_cast<T>(0), x); } template<typename T> static T ReLU6(T x) { return std::min(std::max(static_cast<T>(0), x), static_cast<T>(6.0_dt)); } template<typename T> static void ReLU(const T& in, T& out) { std::transform(in.begin(), in.end(), out.begin(), [&](typename T::type val) -> typename T::type { return ReLU(val); }); } template<typename T> static void ReLU6(const T& in, T& out) { std::transform(in.begin(), in.end(), out.begin(), [&](typename T::type val) -> typename T::type { return ReLU6(val); }); } template<typename T> static void ReLUBackward(const T& out, const T& delta, T& prevDelta) { #if defined(_OPENMP) #pragma omp parallel for #endif for (size_t q = 0; q < prevDelta.size(); ++q) { prevDelta[q] += (out[q] > static_cast<typename T::type>(0)) ? delta[q] : static_cast<typename T::type>(0); } } template<typename T> static void ReLU6Backward(const T& out, const T& delta, T& prevDelta) { #if defined(_OPENMP) #pragma omp parallel for #endif for (size_t q = 0; q < prevDelta.size(); ++q) { prevDelta[q] += (out[q] > static_cast<typename T::type>(0) && out[q] < static_cast<typename T::type>(6.0f)) ? 
delta[q] : static_cast<typename T::type>(0); } } /* * Gaussian error linear unit * @see https://arxiv.org/abs/1606.08415 */ static dtype GeLU_Erf(dtype x); static dtype GeLU_Tanh(dtype x); /* * Hard Sigmoid */ template<typename T> static T HSigmoid(T x) { return static_cast<T>(ReLU6(TODTYPE(x) + 3.0_dt) / 6.0_dt); } /* * Hard Swish */ template<typename T> static T HSwish(T x) { return x * HSigmoid(x); } static dtype sign(dtype x) { return TODTYPE((0.0_dt < x) - (x < 0.0_dt)); } template<typename T, typename U> static void copyView(const T& view_from, U& view_to, const bool overwrite = false) { auto retLhs = [](typename T::value_type& lhs, [[maybe_unused]] typename T::value_type& rhs) { return lhs; }; auto copyViewImpl = [](const T& view_from, U& view_to, auto&& func) { for (size_t i1 = 0; i1 < view_from.size(0); ++i1) { for (size_t i2 = 0; i2 < view_from.size(1); ++i2) { for (size_t i3 = 0; i3 < view_from.size(2); ++i3) { for (size_t i4 = 0; i4 < view_from.size(3); ++i4) { view_to[i1][i2][i3][i4] = func(view_from[i1][i2][i3][i4], view_to[i1][i2][i3][i4]); } } } } }; if (overwrite) { copyViewImpl(view_from, view_to, retLhs); } else { copyViewImpl(view_from, view_to, std::plus<typename T::value_type>()); } } template<typename T> static void unpack4D(const T& src, T& dst, Dimension dir, size_t index, const Name& layerType, const Name& layerName, bool overwrite) { auto input4d = src.get4DView(); auto inputDims = yato::dims(src.getDepth(), src.getHeight(), src.getWidth()); auto outputDims = dst.getShape(); const typename T::type* startEl = nullptr; switch (dir) { case Dimension::Depth: startEl = &input4d[0][index][0][0]; break; case Dimension::Height: startEl = &input4d[0][0][index][0]; break; default: throw std::runtime_error(layerType + "[" + layerName + "]: unpack4D unknown dim"); } auto srcView = yato::array_view_4d<const typename T::type>(startEl, outputDims, inputDims); auto outputView = dst.get4DView(); Common::copyView(srcView, outputView, overwrite); } 
/**
 * @brief Copies tensor src into a slice of the (larger) tensor dst, placed at
 * position `index` along dimension `dir` (only Depth and Height are supported).
 * @param overwrite if true the destination slice is overwritten, otherwise src is accumulated into it (see Common::copyView)
 * @throws std::runtime_error for any dimension other than Depth or Height
 */
template<typename T>
static void pack4D(const T& src, T& dst, Dimension dir, size_t index, const Name& layerType, const Name& layerName, bool overwrite)
{
    auto output4d = dst.get4DView();
    // Strides of the destination tensor: the slice view below is addressed
    // with dst's dimensions so writes land at the correct offsets in dst.
    yato::dimensionality<3U, size_t> concatDims(dst.getDepth(), dst.getHeight(), dst.getWidth());
    auto srcView = src.get4DView();
    typename T::type* startEl = nullptr;
    // Origin of the destination slice inside dst's storage.
    switch (dir)
    {
        case Dimension::Depth:
            startEl = &output4d[0][index][0][0];
            break;
        case Dimension::Height:
            startEl = &output4d[0][0][index][0];
            break;
        default:
            throw std::runtime_error(layerType + "[" + layerName + "]: pack4D unknown dim");
    }
    // View with src's shape laid over dst's memory, starting at the slice origin.
    auto dstView = yato::array_view_4d<typename T::type>(startEl, src.getShape(), concatDims);
    Common::copyView(srcView, dstView, overwrite);
}

/*
 * Upper triangle of a rectangular array
 */
// Zeroes every element strictly below the diag-th diagonal (keeps c >= r + diag),
// analogous to numpy.triu.
template<typename T>
static void triu(T* data, size_t nrows, size_t ncols, int diag = 0)
{
    size_t i = 0; // running flat index into the row-major data
    int cols = (int)ncols;
    int rows = (int)nrows;
    for (int r = 0; r < rows; ++r)
    {
        for (int c = 0; c < cols; ++c, ++i)
        {
            if (c - r - diag < 0)
            {
                data[i] = static_cast<T>(0);
            }
        }
    }
}

/*
 * Applies a 1D convolution over an input signal composed of several input planes.
 * Supports 2 modes:
 * 1. PyTorch style: Input[N, C, 1, L1] (or [N, 1, C, L1]) -> Output[N, FILTERS, 1, L2] (or [N, 1, FILTERS, L2])
 * 2. 
TensorFlow style: Input[N, L1, 1, C] (or [N, 1, L1, C]) -> Output[N, L2, 1, FILTERS] (or [N, 1, L2, FILTERS]) * Output is not zeroed prior to convolution (operator += is used) */ static void conv1d(const dtype* input, dtype* output, const dtype* kernel, const dtype* bias, size_t batchSize, size_t inputSize, size_t inputChannels, size_t outputSize, size_t outputChannels, size_t kernelSize, size_t padding, size_t stride, size_t dilation = 1U, size_t groups = 1U, bool tfStyle = false); /* * Applies 2D convolution over input tensor, all channels convolved * Output is not zeroed prior to convolution (operator += is used) */ template<typename T> static void conv2d(const T* input, T* output, const T* kernel, const T* bias, size_t batchSize, size_t inputWidth, size_t inputHeight, size_t inputChannels, size_t outputWidth, size_t outputHeight, size_t outputChannels, size_t kernelWidth, size_t kernelHeight, size_t paddingW, size_t paddingH, size_t strideW, size_t strideH, size_t dilationW = 1U, size_t dilationH = 1U, size_t groups = 1U) { auto inputs3D = yato::array_view_3d<T>(const_cast<T*>(input), yato::dims(batchSize, inputChannels, inputHeight * inputWidth)); auto outputs3D = yato::array_view_3d<T>(output, yato::dims(batchSize, outputChannels, outputHeight * outputWidth)); auto kernelsWeights4D = yato::array_view_4d<T>(const_cast<T*>(kernel), yato::dims(outputChannels, inputChannels / groups, kernelHeight, kernelWidth)); for (size_t q = 0; q < batchSize; ++q) { for (size_t d = 0; d < outputChannels; ++d) { std::fill(outputs3D[q][d].begin(), outputs3D[q][d].end(), static_cast<T>(0.0_dt)); } size_t inputWidthPadded = inputWidth + 2 * paddingW; size_t inputHeightPadded = inputHeight + 2 * paddingH; std::vector<T> inputPadded(inputChannels * inputHeightPadded * inputWidthPadded); Common::addPadding2D(&inputs3D[q][0][0], inputPadded.data(), inputChannels, inputWidth, inputHeight, inputWidthPadded, inputHeightPadded); auto inputPadded2D = 
yato::view(inputPadded).reshape(yato::dims(inputChannels, inputHeightPadded * inputWidthPadded)); for (size_t group = 0; group < groups; ++group) { for (size_t kernelIndex = 0; kernelIndex < outputChannels / groups; ++kernelIndex) { for (size_t d = 0; d < inputChannels / groups; ++d) { for (size_t oy = 0; oy < outputHeight; ++oy) { for (size_t ox = 0; ox < outputWidth; ++ox) { for (size_t ky = 0; ky < kernelHeight; ++ky) { for (size_t kx = 0; kx < kernelWidth; ++kx) { outputs3D[q][kernelIndex + group * outputChannels / groups][oy * outputWidth + ox] += kernelsWeights4D[kernelIndex + group * outputChannels / groups][d][ky][kx] * inputPadded2D[d + group * inputChannels / groups][oy * inputWidthPadded * strideH + ky * dilationH * inputWidthPadded + ox * strideW + kx * dilationW]; } } } } } } } } if (bias) { for (size_t q = 0; q < batchSize; ++q) { for (size_t kernelIndex = 0; kernelIndex < outputChannels; ++kernelIndex) { for (size_t oy = 0; oy < outputHeight; ++oy) { for (size_t ox = 0; ox < outputWidth; ++ox) { outputs3D[q][kernelIndex][oy * outputWidth + ox] += bias[kernelIndex]; } } } } } } template<typename T = dtype, typename Iterator> static void arange(Iterator begin, Iterator end, T start = static_cast<T>(0), T step = static_cast<T>(1)) { auto val = start; for (auto p = begin; p != end; ++p) { *p = static_cast<std::remove_reference_t<decltype(*p)>>(val); val += step; } } template<typename T = dtype, typename Iterable> static void arange(Iterable& i, T start = static_cast<T>(0), T step = static_cast<T>(1)) { return arange(i.begin(), i.end(), start, step); } static void replaceAll(std::string& str, const std::string& srcSubstr, const std::string& tgtSubstr) { size_t start_pos = 0; while ((start_pos = str.find(srcSubstr, start_pos)) != std::string::npos) { str.replace(start_pos, srcSubstr.length(), tgtSubstr); start_pos += tgtSubstr.length(); // srcSubstr could be a substring of tgtSubstr } } static bool startsWith(const std::string& str, const std::string& 
srcSubstr) { return (str.rfind(srcSubstr, 0) == 0); } static std::vector<std::string> split(const std::string& string, char delimeter); /* * @see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html */ template<typename T> static bool shapeIsBroadcastable(const T& from, const T& to) { const auto n = to.dimensions_num(); for (size_t i = 0; i < n; ++i) { if (from[i] != to[i] && from[i] != 1U && to[i] != 1U) { return false; } } return true; } static bool endsWith(std::string const& value, std::string const& ending) { if (ending.size() > value.size()) { return false; } return std::equal(ending.rbegin(), ending.rend(), value.rbegin()); } static shape getStrides(const shape& tensor_shape); static shape offsetToIndexes(size_t offset, const shape& strides); static size_t indexesToOffset(const shape& indexes, const shape& strides); }; template<class T> bool if_equals(const std::string&& error, const T val1, const T val2) { if (val1 != val2) { throw(std::runtime_error(error)); } return val1 == val2; } } // raul namespace #endif // COMMON_H
omp_critical_with_hint.c
// RUN: %libomp-compile-and-run // critial with hint was introduced with icc 19 // UNSUPPORTED: icc-18 #include <stdio.h> #include <omp.h> #include "omp_testsuite.h" int test_omp_critical(int iter) { int sum; int known_sum; sum = 0; #pragma omp parallel { int mysum = 0; int i; #pragma omp for for (i = 0; i < 1000; i++) mysum = mysum + i; switch (iter % 4) { case 0: #pragma omp critical(c0) hint(omp_sync_hint_uncontended) sum = mysum + sum; break; case 1: #pragma omp critical(c1) hint(omp_sync_hint_contended) sum = mysum + sum; break; case 2: #pragma omp critical(c2) hint(omp_sync_hint_nonspeculative) sum = mysum + sum; break; case 3: #pragma omp critical(c3) hint(omp_sync_hint_speculative) sum = mysum + sum; break; default:; } } known_sum = 999 * 1000 / 2; return (known_sum == sum); } int main() { int i; int num_failed = 0; for (i = 0; i < 4 * REPETITIONS; i++) { if (!test_omp_critical(i)) { num_failed++; } } return num_failed; }
blurtiles.c
#include <stdlib.h> #include "blurtiles.h" void blurtiles(float* l,int m,int n,float*output){ #pragma omp parallel for for (int H80 = 0; H80 < 1; H80++) { for (int H81 = 0; H81 < m; H81++) { float tmp2 = 0; float tmp3 = 0; if (0 <= H80 - (1)) { float tmp4 = 0; float tmp5 = 0; if (0 <= H81 - (1)) { tmp5 = l[(((m)) * (H80 - (1))) + H81 - (1)]; } float tmp6 = 0; tmp6 = l[(((m)) * (H80 - (1))) + H81]; tmp4 = tmp5 + tmp6; float tmp7 = 0; if (H81 + 1 < m) { tmp7 = l[(((m)) * (H80 - (1))) + H81 + 1]; } tmp3 = tmp4 + tmp7; } float tmp8 = 0; float tmp9 = 0; float tmp10 = 0; if (0 <= H81 - (1)) { tmp10 = l[(((m)) * (H80)) + H81 - (1)]; } float tmp11 = 0; tmp11 = l[(((m)) * (H80)) + H81]; tmp9 = tmp10 + tmp11; float tmp12 = 0; if (H81 + 1 < m) { tmp12 = l[(((m)) * (H80)) + H81 + 1]; } tmp8 = tmp9 + tmp12; tmp2 = tmp3 + tmp8; float tmp13 = 0; float tmp14 = 0; float tmp15 = 0; if (0 <= H81 - (1)) { tmp15 = l[(((m)) * (H80 + 1)) + H81 - (1)]; } float tmp16 = 0; tmp16 = l[(((m)) * (H80 + 1)) + H81]; tmp14 = tmp15 + tmp16; float tmp17 = 0; if (H81 + 1 < m) { tmp17 = l[(((m)) * (H80 + 1)) + H81 + 1]; } tmp13 = tmp14 + tmp17; output[(m) * (H80) + H81] = tmp2 + tmp13; } } #pragma omp parallel for for (int H128 = 1; H128 < (n - (1 + 0)); H128++) { for (int H129 = 0; H129 < 1; H129++) { float tmp18 = 0; float tmp19 = 0; float tmp20 = 0; float tmp21 = 0; if (0 <= H129 - (1)) { tmp21 = l[(((m)) * (H128 - (1))) + H129 - (1)]; } float tmp22 = 0; tmp22 = l[(((m)) * (H128 - (1))) + H129]; tmp20 = tmp21 + tmp22; float tmp23 = 0; tmp23 = l[(((m)) * (H128 - (1))) + H129 + 1]; tmp19 = tmp20 + tmp23; float tmp24 = 0; float tmp25 = 0; float tmp26 = 0; if (0 <= H129 - (1)) { tmp26 = l[(((m)) * (H128)) + H129 - (1)]; } float tmp27 = 0; tmp27 = l[(((m)) * (H128)) + H129]; tmp25 = tmp26 + tmp27; float tmp28 = 0; tmp28 = l[(((m)) * (H128)) + H129 + 1]; tmp24 = tmp25 + tmp28; tmp18 = tmp19 + tmp24; float tmp29 = 0; float tmp30 = 0; float tmp31 = 0; if (0 <= H129 - (1)) { tmp31 = l[(((m)) * (H128 + 1)) + 
H129 - (1)]; } float tmp32 = 0; tmp32 = l[(((m)) * (H128 + 1)) + H129]; tmp30 = tmp31 + tmp32; float tmp33 = 0; tmp33 = l[(((m)) * (H128 + 1)) + H129 + 1]; tmp29 = tmp30 + tmp33; output[(((1 + ((m - (1 + 0)) - (1 + 0))) + (m - (m - (1))))) * (((H128 - (1)) + 1)) + H129] = tmp18 + tmp29; } } #pragma omp parallel for for (int H150 = 0; H150 < (((n - (1 + 0)) - (1)) / (64)); H150++) { for (int H162 = 0; H162 < ((((m - (1 + 0)) - (1 + 0))) / (64)); H162++) { float* tmp34 = (float*) calloc(1,(66) * (64) * sizeof (float)); for (int H165 = 0; H165 < 66; H165++) { for (int H166 = 0; H166 < 64; H166++) { float tmp35 = 0; float tmp36 = 0; tmp36 = l[(((m)) * ((H150) * (64) + H165)) + (H162) * (64) + H166]; float tmp37 = 0; tmp37 = l[(((m)) * ((H150) * (64) + H165)) + (H162) * (64) + H166 + 1]; tmp35 = tmp36 + tmp37; float tmp38 = 0; tmp38 = l[(((m)) * ((H150) * (64) + H165)) + (H162) * (64) + H166 + 2]; tmp34[(64) * (H165) + H166] = tmp35 + tmp38; } } float* x10 = tmp34; for (int H168 = 0; H168 < 64; H168++) { for (int H169 = 0; H169 < 64; H169++) { float tmp39 = 0; float tmp40 = 0; tmp40 = x10[(((64)) * (H168)) + H169]; float tmp41 = 0; tmp41 = x10[(((64)) * (H168 + 1)) + H169]; tmp39 = tmp40 + tmp41; float tmp42 = 0; tmp42 = x10[(((64)) * (H168 + 2)) + H169]; output[(((1 + ((m - (1 + 0)) - (1 + 0))) + (m - (m - (1))))) * (((H150) * (64) + H168 + 1)) + ((H162) * (64) + H169 + 1)] = tmp39 + tmp42; } } free(tmp34); } for (int H188 = ((((m - (1 + 0)) - (1 + 0))) / (64)); H188 < ((((m - (1 + 0)) - (1 + 0))) / (64)) + ((((((m - (1 + 0)) - (1 + 0))) % (64))) + (64) - 1 ) / (64); H188++) { for (int H189 = 0; H189 < 64; H189++) { for (int H190 = 0; H190 < 64; H190++) { if ((H188) * (64) + H189 < ((m - (1 + 0)) - (1 + 0))) { if ((H150) * (64) + H190 < (n - (1 + 0)) - (1)) { float tmp43 = 0; float tmp44 = 0; float tmp45 = 0; float tmp46 = 0; tmp46 = l[(((m)) * ((H150) * (64) + H190)) + (H188) * (64) + H189]; float tmp47 = 0; tmp47 = l[(((m)) * ((H150) * (64) + H190)) + (H188) * (64) + 
H189 + 1]; tmp45 = tmp46 + tmp47; float tmp48 = 0; tmp48 = l[(((m)) * ((H150) * (64) + H190)) + (H188) * (64) + H189 + 1 + 1]; tmp44 = tmp45 + tmp48; float tmp49 = 0; float tmp50 = 0; float tmp51 = 0; tmp51 = l[(((m)) * ((H150) * (64) + H190 + 1)) + (H188) * (64) + H189]; float tmp52 = 0; tmp52 = l[(((m)) * ((H150) * (64) + H190 + 1)) + (H188) * (64) + H189 + 1]; tmp50 = tmp51 + tmp52; float tmp53 = 0; tmp53 = l[(((m)) * ((H150) * (64) + H190 + 1)) + (H188) * (64) + H189 + 1 + 1]; tmp49 = tmp50 + tmp53; tmp43 = tmp44 + tmp49; float tmp54 = 0; float tmp55 = 0; float tmp56 = 0; tmp56 = l[(((m)) * ((H150) * (64) + H190 + 1 + 1)) + (H188) * (64) + H189]; float tmp57 = 0; tmp57 = l[(((m)) * ((H150) * (64) + H190 + 1 + 1)) + (H188) * (64) + H189 + 1]; tmp55 = tmp56 + tmp57; float tmp58 = 0; tmp58 = l[(((m)) * ((H150) * (64) + H190 + 1 + 1)) + (H188) * (64) + H189 + 1 + 1]; tmp54 = tmp55 + tmp58; output[(((1 + ((m - (1 + 0)) - (1 + 0))) + (m - (m - (1))))) * (((H150) * (64) + H190 + 1)) + ((((H188 - (((((m - (1 + 0)) - (1 + 0))) / (64)))) + ((((m - (1 + 0)) - (1 + 0))) / (64)))) * (64) + H189 + 1)] = tmp43 + tmp54; } } } } } } #pragma omp parallel for for (int H191 = (((n - (1 + 0)) - (1)) / (64)); H191 < (((n - (1 + 0)) - (1)) / (64)) + (((((n - (1 + 0)) - (1)) % (64))) + (64) - 1 ) / (64); H191++) { for (int H192 = 0; H192 < ((((m - (1 + 0)) - (1 + 0))) / (64)) + ((((((m - (1 + 0)) - (1 + 0))) % (64))) + (64) - 1 ) / (64); H192++) { for (int H193 = 0; H193 < 64; H193++) { for (int H194 = 0; H194 < 64; H194++) { if ((H192) * (64) + H193 < ((m - (1 + 0)) - (1 + 0))) { if ((H191) * (64) + H194 < (n - (1 + 0)) - (1)) { float tmp59 = 0; float tmp60 = 0; float tmp61 = 0; float tmp62 = 0; tmp62 = l[(((m)) * ((H191) * (64) + H194)) + (H192) * (64) + H193]; float tmp63 = 0; tmp63 = l[(((m)) * ((H191) * (64) + H194)) + (H192) * (64) + H193 + 1]; tmp61 = tmp62 + tmp63; float tmp64 = 0; tmp64 = l[(((m)) * ((H191) * (64) + H194)) + (H192) * (64) + H193 + 1 + 1]; tmp60 = tmp61 + 
tmp64; float tmp65 = 0; float tmp66 = 0; float tmp67 = 0; tmp67 = l[(((m)) * ((H191) * (64) + H194 + 1)) + (H192) * (64) + H193]; float tmp68 = 0; tmp68 = l[(((m)) * ((H191) * (64) + H194 + 1)) + (H192) * (64) + H193 + 1]; tmp66 = tmp67 + tmp68; float tmp69 = 0; tmp69 = l[(((m)) * ((H191) * (64) + H194 + 1)) + (H192) * (64) + H193 + 1 + 1]; tmp65 = tmp66 + tmp69; tmp59 = tmp60 + tmp65; float tmp70 = 0; float tmp71 = 0; float tmp72 = 0; tmp72 = l[(((m)) * ((H191) * (64) + H194 + 1 + 1)) + (H192) * (64) + H193]; float tmp73 = 0; tmp73 = l[(((m)) * ((H191) * (64) + H194 + 1 + 1)) + (H192) * (64) + H193 + 1]; tmp71 = tmp72 + tmp73; float tmp74 = 0; tmp74 = l[(((m)) * ((H191) * (64) + H194 + 1 + 1)) + (H192) * (64) + H193 + 1 + 1]; tmp70 = tmp71 + tmp74; output[(((1 + ((m - (1 + 0)) - (1 + 0))) + (m - (m - (1))))) * (((((H191 - ((((n - (1 + 0)) - (1)) / (64)))) + (((n - (1 + 0)) - (1)) / (64)))) * (64) + H194 + 1)) + ((H192) * (64) + H193 + 1)] = tmp59 + tmp70; } } } } } } #pragma omp parallel for for (int H195 = 1; H195 < (n - (1 + 0)); H195++) { for (int H196 = m - (1); H196 < m; H196++) { float tmp75 = 0; float tmp76 = 0; float tmp77 = 0; float tmp78 = 0; float tmp79 = 0; tmp79 = l[(((m)) * (H195 - (1))) + H196 - (1)]; float tmp80 = 0; tmp80 = l[(((m)) * (H195 - (1))) + H196]; tmp78 = tmp79 + tmp80; float tmp81 = 0; if (H196 + 1 < m) { tmp81 = l[(((m)) * (H195 - (1))) + H196 + 1]; } tmp77 = tmp78 + tmp81; float tmp82 = 0; float tmp83 = 0; float tmp84 = 0; tmp84 = l[(((m)) * (H195)) + H196 - (1)]; float tmp85 = 0; tmp85 = l[(((m)) * (H195)) + H196]; tmp83 = tmp84 + tmp85; float tmp86 = 0; if (H196 + 1 < m) { tmp86 = l[(((m)) * (H195)) + H196 + 1]; } tmp82 = tmp83 + tmp86; tmp76 = tmp77 + tmp82; float tmp87 = 0; float tmp88 = 0; tmp88 = l[(((m)) * (H195 + 1)) + H196 - (1)]; float tmp89 = 0; tmp89 = l[(((m)) * (H195 + 1)) + H196]; tmp87 = tmp88 + tmp89; tmp75 = tmp76 + tmp87; float tmp90 = 0; if (H196 + 1 < m) { tmp90 = l[(((m)) * (H195 + 1)) + H196 + 1]; } output[(((1 
+ ((m - (1 + 0)) - (1 + 0))) + (m - (m - (1))))) * (((H195 - (1)) + 1)) + ((H196 - (m - (1))) + (1 + ((m - (1 + 0)) - (1 + 0))))] = tmp75 + tmp90; } } #pragma omp parallel for for (int H197 = n - (1); H197 < n; H197++) { for (int H198 = 0; H198 < m; H198++) { float tmp91 = 0; float tmp92 = 0; float tmp93 = 0; float tmp94 = 0; if (0 <= H198 - (1)) { tmp94 = l[(((m)) * (H197 - (1))) + H198 - (1)]; } float tmp95 = 0; tmp95 = l[(((m)) * (H197 - (1))) + H198]; tmp93 = tmp94 + tmp95; float tmp96 = 0; if (H198 + 1 < m) { tmp96 = l[(((m)) * (H197 - (1))) + H198 + 1]; } tmp92 = tmp93 + tmp96; float tmp97 = 0; float tmp98 = 0; float tmp99 = 0; if (0 <= H198 - (1)) { tmp99 = l[(((m)) * (H197)) + H198 - (1)]; } float tmp100 = 0; tmp100 = l[(((m)) * (H197)) + H198]; tmp98 = tmp99 + tmp100; float tmp101 = 0; if (H198 + 1 < m) { tmp101 = l[(((m)) * (H197)) + H198 + 1]; } tmp97 = tmp98 + tmp101; tmp91 = tmp92 + tmp97; float tmp102 = 0; if (H197 + 1 < n) { float tmp103 = 0; float tmp104 = 0; if (0 <= H198 - (1)) { tmp104 = l[(((m)) * (H197 + 1)) + H198 - (1)]; } float tmp105 = 0; tmp105 = l[(((m)) * (H197 + 1)) + H198]; tmp103 = tmp104 + tmp105; float tmp106 = 0; if (H198 + 1 < m) { tmp106 = l[(((m)) * (H197 + 1)) + H198 + 1]; } tmp102 = tmp103 + tmp106; } output[(m) * (((H197 - (n - (1))) + (1 + ((n - (1 + 0)) - (1))))) + H198] = tmp91 + tmp102; } } }
move_shallow_water_particle_utility.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Miguel Maso Sotomayor
//                   Pablo Becker
//

#ifndef KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED
#define KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED

///@defgroup MoveShallowWaterParticleUtility
///@brief Utility to move particles on the eulerian mesh with an
/// explicit scheme. This is the basic tool of the pfem2 framework

// System includes
#include <string>
#include <iostream>
#include <algorithm>

// External includes

// Project includes
#include "includes/define.h"
#include "includes/node.h"
#include "includes/checks.h"
#include "includes/dof.h"
#include "includes/variables.h"
#include "containers/array_1d.h"
#include "containers/data_value_container.h"
#include "includes/mesh.h"
#include "utilities/math_utils.h"
#include "includes/global_pointer_variables.h"
#include "processes/node_erase_process.h"
#include "utilities/geometry_utilities.h"
#include "includes/model_part.h"
#include "includes/kratos_parameters.h"
#include "spatial_containers/spatial_containers.h"
#include "spatial_containers/cell.h"
#include "spatial_containers/bins_dynamic_objects.h"
#include "utilities/spatial_containers_configure.h"
#include "geometries/line_2d_2.h"
#include "geometries/triangle_2d_3.h"
#include "geometries/triangle_3d_3.h"
#include "geometries/point.h"
#include "shallow_water_application_variables.h"
#include "shallow_water_particle.h"
#include "utilities/openmp_utils.h"
#include "time.h"
//#include "processes/process.h"

namespace Kratos
{

//this class is to be modified by the user to customize the interpolation process
template< unsigned int TDim>
class MoveShallowWaterParticleUtility
{
public:

    // Spatial-search configuration aliases (bins over the element container).
    typedef SpatialContainersConfigure<TDim>            Configure;
    typedef typename Configure::PointType               PointType;
    typedef typename Configure::ContainerType           ContainerType;
    typedef typename Configure::IteratorType            IteratorType;
    typedef typename Configure::ResultContainerType     ResultContainerType;
    typedef typename Configure::ResultIteratorType      ResultIteratorType;
    // Non-owning vector of particle pointers (one double-sized list per element).
    typedef PointerVector< ShallowParticle, ShallowParticle*, std::vector<ShallowParticle*> > ParticlePointerVector;

    KRATOS_CLASS_POINTER_DEFINITION(MoveShallowWaterParticleUtility);

    /// Constructor: reads the convected scalar/vector variable names and the
    /// per-element particle budget from rParameters, renumbers elements and
    /// records nodal/element MEAN_SIZE, then seeds the initial particles.
    //template<unsigned int TDim>
    MoveShallowWaterParticleUtility(ModelPart& rModelPart, Parameters rParameters)
        : mrModelPart(rModelPart),
          mScalarVar1(&KratosComponents< Variable<double> >::Get( rParameters["convection_scalar_variable"].GetString() ) ),
          mVectorVar1(&KratosComponents< Variable<array_1d<double,3> > >::Get( rParameters["convection_vector_variable"].GetString() ) )
    {
        KRATOS_TRY

        std::cout << "Initializing moveparticle utility for scalar transport" << std::endl;

        Parameters default_parameters( R"( { "convection_scalar_variable" : "HEIGHT", "convection_vector_variable" : "VELOCITY", "maximum_number_of_particles" : 16 } )" );

        // Now validate agains defaults -- this also ensures no type mismatch
        rParameters.ValidateAndAssignDefaults(default_parameters);
        m_scalar_var1_name = rParameters["convection_scalar_variable"].GetString();
        m_vector_var1_name = rParameters["convection_vector_variable"].GetString();
        // NOTE(review): a particle count is read with GetDouble() and stored in
        // an integer-like member — presumably intentional, but confirm.
        mMaxNumberOfParticles = rParameters["maximum_number_of_particles"].GetDouble();

        Check();

        //storing water and air density and their inverses, just in case it is needed for the streamline integration
        //loop in elements to change their ID to their position in the array. Easier to get information later.
        //DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
        ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
        for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            ielem->SetId(ii+1);
        }
        mLastElemId= (mrModelPart.ElementsEnd()-1)->Id();
        int node_id=0;
        // we look for the smallest edge. could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions(method currently used)
        // NOTE(review): despite the comments ("smallest"/"largest"), the code
        // below stores the MEAN edge length of each node's neighbourhood.
        ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
        std::vector<unsigned int> node_partition;
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
                array_1d<double,3> position_node;
                double distance=0.0;
                position_node = pnode->Coordinates();
                GlobalPointersVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES);
                //we loop all the nodes to check all the edges
                const double number_of_neighbours = static_cast<double>(rneigh.size());
                for( GlobalPointersVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++)
                {
                    array_1d<double,3> position_difference;
                    position_difference = inode->Coordinates() - position_node;
                    const double current_distance = norm_2( position_difference );
                    // accumulating distance / count yields the mean edge length
                    distance += current_distance / number_of_neighbours;
                }
                //and we save the largest edge.
                pnode->SetValue(MEAN_SIZE, distance);
                node_id=pnode->GetId();
            }
        }
        mLastNodeId=node_id;

        //we also calculate the element mean size in the same way, for the courant number
        //also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure
        std::vector<unsigned int> element_partition;
        OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);

        //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
        // Element MEAN_SIZE: length of the SHORTEST edge of each element
        // (pairwise node distances), used later for the courant estimate.
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                double elem_size;
                array_1d<double,3> Edge(3,0.0);
                Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates();
                elem_size = Edge[0]*Edge[0];
                for (unsigned int d = 1; d < TDim; d++)
                    elem_size += Edge[d]*Edge[d];
                // compare against every remaining node pair, keep the minimum
                for (unsigned int i = 2; i < (TDim+1); i++)
                    for(unsigned int j = 0; j < i; j++)
                    {
                        Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates();
                        double Length = Edge[0]*Edge[0];
                        for (unsigned int d = 1; d < TDim; d++)
                            Length += Edge[d]*Edge[d];
                        if (Length < elem_size) elem_size = Length;
                    }
                elem_size = sqrt(elem_size);
                ielem->SetValue(MEAN_SIZE, elem_size);
            }
        }

        //matrix containing the position of the 4/15/45 particles that we will seed at the beginning
        BoundedMatrix<double, 5*(1+TDim), 3 > pos;
        BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N;
        int particle_id=0;
        mNElems = mrModelPart.Elements().size();

        std::cout << " about to resize vectors" << std::endl;

        //setting the right size to the vector containing the particles assigned to each element
        //particles vector. this vector contains ALL the particles in the simulation.
        mParticlesVector.resize(mNElems*mMaxNumberOfParticles);
        //and this vector contains the current number of particles that are in each element (currently zero)
        mNumOfParticlesInElems.resize(mNElems);
        mNumOfParticlesInElems=ZeroVector(mNElems);
        //when moving the particles, an auxiliary vector is necessary (to store the previous number)
        mNumOfParticlesInElemsAux.resize(mNElems);
        //each element will have a list of pointers to all the particles that are inside.
        //this vector contains the pointers to the vector of (particle) pointers of each element.
        mVectorOfParticlePointersVectors.resize(mNElems);
        //int artz;
        //std::cin >> artz;
        int i_int=0; //careful! it's not the id, but the position inside the array!
        std::cout << " about to create particles" << std::endl;
        //now we seed: LOOP IN ELEMENTS
        //using loop index, DO NOT paralelize this! change lines : mparticles_in_elems_pointers((ii*mMaxNumberOfParticles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one
        mOffset=0;
        //ShallowParticle& firstparticle = mParticlesVector[0];
        for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            //(ielem->GetValue(BED_PARTICLE_POINTERS)) = ParticlePointerVector( mMaxNumberOfParticles*2, &firstparticle );
            //ParticlePointerVector& particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
            //now we link the mpointers_to_particle_pointers_vectors to the corresponding element
            //mpointers_to_particle_pointers_vectors(ii) = &particle_pointers;
            //now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half).
            //for(int j=0; j<(mMaxNumberOfParticles*2); j++)
            //    particle_pointers.push_back(&firstparticle);
            mVectorOfParticlePointersVectors[ii] = ParticlePointerVector( mMaxNumberOfParticles*2 );
            ParticlePointerVector& particle_pointers = mVectorOfParticlePointersVectors[ii];
            //int & number_of_particles = ielem->GetValue(NUMBER_OF_BED_PARTICLES);
            int & number_of_particles = mNumOfParticlesInElems[ii];
            number_of_particles=0;
            Geometry< Node<3> >& geom = ielem->GetGeometry();
            //unsigned int elem_id = ielem->Id();
            // Gauss-point seeding positions/weights; defined elsewhere in the class.
            ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45
            //now we seed the particles in the current element
            for (unsigned int j = 0; j < pos.size1(); j++)
            {
                ++particle_id;
                ShallowParticle& pparticle = mParticlesVector[particle_id-1];
                //~ pparticle.X()=pos(j,0);
                //~ pparticle.Y()=pos(j,1);
                //~ pparticle.Z()=pos(j,2);
                pparticle.Coordinates() = row(pos,j);
                pparticle.GetEraseFlag()=false;

                // Initialise the particle payload by shape-function interpolation
                // of the nodal scalar and vector variables.
                array_1d<float, 3 > & vector1 = pparticle.GetVector1();
                float & scalar1 = pparticle.GetScalar1();
                noalias(vector1) = ZeroVector(3);
                scalar1=0.0;
                for (unsigned int k = 0; k < (TDim+1); k++)
                {
                    scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(*mScalarVar1);
                    noalias(vector1) += N(j, k) * geom[k].FastGetSolutionStepValue(*mVectorVar1);
                }
                particle_pointers(j) = &pparticle;
                number_of_particles++ ;
            }
            ++i_int;
        }
        mNParticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true.
        std::cout << " [Creating particles : " << mNParticles << " particles created]" << std::endl;
        mParticlePrintingToolInitialized=false;

        KRATOS_CATCH("")
    }

    ~MoveShallowWaterParticleUtility()
    {}

    /// Builds the dynamic bins used for spatial searches over the elements.
    void MountBin()
    {
        KRATOS_TRY

        //copy the elements to a new container, as the list will
        //be shuffled duringthe construction of the tree
        ContainerType& rElements = mrModelPart.ElementsArray();
        IteratorType it_begin = rElements.begin();
        IteratorType it_end = rElements.end();
        //const int number_of_elem = rElements.size();
        typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) );
        paux.swap(mpBinsObjectDynamic);
        //BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end );
        std::cout << " finished mounting Bins" << std::endl;

        KRATOS_CATCH("")
    }

    /// Calculates the mean velocity
    /** This function computes the mean velocity within an element and
     * stores it in MEAN_VEL_OVER_ELEM_SIZE variable.
     * This variable keeps the courant number aprox 0.1 in each substep
     *
     * @see MoveParticle
     * @see MoveParticleInverseWay
     */
    void CalculateVelOverElemSize()
    {
        KRATOS_TRY

        const double nodal_weight = 1.0/ (1.0 + double (TDim) );
        ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();

        std::vector<unsigned int> element_partition;
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                Geometry<Node<3> >& geom = ielem->GetGeometry();

                array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);

                for (unsigned int i=0; i != (TDim+1) ; i++)
                    vector_mean_velocity += geom[i].FastGetSolutionStepValue(VELOCITY);
                vector_mean_velocity *= nodal_weight;

                //~ const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
                const double mean_velocity = norm_2( vector_mean_velocity );
                // NOTE(review): divides by the element MEAN_SIZE — assumes it was
                // set (non-zero) by the constructor for every element.
                ielem->SetValue(MEAN_VEL_OVER_ELEM_SIZE, mean_velocity / ( ielem->GetValue(MEAN_SIZE) ) );
            }
        }
        KRATOS_CATCH("")
    }

    /// Reset the boundary conditions
    /** When a variable is fixed this function resets the nodal values
     * with the previous time step
     */
    void ResetBoundaryConditions()
    {
        KRATOS_TRY

        // Per-component accessors for the convected vector variable (X/Y/Z).
        typedef VariableComponent<VectorComponentAdaptor<array_1d<double, 3> > > component_type;
        component_type vector_var_x = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_X"));
        component_type vector_var_y = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_Y"));
        component_type vector_var_z = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_Z"));

        ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
        std::vector<unsigned int> node_partition;
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                // For each fixed dof, restore the value from buffer position 1
                // (the previous time step).
                if (inode->IsFixed(*mScalarVar1))
                {
                    inode->FastGetSolutionStepValue(*mScalarVar1)=inode->GetSolutionStepValue(*mScalarVar1,1);
                }
                if (inode->IsFixed(vector_var_x))
                {
                    inode->FastGetSolutionStepValue(vector_var_x)=inode->GetSolutionStepValue(vector_var_x,1);
                }
                if (inode->IsFixed(vector_var_y))
                {
                    inode->FastGetSolutionStepValue(vector_var_y)=inode->GetSolutionStepValue(vector_var_y,1);
                }
                if (inode->IsFixed(vector_var_z))
                {
                    inode->FastGetSolutionStepValue(vector_var_z)=inode->GetSolutionStepValue(vector_var_z,1);
                }
            }
        }
        KRATOS_CATCH("")
    }

    /// Auxiliar function to compute the "delta variables"
    /** Delta variables are the difference between two time steps.
     * It's value is used to update particles info
     *
     * @see CorrectParticlesWithoutMovingUsingDeltaVariables
     */
    void CalculateDeltaVariables()
    {
        KRATOS_TRY
        ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
        std::vector<unsigned int> node_partition;
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                // delta = current nodal value - value projected from particles
                inode->FastGetSolutionStepValue(DELTA_SCALAR1) = inode->FastGetSolutionStepValue(*mScalarVar1) - inode->FastGetSolutionStepValue(PROJECTED_SCALAR1);
                inode->FastGetSolutionStepValue(DELTA_VECTOR1) = inode->FastGetSolutionStepValue(*mVectorVar1) - inode->FastGetSolutionStepValue(PROJECTED_VECTOR1); //PROJECTED_VECTOR1
            }
        }
        KRATOS_CATCH("")
    }

    /// Auxiliar function
    /** This function copies a scalar variable value to the previous time step
     */
    void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY
        ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
        std::vector<unsigned int> node_partition;
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable);
            }
        }
        bool even_timestep;
        if (offset!=0) even_timestep=false;
        else even_timestep=true;

        const int post_offset = mMaxNumberOfParticles * static_cast<int>(even_timestep); //and we also save the offset to know the location in which we will save the pointers after we've moved the particles

        double delta_t = CurrentProcessInfo[DELTA_TIME];

        array_1d<double,TDim+1> N;
        const unsigned int max_results = 10000;

        //double integration_distance= 2.0;

        mMaxSubSteps = 10;
        mMaxSubStepDt = delta_t / static_cast<double>(mMaxSubSteps);

        std::vector<unsigned int> element_partition;
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);

        ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();

        //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                //ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
                int & number_of_particles = mNumOfParticlesInElems[ii]; //old_element->GetValue(NUMBER_OF_BED_PARTICLES);
                mNumOfParticlesInElemsAux[ii] = number_of_particles;
                mNumOfParticlesInElems[ii] = 0;
                //we reset the local vectors for a faster access;
            }
        }
        std::cout << "convecting particles" << std::endl;
        //We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle)

        #pragma omp barrier

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            // Per-thread scratch buffers for the spatial search and the list of
            // elements crossed along a particle's trajectory.
            ResultContainerType results(max_results);
            GlobalPointersVector< Element > elements_in_trajectory;
            elements_in_trajectory.resize(20);

            for(unsigned int ielem = element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
            {
                ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
                const int old_element_id = old_element->Id();

                ParticlePointerVector& old_element_particle_pointers = mVectorOfParticlePointersVectors[old_element_id-1];

                if ( (results.size()) != max_results )
                    results.resize(max_results);

                unsigned int number_of_elements_in_trajectory = 0; //excluding the origin one (current one, ielem)

                for (int ii = 0; ii < mNumOfParticlesInElemsAux[ielem]; ii++)
                {
                    ShallowParticle& pparticle = old_element_particle_pointers[offset+ii];

                    Element::Pointer pcurrent_element( *old_element.base() );
                    ResultIteratorType result_begin = results.begin();
                    bool & erase_flag=pparticle.GetEraseFlag();
                    if (erase_flag == false)
                    {
                        // N removed from the arguments; not needed since the particle
                        // ALWAYS starts at a node and the final location is irrelevant here.
                        MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results);

                        const int current_element_id = pcurrent_element->Id();

                        int & number_of_particles_in_current_elem = mNumOfParticlesInElems[current_element_id-1];

                        if (number_of_particles_in_current_elem < mMaxNumberOfParticles && erase_flag == false)
                        {
                            ParticlePointerVector& current_element_particle_pointers = mVectorOfParticlePointersVectors[current_element_id-1];

                            // Several threads may land particles in the same destination
                            // element, so the slot counter is re-checked under the lock.
                            #pragma omp critical
                            {
                                if (number_of_particles_in_current_elem < mMaxNumberOfParticles) // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!!
                                {
                                    current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
                                    number_of_particles_in_current_elem++ ;
                                    KRATOS_ERROR_IF( number_of_particles_in_current_elem > mMaxNumberOfParticles ) << "In move shallow water particle utility: exceeded maximum number of particles" << std::endl;
                                    //~ if (number_of_particles_in_current_elem > mMaxNumberOfParticles)
                                    //~     KRATOS_WATCH("MAL");
                                }
                                else
                                {
                                    pparticle.GetEraseFlag()=true; //so we just delete it!
                                }
                            }
                        }
                        else
                        {
                            pparticle.GetEraseFlag()=true; //so we just delete it!
                        }
                    }
                }
            }
        }

        // After having changed everything we change the status of the mOddTimeStep flag:
        mOffset = post_offset;; // NOTE(review): stray second ';' — harmless empty statement
        KRATOS_CATCH("")
    }

    /// Transfer particles information to the mesh nodes
    /** This function explicitly projects data from particles (lagrangian)
     * onto the eulerian mesh. Shape functions of the elements determine
     * the particle location within the element and its contribution to
     * each node as a weighting function.
     */
    void TransferLagrangianToEulerian() //explicit
    {
        KRATOS_TRY

        const double threshold = 1e-10 / (static_cast<double>(TDim)+1.0);

        std::cout << "projecting info to mesh" << std::endl;

        const int offset = mOffset; // the array of pointers for each element has twice the required size so that
        // we use a part in odd timesteps and the other in even ones.
        //(flag managed only by MoveParticles)

        // We must project data from the particles (lagrangian) onto the eulerian mesh
        //int nnodes = mrModelPart.Nodes().size();
        //array_1d<double,(n_nodes)> eulerian_nodes_sumweights;

        // We save data from previous time step of the eulerian mesh in case we must reuse it later
        // cos no particle was found around the nodes though we could've use a bigger buffer, to be changed later!
        // after having saved data, we reset them to zero, this way it's easier to add the contribution
        // of the surrounding particles.
        ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
        std::vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);

        // Reset the projection targets before accumulating particle contributions
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->FastGetSolutionStepValue(PROJECTED_SCALAR1)=0.0;
                inode->FastGetSolutionStepValue(PROJECTED_VECTOR1)=ZeroVector(3);
                inode->FastGetSolutionStepValue(YP)=0.0; // YP accumulates the sum of weights per node
            }
        }

        // Adding contribution, loop on elements, since each element has stored the particles found inside of it
        std::vector<unsigned int> element_partition;
        OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);

        ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

                // Per-element accumulators: one slot per node of the element
                array_1d<double,3*(TDim+1)> nodes_positions;
                array_1d<double,3*(TDim+1)> nodes_added_vector1 = ZeroVector(3*(TDim+1));
                array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
                array_1d<double,(TDim+1)> nodes_added_weights = ZeroVector((TDim+1));
                //array_1d<double,(TDim+1)> weighting_inverse_divisor;

                Geometry<Node<3> >& geom = ielem->GetGeometry();

                for (int i=0 ; i!=(TDim+1) ; ++i)
                {
                    nodes_positions[i*3+0]=geom[i].X();
                    nodes_positions[i*3+1]=geom[i].Y();
                    nodes_positions[i*3+2]=geom[i].Z();
                    //weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01);
                }

                int & number_of_particles_in_elem= mNumOfParticlesInElems[ii];
                ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];

                for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
                {
                    if (iii==mMaxNumberOfParticles) // It means we are out of our portion of the array, abort loop!
                        break;

                    ShallowParticle& pparticle = element_particle_pointers[offset+iii];

                    if (pparticle.GetEraseFlag()==false)
                    {
                        array_1d<double,3> & position = pparticle.Coordinates();
                        const float& particle_scalar1 = pparticle.GetScalar1();
                        const array_1d<float,3>& particle_vector1 = pparticle.GetVector1();

                        array_1d<double,TDim+1> N;
                        bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
                        if (is_found==false) // Something went wrong. if it was close enough to the edge we simply send it inside the element.
                        {
                            KRATOS_INFO("MoveShallowWaterParticleUtility") << N << std::endl;
                            for (int j=0 ; j!=(TDim+1); j++)
                                if (N[j]<0.0 && N[j]> -1e-5)
                                    N[j]=1e-10; // clamp tiny negative shape functions so the particle contributes as if inside
                        }

                        for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
                        {
                            // These lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions
                            //double sq_dist = 0;
                            //for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k]));
                            //double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) );

                            double weight=N(j)*N(j); // squared shape function as weighting
                            //weight=N(j)*N(j)*N(j);
                            if (weight<threshold) weight=1e-10;

                            nodes_added_weights[j] += weight;
                            nodes_added_scalar1[j] += weight*static_cast<double>(particle_scalar1);
                            for (int k=0 ; k!=(TDim); k++) //x,y,(z)
                            {
                                nodes_added_vector1[j*3+k] += weight * static_cast<double>(particle_vector1[k]);
                            }
                        }
                    }
                }

                // Scatter element-local accumulators to the shared nodal database under per-node locks
                for (int i=0 ; i!=(TDim+1) ; ++i)
                {
                    geom[i].SetLock();
                    geom[i].FastGetSolutionStepValue(PROJECTED_SCALAR1) += nodes_added_scalar1[i];
                    geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_X) += nodes_added_vector1[3*i+0];
                    geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_Y) += nodes_added_vector1[3*i+1];
                    geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_Z) += nodes_added_vector1[3*i+2];
                    geom[i].FastGetSolutionStepValue(YP) += nodes_added_weights[i];
                    geom[i].UnSetLock();
                }
            }
        }

        // Normalize nodal sums by the accumulated weights
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                double sum_weights = inode->FastGetSolutionStepValue(YP);
                if (sum_weights>0.00001)
                {
                    double & scalar = inode->FastGetSolutionStepValue(PROJECTED_SCALAR1);
                    array_1d<double,3> & vector = inode->FastGetSolutionStepValue(PROJECTED_VECTOR1);
                    scalar /=sum_weights; // resetting the scalar1
                    vector /=sum_weights; // resetting the vector1
                }
                else // This should never happen because other ways to recover the information have been executed before, but leaving it just in case..
                {
                    inode->FastGetSolutionStepValue(PROJECTED_SCALAR1)=inode->FastGetSolutionStepValue(*mScalarVar1,1); // Resetting the convected scalar
                    inode->FastGetSolutionStepValue(PROJECTED_VECTOR1)=inode->FastGetSolutionStepValue(*mVectorVar1,1); // Resetting the convected vector
                }
            }
        }

        KRATOS_CATCH("")
    }

    /// Update all the particles without moving them
    /** This function updates all the particles variables using the
     * "delta variables" from the nodal database.
     *
     * @see CorrectParticleUsingDeltaVariables
     */
    void CorrectParticlesWithoutMovingUsingDeltaVariables()
    {
        KRATOS_TRY

        const int offset = mOffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
        //(flag managed only by MoveParticles)
        ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();

        std::vector<unsigned int> element_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                //const int & elem_id = ielem->Id();
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                Element::Pointer pelement(*ielem.base());
                Geometry<Node<3> >& geom = ielem->GetGeometry();

                //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
                //int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
                int & number_of_particles_in_elem= mNumOfParticlesInElems[ii];
                ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];

                for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
                {
                    // NOTE(review): the analogous loop in TransferLagrangianToEulerian breaks on
                    // iii==mMaxNumberOfParticles; '>' here allows one extra index — confirm intended
                    if (iii>mMaxNumberOfParticles) //it means we are out of our portion of the array, abort loop!
                        break;

                    ShallowParticle & pparticle = element_particle_pointers[offset+iii];

                    bool erase_flag= pparticle.GetEraseFlag();
                    if (erase_flag==false)
                    {
                        CorrectParticleUsingDeltaVariables(pparticle,pelement,geom); //'lite' version, we pass by reference the geometry, so much cheaper
                    }
                }
            }
        }
        KRATOS_CATCH("")
    }

    /// Fill an element with particles
    /** This function is to be executed after moving particles and
     * before tranferring data from lagrangian particles to eulerian mesh
     * If an element finishes with less particles than "minimum number
     * of particles", then PreReseed adds particles inside it.
     * A minimal reseed is performed in order to not disturb the projection
     * from lagrangian to eulerian.
     *
     * @see MinimumNumberOfParticles
     *
     * @see MoveParticles
     * @see MoveParticleInverseWay: is called to get the particle values
     */
    void PreReseed(int MinimumNumberOfParticles)
    {
        KRATOS_TRY

        const int offset =mOffset;
        const int max_results = 1000;

        //tools for the paralelization
        unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
        std::vector<unsigned int> elem_partition;
        int number_of_rows = mrModelPart.Elements().size();
        elem_partition.resize(number_of_threads + 1);
        int elem_partition_size = number_of_rows / number_of_threads;
        elem_partition[0] = 0;
        elem_partition[number_of_threads] = number_of_rows;
        //KRATOS_WATCH(elem_partition_size);
        for (unsigned int i = 1; i < number_of_threads; i++)
            elem_partition[i] = elem_partition[i - 1] + elem_partition_size;

        ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
        #pragma omp parallel firstprivate(elem_partition)
        {
            ResultContainerType results(max_results);
            int k = OpenMPUtils::ThisThread();
            //ModelPart::ElementsContainerType::iterator it_begin = mrModelPart.ElementsBegin() +  elem_partition[k];
            //ModelPart::ElementsContainerType::iterator it_end = mrModelPart.ElementsBegin() + elem_partition[k+1] ;
            //ModelPart::NodesContainerType local_list=aux[k];
            //PointerVectorSet<ShallowParticle, IndexedObject> & list=aux[k];
            BoundedMatrix<double, (TDim+1), 3 > pos;
            BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
            unsigned int freeparticle=0; //we start with the first position in the particles array

            //int local_id=1;
            for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
            {
                //const int & elem_id = ielem->Id();
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                results.resize(max_results);
                //const int & elem_id = ielem->Id();
                //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
                //int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
                int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
                ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];

                if (number_of_particles_in_elem < (MinimumNumberOfParticles)) // && (ielem->GetGeometry())[0].Y()<0.10 )
                {
                    Geometry< Node<3> >& geom = ielem->GetGeometry();
                    ComputeGaussPointPositionsForPreReseed(geom, pos, N);

                    for (unsigned int j = 0; j < (pos.size1()); j++) // I am dropping the last one, the one in the middle of the element
                    {
                        // Claim a free (erased) slot in the global particle array.
                        // The double check inside 'omp critical' guards against two threads
                        // claiming the same slot.
                        bool keep_looking = true;
                        while(keep_looking)
                        {
                            if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                            {
                                #pragma omp critical
                                {
                                    if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                                    {
                                        mParticlesVector[freeparticle].GetEraseFlag()=false;
                                        keep_looking=false;
                                    }
                                }
                                if (keep_looking==false)
                                    break;
                                else
                                    freeparticle++;
                            }
                            else
                                freeparticle++;
                        }

                        ShallowParticle pparticle(pos(j,0),pos(j,1),pos(j,2));

                        array_1d<double,TDim+1>aux2_N;
                        bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
                        KRATOS_ERROR_IF_NOT( is_found )
                            << "In move shallow water particle utility: particle not found in domain" << std::endl;

                        pparticle.GetEraseFlag()=false;

                        ResultIteratorType result_begin = results.begin();
                        Element::Pointer pelement( *ielem.base() );
                        // Back-track the particle along the streamline to pick up its values
                        MoveParticleInverseWay(pparticle, pelement, result_begin, max_results);

                        //and we copy it to the array:
                        mParticlesVector[freeparticle] = pparticle;
                        element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[freeparticle];
                        pparticle.GetEraseFlag()=false;
                        number_of_particles_in_elem++;
                    }
                }
            }
        }
        KRATOS_CATCH("")
    }

    /// Fill an element with particles
    /** This function is to be executed after the mesh stage solver is
     * called and the particles are updated.
     * If an element contains less particles than "minimum number of
     * particles", then PostReseed adds particles inside it.
* A full reseed is performed and the particle gets it's convected * variables directly from the eulerian mesh * * @param MinimumNumberOfParticles * * @see PreReseed */ void PostReseed(int MinimumNumberOfParticles) //pooyan's way { KRATOS_TRY const int offset = mOffset; //TOOLS FOR THE PARALELIZATION unsigned int number_of_threads = OpenMPUtils::GetNumThreads(); std::vector<unsigned int> elem_partition; int number_of_rows=mrModelPart.Elements().size(); //KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", ""); elem_partition.resize(number_of_threads + 1); int elem_partition_size = number_of_rows / number_of_threads; elem_partition[0] = 0; elem_partition[number_of_threads] = number_of_rows; for (unsigned int i = 1; i < number_of_threads; i++) elem_partition[i] = elem_partition[i - 1] + elem_partition_size; ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin(); #pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemple everything toghether, remaming particles ids to get consecutive ids { unsigned int reused_particles=0; unsigned int freeparticle = 0; //we start by the first position; int k = OpenMPUtils::ThisThread(); BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D) BoundedMatrix<double, (3+2*TDim), (TDim+1) > N; double mesh_scalar1; array_1d<double,3> mesh_vector1; array_1d<int, (3+2*TDim) > positions; unsigned int number_of_reseeded_particles; for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++) { //const int & elem_id = ielem->Id(); ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; int & number_of_particles_in_elem = mNumOfParticlesInElems[ii]; ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii]; Geometry< Node<3> >& geom = ielem->GetGeometry(); if ( number_of_particles_in_elem < (MinimumNumberOfParticles) ) // && 
(geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(MinimumNumberOfParticles) ) ) { //bool reseed_more=false; number_of_reseeded_particles = 0; //reseed_more=true; number_of_reseeded_particles = 3 + 2*TDim; ComputeGaussPointPositionsForPostReseed(geom, pos, N); for (unsigned int j = 0; j < number_of_reseeded_particles; j++) { // Now we have to find an empty space (a particle that was about to be deleted) in the // particles model part. once found. there will be our renewed particle: bool keep_looking = true; while(keep_looking) { if (mParticlesVector[freeparticle].GetEraseFlag()==true) { #pragma omp critical { if (mParticlesVector[freeparticle].GetEraseFlag()==true) { mParticlesVector[freeparticle].GetEraseFlag()=false; keep_looking=false; } } if (keep_looking==false) break; else freeparticle++; } else freeparticle++; } ShallowParticle pparticle(pos(j,0),pos(j,1),pos(j,2)); array_1d<double,TDim+1>aux_N; bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N); KRATOS_ERROR_IF_NOT( is_found ) << "In move shallow water particle utility: particle not found in domain" << std::endl; mesh_scalar1 = 0.0; mesh_vector1 = ZeroVector(3); for (unsigned int l = 0; l < (TDim+1); l++) { mesh_scalar1 += N(j,l) * geom[l].FastGetSolutionStepValue(*mScalarVar1); noalias(mesh_vector1) += N(j, l) * geom[l].FastGetSolutionStepValue(*mVectorVar1); } pparticle.GetScalar1()=mesh_scalar1; pparticle.GetVector1()=mesh_vector1; pparticle.GetEraseFlag()=false; mParticlesVector[freeparticle]=pparticle; element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[freeparticle]; number_of_particles_in_elem++; KRATOS_ERROR_IF( keep_looking ) << "In move shallow water particle utility: Finished the list and couldnt find a free cell for the new particle!" 
<< std::endl; reused_particles++; } } } } KRATOS_CATCH("") } /// Fill a model part with particles /** This function prints the particles to a model part * * @param rLagrangianModelPart: empty model part to print particles * @param FilterFactor: the function will print one particle of every "filter factor" */ void ExecuteParticlesPrintingTool( ModelPart& rLagrangianModelPart, unsigned int FilterFactor ) { KRATOS_TRY // We will only print one out of every "filter factor" particles of the total particle list if (mParticlePrintingToolInitialized == false) { KRATOS_ERROR_IF( rLagrangianModelPart.NodesBegin() - rLagrangianModelPart.NodesEnd() > 0 ) << "In move shallow water particle utility: an empty model part is required for the particles printing tool" << std::endl; rLagrangianModelPart.AddNodalSolutionStepVariable(*mScalarVar1); rLagrangianModelPart.AddNodalSolutionStepVariable(DISPLACEMENT); for (unsigned int i = 0; i != ((mMaxNumberOfParticles*mNElems)/FilterFactor) + FilterFactor; i++) { Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode( i+mLastNodeId+1 , 0.0, 0.0, 0.0); //recordar que es el nueevo model part!! 
//pnode->SetBufferSize(mrModelPart.NodesBegin()->GetBufferSize()); pnode->SetBufferSize(1); } mParticlePrintingToolInitialized=true; } // Resetting data of the unused particles const double inactive_particle_position = -10.0; array_1d<double,3>inactive_particle_position_vector; inactive_particle_position_vector(0)=inactive_particle_position; inactive_particle_position_vector(1)=inactive_particle_position; inactive_particle_position_vector(2)=inactive_particle_position; ModelPart::NodesContainerType::iterator inodebegin = rLagrangianModelPart.NodesBegin(); for(unsigned int ii = 0; ii < rLagrangianModelPart.Nodes().size(); ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(*mScalarVar1) = 0.0; inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector; } int counter = 0; //ModelPart::NodesContainerType::iterator it_begin = rLagrangianModelPart.NodesBegin(); for (int i = 0; i != mMaxNumberOfParticles*mNElems; i++) { ShallowParticle& pparticle = mParticlesVector[i]; if(pparticle.GetEraseFlag() == false && i%FilterFactor == 0) { ModelPart::NodesContainerType::iterator inode = inodebegin + counter; //copying info from the particle to the (printing) node. inode->FastGetSolutionStepValue(*mScalarVar1) = pparticle.GetScalar1(); inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates(); counter++; } } KRATOS_CATCH("") } protected: private: /// Move a particle /** this function moves a particle according to the velocity given * by VELOCITY variable. 
     The movement is performed in nsubsteps,
     * during a total time of DELTA_TIME
     *
     * @param pParticle
     * @param pElement
     * @param rElementsInTrajectory
     * @param rNumberOfElementsInTrajectory
     * @param ResultBegin
     * @param MaxNumberOfResults
     *
     * @see MoveParticles
     */
    void MoveParticle(ShallowParticle & pParticle,
                      Element::Pointer & pElement,
                      GlobalPointersVector< Element >& rElementsInTrajectory,
                      unsigned int & rNumberOfElementsInTrajectory,
                      ResultIteratorType ResultBegin,
                      const unsigned int MaxNumberOfResults)
    {
        const ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
        double delta_t = CurrentProcessInfo[DELTA_TIME];
        unsigned int nsubsteps;
        double substep_dt;

        bool keep_integrating = false;
        bool is_found;

        array_1d<double,3> vel;
        array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3);
        array_1d<double,3> position;
        array_1d<double,3> mid_position;
        array_1d<double,TDim+1> N;

        //we start with the first position, then it will enter the loop.
        position = pParticle.Coordinates(); //initial coordinates

        double only_integral = 0.0 ;

        is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
        if(is_found == true)
        {
            keep_integrating=true;
            Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
            vel=ZeroVector(3);

            // Interpolate the mesh velocity at the particle position
            for(unsigned int j=0; j<(TDim+1); j++)
            {
                noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
            }

            //calculating substep to get +- courant(substep) = 0.1
            nsubsteps = 10.0 * (delta_t * pElement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
            if (nsubsteps<1)
                nsubsteps=1;
            substep_dt = delta_t / double(nsubsteps);

            only_integral = 1.0;// weight;//*double(nsubsteps);

            position += vel*substep_dt;//weight;

            // DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH VELOCITY
            unsigned int check_from_element_number = 0;

            for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
            {
                if (keep_integrating == true)
                {
                    is_found = FindNodeOnMesh(position, N, pElement, rElementsInTrajectory, rNumberOfElementsInTrajectory, check_from_element_number, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
                    if(is_found == true)
                    {
                        Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in

                        vel = ZeroVector(3);
                        for(unsigned int j=0; j<(TDim+1); j++)
                        {
                            noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                        }

                        only_integral += 1.0; //values saved for the current time step

                        position+=vel*substep_dt;//weight;
                    }
                    else
                    {
                        keep_integrating=false;
                        break;
                    }
                }
                else
                    break;
            }
        }

        // A particle that left the domain during integration is flagged for deletion
        if (keep_integrating == false)
            (pParticle.GetEraseFlag()=true);
        else
            is_found = FindNodeOnMesh(position, N ,pElement,ResultBegin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pElement)

        if (is_found == false)
            ( pParticle.GetEraseFlag()=true);

        pParticle.Coordinates() = position;
    }

    /// This function updates a particle
    /** This function updates a particle variables using the "delta
     * variables" from the nodal database.
     *
     * @param pParticle
     * @param pElement
     * @param rGeom
     *
     * @see CorrectParticlesWithoutMovingUsingDeltaVariables
     */
    void CorrectParticleUsingDeltaVariables(ShallowParticle & pParticle,
                                            Element::Pointer & pElement,
                                            Geometry< Node<3> >& rGeom)
    {
        array_1d<double,TDim+1> N;

        //we start with the first position, then it will enter the loop.
        array_1d<double,3> coords = pParticle.Coordinates();

        float & particle_scalar1 = pParticle.GetScalar1();
        array_1d<float,3> & particle_vector1 = pParticle.GetVector1();

        //double distance=0.0;
        double delta_scalar1 = 0.0;
        array_1d<double,3> delta_vector1 = ZeroVector(3);

        bool is_found = CalculatePosition(rGeom,coords[0],coords[1],coords[2],N);
        if(is_found == false)
        {
            KRATOS_INFO("MoveShallowWaterParticleUtility") << N << std::endl;
            for (int j=0 ; j!=(TDim+1); j++)
                if (N[j]<0.0 )
                    N[j]=1e-10; // clamp negative shape functions so the particle contributes as if inside
        }

        // Interpolate nodal delta variables at the particle location
        for(unsigned int j=0; j<(TDim+1); j++)
        {
            delta_scalar1 += rGeom[j].FastGetSolutionStepValue(DELTA_SCALAR1)*N[j];
            noalias(delta_vector1) += rGeom[j].FastGetSolutionStepValue(DELTA_VECTOR1)*N[j];
        }
        particle_scalar1 = particle_scalar1 + delta_scalar1;
        particle_vector1 = particle_vector1 + delta_vector1;
    }

    /// Move a particle in the inverse way
    /** this function moves a particle according to the -velocity given
     * by VELOCITY variable. The movement is performed by a backward
     * integration in nsubsteps, during a total time of DELTA_TIME
     * Before the particle goes out of the element, gets the value
     * of the eulerian mesh and stores it
     *
     * @param pParticle
     * @param pElement
     * @param ResultBegin
     * @param MaxNumberOfResults
     *
     * @see PreReseed
     */
    void MoveParticleInverseWay(ShallowParticle & pParticle,
                                Element::Pointer & pElement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO!
                                ResultIteratorType ResultBegin,
                                const unsigned int MaxNumberOfResults)
    {
        const ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
        double delta_t = CurrentProcessInfo[DELTA_TIME];
        unsigned int nsubsteps;
        double substep_dt;

        bool keep_integrating = false;
        bool is_found;

        double scalar1 = 0.0;
        array_1d<double,3> vector1;
        array_1d<double,3> vel;
        array_1d<double,3> position;
        array_1d<double,3> mid_position;
        array_1d<double,TDim+1> N;

        //we start with the first position, then it will enter the loop.
        position = pParticle.Coordinates(); // + (pParticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates

        double only_integral = 0.0 ;

        is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
        if(is_found == true)
        {
            keep_integrating = true;
            Geometry< Node<3> >& geom = pElement->GetGeometry(); //the element we're in

            scalar1 = 0.0;
            vector1 = ZeroVector(3);
            vel = ZeroVector(3);

            // Interpolate the convected variables and the velocity at the particle position
            for(unsigned int j=0; j<(TDim+1); j++)
            {
                scalar1 += geom[j].FastGetSolutionStepValue(*mScalarVar1)*N[j];
                noalias(vector1) += geom[j].FastGetSolutionStepValue(*mVectorVar1)*N[j];
                noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
            }

            //calculating substep to get +- courant(substep) = 1/4
            nsubsteps = 10.0 * (delta_t * pElement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
            if (nsubsteps<1)
                nsubsteps=1;
            substep_dt = delta_t / double(nsubsteps);

            only_integral = 1.0; // weight;//*double(nsubsteps);

            position -= vel*substep_dt; //weight; // backward (inverse) integration

            for(unsigned int i=0; i<(nsubsteps-1); i++) // this is for the substeps n+1. in the first one we already knew the position of the particle.
            {
                if (keep_integrating == true)
                {
                    is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
                    if (is_found == true)
                    {
                        Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in

                        scalar1 = 0.0;
                        vector1 = ZeroVector(3);
                        vel = ZeroVector(3);

                        for(unsigned int j=0; j<(TDim+1); j++)
                        {
                            scalar1 += geom[j].FastGetSolutionStepValue(*mScalarVar1)*N(j);
                            noalias(vector1) += geom[j].FastGetSolutionStepValue(*mVectorVar1)*N[j];
                            noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                        }

                        only_integral += 1.0; //weight ; //values saved for the current time step

                        position -= vel*substep_dt; //weight;
                    }
                    else
                        keep_integrating = false;
                }
            }

            // Store the last interpolated values in the particle (only when the initial position was found)
            pParticle.GetScalar1() = scalar1;
            pParticle.GetVector1() = vector1;
        }
    }

    /// Find the element into which a given node is located
    /** This function should find the element into which a given node
     * is located and return a pointer to the element and the vector
     * containing the shape functions that define the positions within
     * the element.
     * If false is returned the element is not found
     *
     * @param position of the node
     * @param N: return shape functions that define the positions within the elem
     * @param pElement: return a pointer to the element
     * @param ResultBegin
     * @param MaxNumberOfResults
     * @return FindNodeOnMesh if the element is found of not
     *
     * @see CalculatePosition
     */
    bool FindNodeOnMesh( const array_1d<double,3>& rPosition,
                         array_1d<double,TDim+1>& N,
                         Element::Pointer & pElement,
                         ResultIteratorType ResultBegin,
                         const unsigned int MaxNumberOfResults)
    {
        typedef std::size_t SizeType;

        array_1d<double,TDim+1> aux_N;
        //before using the bin to search for possible elements we check first the last element in which the particle was.
        Geometry<Node<3> >& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry();
        bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N);
        if (is_found_1) //that was easy!
        {
            return true;
        }

        // To begin with we check the neighbour elements; it is a bit more expensive
        GlobalPointersVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS);
        for (unsigned int i=0;i!=(neighb_elems.size());i++)
        {
            Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
            bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
            if (is_found_2)
            {
                pElement = neighb_elems[i].shared_from_this();
                return true;
            }
        }

        // If checking all the neighbour elements did not work, we have to use the bins
        // ask to the container for the list of candidate elements
        SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults );

        if (results_found>0)
        {
            //loop over the candidate elements and check if the particle falls within
            for(SizeType i = 0; i< results_found; i++)
            {
                Geometry<Node<3> >& geom = (*(ResultBegin + i))->GetGeometry();

                //find local position
                bool is_found_3 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);

                if (is_found_3)
                {
                    pElement = (*(ResultBegin + i))->shared_from_this();
                    return true;
                }
            }
        }

        //if nothing worked, then:
        //not found case
        return false;
    }

    /// Find the element into which a given node is located
    /** This function should find the element into which a given node
     * is located and return a pointer to the element and the vector
     * containing the shape functions that define the positions within
     * the element.
     * If false is returned the element is not found
     * This version includes predefined elements following a trajectory
     *
     * @param rPosition of the node
     * @param N Output shape functions that define the positions within the elem
     * @param pElement Output a pointer to the element
     * @param rElementsInTrajectory
     * @param rNumberOfElementsInTrajectory Output
     * @param CheckFromElementNumber
     * @param ResultBegin
     * @param MaxNumberOfResults
     * @return FindNodeOnMesh if the element is found of not
     *
     * @see CalculatePosition
     */
    bool FindNodeOnMesh( const array_1d<double,3>& rPosition,
                         array_1d<double,TDim+1>& N,
                         Element::Pointer & pElement,
                         GlobalPointersVector< Element >& rElementsInTrajectory,
                         unsigned int & rNumberOfElementsInTrajectory,
                         unsigned int & rCheckFromElementNumber,
                         ResultIteratorType ResultBegin,
                         const unsigned int MaxNumberOfResults)
    {
        typedef std::size_t SizeType;

        //~ const array_1d<double,3>& coords = rPosition;
        array_1d<double,TDim+1> aux_N;
        //before using the bin to search for possible elements we check first the last element in which the particle was.
        Geometry<Node<3> >& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry();
        bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N);
        if(is_found_1 == true)
        {
            return true; //that was easy!
        }

        // If it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element.
        for (unsigned int i=(rCheckFromElementNumber);i!=rNumberOfElementsInTrajectory;i++)
        {
            Geometry<Node<3> >& geom = rElementsInTrajectory[i].GetGeometry();
            bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],aux_N);
            if (is_found_2)
            {
                pElement = rElementsInTrajectory[i].shared_from_this();
                N = aux_N;
                rCheckFromElementNumber = i+1 ; //now i element matches pElement, so to avoid cheching twice the same element we send the counter to the following element.
                return true;
            }
        }

        // Now we check the neighbour elements:
        GlobalPointersVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS);
        for (unsigned int i=0;i!=(neighb_elems.size());i++)
        {
            Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
            bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
            if (is_found_2)
            {
                pElement = neighb_elems[i].shared_from_this();
                if (rNumberOfElementsInTrajectory<20) // trajectory cache holds at most 20 elements
                {
                    rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement;
                    rNumberOfElementsInTrajectory++;
                    rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the rElementsInTrajectory list. we are the particle that is adding elements to the list
                }
                return true;
            }
        }

        // If checking all the neighbour elements did not work, we have to use the bins
        // ask to the container for the list of candidate elements
        SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults );

        if(results_found>0)
        {
            //loop over the candidate elements and check if the particle falls within
            for(SizeType i = 0; i< results_found; i++)
            {
                Geometry<Node<3> >& geom = (*(ResultBegin + i))->GetGeometry();

                //find local position
                bool is_found = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);

                if (is_found)
                {
                    pElement = (*(ResultBegin + i))->shared_from_this();
                    if (rNumberOfElementsInTrajectory<20)
                    {
                        rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement;
                        rNumberOfElementsInTrajectory++;
                        rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the rElementsInTrajectory list. we are the particle that is adding elements to the list
                    }
                    return true;
                }
            }
        }

        //not found case
        return false;
    }

    /// Calculate the position of a given particle inside an element
    /** This function calculates the position of a given particle inside
     * an element and returns the shape functions that define it position
     * within the element and returns false if the particle is outside
     * the element
     *
     * @param rGeom: the element (a triangle)
     * @param xc: the postition of the particle
     * @param yc: the postition of the particle
     * @param zc: the postition of the particle (unused in this 2-D overload)
     * @param N: the shape functions to define the particle position
     *
     * @return CalculatePosition
     */
    inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom,
                                   const double xc, const double yc, const double zc,
                                   array_1d<double,3> & N )
    {
        double x0 = rGeom[0].X();
        double y0 = rGeom[0].Y();
        double x1 = rGeom[1].X();
        double y1 = rGeom[1].Y();
        double x2 = rGeom[2].X();
        double y2 = rGeom[2].Y();

        double area = CalculateVol(x0, y0, x1, y1, x2, y2);
        KRATOS_ERROR_IF( area == 0.0 )
            << "In move shallow water particle utility: element with zero area found" << std::endl;

        double inv_area = 1.0 / area;

        // Barycentric coordinates: sub-triangle areas over the total area
        N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
        N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
        N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;

        if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
            return true;

        return false;
    }

    /// Calculate the position of a given particle inside an element
    /** This function calculates the position of a given particle inside
     * an element and returns the shape functions that define it position
     * within the element and returns false if the particle is otuside
     * the element
     *
     * @param rNodesPositions of the element (a triangle)
     * @param xc: the postition of the particle
     * @param yc: the postition of the particle
     * @param zc: the postition of the particle
     * @param N: the shape
functions to define the particle position * * @return CalculatePosition */ inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions, const double xc, const double yc, const double zc, array_1d<double,3> & N ) { const double& x0 = rNodesPositions[0]; const double& y0 = rNodesPositions[1]; const double& x1 = rNodesPositions[3]; const double& y1 = rNodesPositions[4]; const double& x2 = rNodesPositions[6]; const double& y2 = rNodesPositions[7]; double area = CalculateVol(x0, y0, x1, y1, x2, y2); KRATOS_ERROR_IF( area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl; double inv_area = 1.0 / area; N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area; N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area; N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true return true; return false; } /// Calculate the position of a given particle inside an element /** This function calculates the position of a given particle inside * an element and returns the shape functions that define it position * within the element and returns false if the particle is otuside * the element * * @param rGeom: the element (a tetrahedron) * @param xc: the postition of the particle * @param yc: the postition of the particle * @param zc: the postition of the particle * @param N: the shape functions to define the particle position * * @return CalculatePosition */ inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom, const double xc, const double yc, const double zc, array_1d<double, 4 > & N ) { double x0 = rGeom[0].X(); double y0 = rGeom[0].Y(); double z0 = rGeom[0].Z(); double x1 = rGeom[1].X(); double y1 = rGeom[1].Y(); double z1 = rGeom[1].Z(); double x2 = rGeom[2].X(); double y2 = rGeom[2].Y(); double z2 = rGeom[2].Z(); double x3 = rGeom[3].X(); double y3 = rGeom[3].Y(); 
double z3 = rGeom[3].Z(); double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3); KRATOS_ERROR_IF( vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl; double inv_vol = 1.0 / vol; N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol; N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol; N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol; N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } /// Calculate the position of a given particle inside an element /** This function calculates the position of a given particle inside * an element and returns the shape functions that define it position * within the element and returns false if the particle is otuside * the element * * @param rNodesPositions of the element (a tetrahedron) * @param xc: the postition of the particle * @param yc: the postition of the particle * @param zc: the postition of the particle * @param N: the shape functions to define the particle position * * @return CalculatePosition */ inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions, const double xc, const double yc, const double zc, array_1d<double, 4 > & N ) { const double& x0 = rNodesPositions[0]; const double& y0 = rNodesPositions[1]; const double& z0 = rNodesPositions[2]; const double& x1 = rNodesPositions[3]; const double& y1 = rNodesPositions[4]; const double& z1 = rNodesPositions[5]; const double& x2 = rNodesPositions[6]; const double& y2 = rNodesPositions[7]; const double& z2 = rNodesPositions[8]; const double& x3 = rNodesPositions[9]; const double& y3 = rNodesPositions[10]; const double& z3 = rNodesPositions[11]; double vol = CalculateVol(x0, 
y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3); KRATOS_ERROR_IF( vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl; double inv_vol = 1.0 / vol; N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol; N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol; N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol; N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } /// Calculate the volume /** This function computes the area of a triangle */ inline double CalculateVol( const double x0, const double y0, const double x1, const double y1, const double x2, const double y2 ) { return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0)); } /// Calculate the volume /** This function computes the volume of a tetrahedron */ inline double CalculateVol( const double x0, const double y0, const double z0, const double x1, const double y1, const double z1, const double x2, const double y2, const double z2, const double x3, const double y3, const double z3 ) { double x10 = x1 - x0; double y10 = y1 - y0; double z10 = z1 - z0; double x20 = x2 - x0; double y20 = y2 - y0; double z20 = z2 - z0; double x30 = x3 - x0; double y30 = y3 - y0; double z30 = z3 - z0; double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30; return detJ * 0.1666666666666666666667; } /// Compute the Gauss points /** */ void ComputeGaussPointPositions_4( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos, BoundedMatrix<double, 7, 3 > & N ) { double one_third = 1.0 / 3.0; double one_sixt = 0.15; //1.0 / 6.0; double two_third = 0.7; //2.0 * one_third; N(0, 0) = one_sixt; N(0, 1) = one_sixt; N(0, 2) = 
two_third; N(1, 0) = two_third; N(1, 1) = one_sixt; N(1, 2) = one_sixt; N(2, 0) = one_sixt; N(2, 1) = two_third; N(2, 2) = one_sixt; N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third; //first pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X(); pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y(); pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z(); //second pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X(); pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y(); pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z(); //third pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X(); pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y(); pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); } /// Compute the Gauss points /** For a triangle * * @see PostReseed */ void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos, BoundedMatrix<double, 7, 3 > & N ) //2d { double one_third = 1.0 / 3.0; double one_eight = 0.12; //1.0 / 6.0; double three_quarters = 0.76; //2.0 * one_third; N(0, 0) = one_eight; N(0, 1) = one_eight; N(0, 2) = three_quarters; N(1, 0) = three_quarters; N(1, 1) = one_eight; N(1, 2) = one_eight; N(2, 0) = one_eight; N(2, 1) = three_quarters; N(2, 2) = one_eight; N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third; N(4, 0) = one_eight; N(4, 1) = 0.44; N(4, 2) = 0.44; N(5, 0) = 0.44; N(5, 1) = one_eight; N(5, 2) = 0.44; 
N(6, 0) = 0.44; N(6, 1) = 0.44; N(6, 2) = one_eight; //first pos(0, 0) = one_eight * geom[0].X() + one_eight * geom[1].X() + three_quarters * geom[2].X(); pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y(); pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z(); //second pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X(); pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y(); pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z(); //third pos(2, 0) = one_eight * geom[0].X() + three_quarters * geom[1].X() + one_eight * geom[2].X(); pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + one_eight * geom[2].Y(); pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); //fifth pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X(); pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y(); pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z(); //sixth pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X(); pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y(); pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z(); //seventh pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X(); pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y(); pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z(); } /// Compute the Gauss points /** For a tetrahedron * * @see 
PostReseed */ void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 9, 3 > & pos, BoundedMatrix<double, 9, 4 > & N ) //3D { double one_quarter = 0.25; double small_fraction = 0.1; //1.0 / 6.0; double big_fraction = 0.7; //2.0 * one_third; double mid_fraction = 0.3; //2.0 * one_third; N(0, 0) = big_fraction; N(0, 1) = small_fraction; N(0, 2) = small_fraction; N(0, 3) = small_fraction; N(1, 0) = small_fraction; N(1, 1) = big_fraction; N(1, 2) = small_fraction; N(1, 3) = small_fraction; N(2, 0) = small_fraction; N(2, 1) = small_fraction; N(2, 2) = big_fraction; N(2, 3) = small_fraction; N(3, 0) = small_fraction; N(3, 1) = small_fraction; N(3, 2) = small_fraction; N(3, 3) = big_fraction; N(4, 0) = one_quarter; N(4, 1) = one_quarter; N(4, 2) = one_quarter; N(4, 3) = one_quarter; N(5, 0) = small_fraction; N(5, 1) = mid_fraction; N(5, 2) = mid_fraction; N(5, 3) = mid_fraction; N(6, 0) = mid_fraction; N(6, 1) = small_fraction; N(6, 2) = mid_fraction; N(6, 3) = mid_fraction; N(7, 0) = mid_fraction; N(7, 1) = mid_fraction; N(7, 2) = small_fraction; N(7, 3) = mid_fraction; N(8, 0) = mid_fraction; N(8, 1) = mid_fraction; N(8, 2) = mid_fraction; N(8, 3) = small_fraction; pos=ZeroMatrix(9,3); for (unsigned int i=0; i!=4; i++) //going through the 4 nodes { array_1d<double, 3 > & coordinates = geom[i].Coordinates(); for (unsigned int j=0; j!=9; j++) //going through the 9 particles { for (unsigned int k=0; k!=3; k++) //x,y,z pos(j,k) += N(j,i) * coordinates[k]; } } } /// Compute the Gauss points /** For a triangle * * @see PreReseed */ void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 3, 3 > & pos, BoundedMatrix<double, 3, 3 > & N ) //2D { N(0, 0) = 0.5; N(0, 1) = 0.25; N(0, 2) = 0.25; N(1, 0) = 0.25; N(1, 1) = 0.5; N(1, 2) = 0.25; N(2, 0) = 0.25; N(2, 1) = 0.25; N(2, 2) = 0.5; //first pos(0, 0) = 0.5 * geom[0].X() + 0.25 * geom[1].X() + 0.25 * geom[2].X(); pos(0, 1) = 0.5 * geom[0].Y() + 
0.25 * geom[1].Y() + 0.25 * geom[2].Y(); pos(0, 2) = 0.5 * geom[0].Z() + 0.25 * geom[1].Z() + 0.25 * geom[2].Z(); //second pos(1, 0) = 0.25 * geom[0].X() + 0.5 * geom[1].X() + 0.25 * geom[2].X(); pos(1, 1) = 0.25 * geom[0].Y() + 0.5 * geom[1].Y() + 0.25 * geom[2].Y(); pos(1, 2) = 0.25 * geom[0].Z() + 0.5 * geom[1].Z() + 0.25 * geom[2].Z(); //third pos(2, 0) = 0.25 * geom[0].X() + 0.25 * geom[1].X() + 0.5 * geom[2].X(); pos(2, 1) = 0.25 * geom[0].Y() + 0.25 * geom[1].Y() + 0.5 * geom[2].Y(); pos(2, 2) = 0.25 * geom[0].Z() + 0.25 * geom[1].Z() + 0.5 * geom[2].Z(); } /// Compute the Gauss points /** For a tetrahedron * * @see PreReseed */ void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 4, 3 > & pos, BoundedMatrix<double, 4, 4 > & N ) //3D { //creating 4 particles, each will be closer to a node and equidistant to the other nodes N(0, 0) = 0.4; N(0, 1) = 0.2; N(0, 2) = 0.2; N(0, 3) = 0.2; N(1, 0) = 0.2; N(1, 1) = 0.4; N(1, 2) = 0.2; N(1, 3) = 0.2; N(2, 0) = 0.2; N(2, 1) = 0.2; N(2, 2) = 0.4; N(2, 3) = 0.2; N(3, 0) = 0.2; N(3, 1) = 0.2; N(3, 2) = 0.2; N(3, 3) = 0.4; pos=ZeroMatrix(4,3); for (unsigned int i=0; i!=4; i++) //going through the 4 nodes { array_1d<double, 3 > & coordinates = geom[i].Coordinates(); for (unsigned int j=0; j!=4; j++) //going through the 4 particles { for (unsigned int k=0; k!=3; k++) //x,y,z pos(j,k) += N(j,i) * coordinates[k]; } } } /// Compute the Gauss points /** */ void ComputeGaussPointPositions_45( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 45, 3 > & pos, BoundedMatrix<double, 45, 3 > & N ) { unsigned int counter=0; for (unsigned int i=0; i!=9;i++) { for (unsigned int j=0; j!=(9-i);j++) { N(counter,0)=0.05+double(i)*0.1; N(counter,1)=0.05+double(j)*0.1; N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + 
N(counter,2) * geom[2].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z(); counter++; } } } /// Compute the Gauss points /** */ void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 15, 3 > & pos, BoundedMatrix<double, 15, 3 > & N ) //2D { unsigned int counter=0; for (unsigned int i=0; i!=5;i++) { for (unsigned int j=0; j!=(5-i);j++) { N(counter,0)=0.05+double(i)*0.2; N(counter,1)=0.05+double(j)*0.2; N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z(); counter++; } } } /// Compute the Gauss points /** */ void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 20, 3 > & pos, BoundedMatrix<double, 20, 4 > & N ) //3D { double fraction_increment; unsigned int counter=0; for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer will be made by a triangle of 4 base X 4 height. 
since it is a triangle, it means it will have 10 particles { for (unsigned int j=0; j!=(4-i);j++) { for (unsigned int k=0; k!=(4-i-j);k++) { N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1) //total = 1.0 - N(counter,0); fraction_increment = 0.27; // N(counter,1)=fraction_increment * (0.175 + double(j)); N(counter,2)=fraction_increment * (0.175 + double(k)); N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z(); counter++; } } } } /// check function virtual int Check() { KRATOS_TRY Node<3>& rnode = *mrModelPart.NodesBegin(); KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*mVectorVar1), rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*mScalarVar1), rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(VELOCITY, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_VECTOR1, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_SCALAR1, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_VECTOR1, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_SCALAR1, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(MEAN_SIZE, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(YP, rnode) return 0; KRATOS_CATCH("") } /// Member variables ModelPart& mrModelPart; int mNParticles; int mNElems; int mOffset; int mMaxSubSteps; double mMaxSubStepDt; int mMaxNumberOfParticles; std::vector< ShallowParticle > mParticlesVector; int mLastElemId; bool mOddTimeStep; bool mParticlePrintingToolInitialized; unsigned int mLastNodeId; DenseVector<int> mNumOfParticlesInElems; DenseVector<int> 
mNumOfParticlesInElemsAux; DenseVector<ParticlePointerVector> mVectorOfParticlePointersVectors; typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic; const Variable<double>* mScalarVar1; const Variable<array_1d<double,3>>* mVectorVar1; std::string m_scalar_var1_name; std::string m_vector_var1_name; }; // class MoveShallowWaterParticleUtility } // namespace Kratos. #endif // KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED defined
/* ========== file: GB_unaryop__identity_uint64_int16.c ========== */
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint64_int16
// op(A') function:  GB_tran__identity_uint64_int16

// C type:   uint64_t
// A type:   int16_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = (uint64_t) Ax [p] for all p in 0..anz-1.  Only the values arrays
// are touched here; the pattern of the matrix is handled by the caller.
GrB_Info GB_unop__identity_uint64_int16
(
    uint64_t *restrict Cx,      // output values, size anz
    const int16_t *restrict Ax, // input values, size anz
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop itself lives in the shared template
// GB_unaryop_transpose.c; the macros defined above specialize it for this
// uint64_t <- int16_t type pair.
GrB_Info GB_tran__identity_uint64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ========== file: matrix_vector_functions_intel_mkl.c ========== */
/* high level matrix/vector functions using Intel MKL for blas */
#include "matrix_vector_functions_intel_mkl.h"
#include "mkl_scalapack.h"

/* Allocate a new nrows x ncols matrix (column-major) with all entries zero.
 * Caller owns the result and must release it with matrix_delete().
 * Returns NULL if either allocation fails (previously unchecked). */
mat * matrix_new(int nrows, int ncols)
{
    mat *M = malloc(sizeof *M);
    if(M == NULL){
        return NULL;
    }
    //M->d = (double*)mkl_calloc(nrows*ncols, sizeof(double), 64);
    M->d = (double*)calloc((size_t)nrows*(size_t)ncols, sizeof(double));
    if(M->d == NULL){
        free(M);
        return NULL;
    }
    M->nrows = nrows;
    M->ncols = ncols;
    return M;
}

/* Allocate a new vector of length nrows with all entries zero.
 * Caller owns the result and must release it with vector_delete().
 * Returns NULL if either allocation fails (previously unchecked). */
vec * vector_new(int nrows)
{
    vec *v = malloc(sizeof *v);
    if(v == NULL){
        return NULL;
    }
    //v->d = (double*)mkl_calloc(nrows,sizeof(double), 64);
    v->d = (double*)calloc((size_t)nrows,sizeof(double));
    if(v->d == NULL){
        free(v);
        return NULL;
    }
    v->nrows = nrows;
    return v;
}

/* Release a matrix created by matrix_new(); safe to call with NULL. */
void matrix_delete(mat *M)
{
    if(M == NULL){
        return;
    }
    //mkl_free(M->d);
    free(M->d);
    free(M);
}

/* Release a vector created by vector_new(); safe to call with NULL. */
void vector_delete(vec *v)
{
    if(v == NULL){
        return;
    }
    //mkl_free(v->d);
    free(v->d);
    free(v);
}

// column major format: entry (row, col) lives at d[col*nrows + row]
void matrix_set_element(mat *M, int row_num, int col_num, double val){
    //M->d[row_num*(M->ncols) + col_num] = val;
    M->d[col_num*(M->nrows) + row_num] = val;
}

double matrix_get_element(mat *M, int row_num, int col_num){
    //return M->d[row_num*(M->ncols) + col_num];
    return M->d[col_num*(M->nrows) + row_num];
}

void vector_set_element(vec *v, int row_num, double val){
    v->d[row_num] = val;
}

double vector_get_element(vec *v, int row_num){
    return v->d[row_num];
}

/* load matrix from binary file
 * the nonzeros are in order of double loop over rows and columns
format:
num_rows (int)
num_columns (int)
nnz (double)
...
nnz (double)
 * Returns NULL on any I/O or allocation failure.
 * FIX: open in binary mode ("rb") so the data is not corrupted on platforms
 * that translate line endings in text mode, and check every fread. */
mat * matrix_load_from_binary_file(char *fname){
    int i, j, num_rows, num_columns;
    double nnz_val;
    size_t one = 1;
    FILE *fp;
    mat *M;
    fp = fopen(fname,"rb");
    if(fp == NULL){
        return NULL;
    }
    if(fread(&num_rows,sizeof(int),one,fp) != one ||    //read m
       fread(&num_columns,sizeof(int),one,fp) != one){  //read n
        fclose(fp);
        return NULL;
    }
    printf("initializing M of size %d by %d\n", num_rows, num_columns);
    M = matrix_new(num_rows,num_columns);
    if(M == NULL){
        fclose(fp);
        return NULL;
    }
    printf("done..\n");
    // read and set elements
    for(i=0; i<num_rows; i++){
        for(j=0; j<num_columns; j++){
            if(fread(&nnz_val,sizeof(double),one,fp) != one){ //read nnz
                matrix_delete(M);
                fclose(fp);
                return NULL;
            }
            matrix_set_element(M,i,j,nnz_val);
        }
    }
    fclose(fp);
    return M;
}

/* write matrix to binary file (same layout as matrix_load_from_binary_file)
 * FIX: open in binary mode ("wb") and bail out if the file cannot be opened. */
void matrix_write_to_binary_file(mat *M, char *fname){
    int i, j, num_rows, num_columns;
    double nnz_val;
    size_t one = 1;
    FILE *fp;
    num_rows = M->nrows;
    num_columns = M->ncols;
    fp = fopen(fname,"wb");
    if(fp == NULL){
        return;
    }
    fwrite(&num_rows,sizeof(int),one,fp);    //write m
    fwrite(&num_columns,sizeof(int),one,fp); //write n
    // write the elements
    for(i=0; i<num_rows; i++){
        for(j=0; j<num_columns; j++){
            nnz_val = matrix_get_element(M,i,j);
            fwrite(&nnz_val,sizeof(double),one,fp); //write nnz
        }
    }
    fclose(fp);
}

/* print a matrix to stdout, row by row */
void matrix_print(mat * M){
    int i,j;
    double val;
    for(i=0; i<M->nrows; i++){
        for(j=0; j<M->ncols; j++){
            val = matrix_get_element(M, i, j);
            printf("%f ", val);
        }
        printf("\n");
    }
}

/* print a vector to stdout, one entry per line */
void vector_print(vec * v){
    int i;
    double val;
    for(i=0; i<v->nrows; i++){
        val = vector_get_element(v, i);
        printf("%f\n", val);
    }
}

/* v(:) = data ; data must hold at least v->nrows doubles */
void vector_set_data(vec *v, double *data){
    int i;
    #pragma omp parallel shared(v) private(i)
    {
    #pragma omp for
    for(i=0; i<(v->nrows); i++){
        v->d[i] = data[i];
    }
    }
}

/* scale vector by a constant */
void vector_scale(vec *v, double scalar){
    int i;
    #pragma omp parallel shared(v,scalar) private(i)
    {
    #pragma omp for
    for(i=0; i<(v->nrows); i++){
        v->d[i] = scalar*(v->d[i]);
    }
    }
}

/* scale matrix by 
a constant */
void matrix_scale(mat *M, double scalar){
    int i;
    // the data is one contiguous column-major buffer, so a flat loop covers all entries
    #pragma omp parallel shared(M,scalar) private(i)
    {
    #pragma omp for
    for(i=0; i<((M->nrows)*(M->ncols)); i++){
        M->d[i] = scalar*(M->d[i]);
    }
    }
}

/* copy contents of vec s to d ; d must already hold at least s->nrows entries */
void vector_copy(vec *d, vec *s){
    int i;
    //#pragma omp parallel for
    #pragma omp parallel shared(d,s) private(i)
    {
    #pragma omp for
    for(i=0; i<(s->nrows); i++){
        d->d[i] = s->d[i];
    }
    }
}

/* copy contents of mat S to D ; D must already have S's dimensions */
void matrix_copy(mat *D, mat *S){
    int i;
    //#pragma omp parallel for
    #pragma omp parallel shared(D,S) private(i)
    {
    #pragma omp for
    for(i=0; i<((S->nrows)*(S->ncols)); i++){
        D->d[i] = S->d[i];
    }
    }
}

/* hard threshold matrix entries: zero every entry with |value| < TOL */
void matrix_hard_threshold(mat *M, double TOL){
    int i;
    #pragma omp parallel shared(M) private(i)
    {
    #pragma omp for
    for(i=0; i<((M->nrows)*(M->ncols)); i++){
        if(fabs(M->d[i]) < TOL){
            M->d[i] = 0;
        }
    }
    }
}

/* build transpose of matrix : Mt = M^T (Mt must be ncols x nrows) */
void matrix_build_transpose(mat *Mt, mat *M){
    int i,j;
    for(i=0; i<(M->nrows); i++){
        for(j=0; j<(M->ncols); j++){
            matrix_set_element(Mt,j,i,matrix_get_element(M,i,j));
        }
    }
}

/* same as matrix_build_transpose, but prints per-row progress when mark==3 */
void matrix_build_transpose_debug(mat *Mt, mat *M, int mark){
    int i,j;
    printf("enter\n");
    for(i=0; i<(M->nrows); i++){
        if (mark==3) printf("i=%d\n", i);
        for(j=0; j<(M->ncols); j++){
            // if (mark==3) printf("i=%d, j=%d\n", i, j);
            matrix_set_element(Mt,j,i,matrix_get_element(M,i,j));
        }
    }
}

/* subtract b from a and save result in a : a = a - b */
void vector_sub(vec *a, vec *b){
    int i;
    //#pragma omp parallel for
    #pragma omp parallel shared(a,b) private(i)
    {
    #pragma omp for
    for(i=0; i<(a->nrows); i++){
        a->d[i] = a->d[i] - b->d[i];
    }
    }
}

/* subtract B from A and save result in A : A = A - B */
void matrix_sub(mat *A, mat *B){
    int i;
    //#pragma omp parallel for
    #pragma omp parallel shared(A,B) private(i)
    {
    #pragma omp for
    for(i=0; i<((A->nrows)*(A->ncols)); i++){
        A->d[i] = A->d[i] - B->d[i];
    }
    }
}

/* A = A - u*v where u is a column vec and v is a row vec */
void matrix_sub_column_times_row_vector(mat *A, vec 
*u, vec *v){ int i,j; #pragma omp parallel for shared(A,u,v) private(j) for(i=0; i<(A->nrows); i++){ for(j=0; j<(A->ncols); j++){ matrix_set_element(A,i,j,matrix_get_element(A,i,j) - vector_get_element(u,i)*vector_get_element(v,j)); } } } /* compute euclidean norm of vector */ double vector_get2norm(vec *v){ int i; double val, normval = 0; #pragma omp parallel shared(v,normval) private(i,val) { #pragma omp for reduction(+:normval) for(i=0; i<(v->nrows); i++){ val = v->d[i]; normval += val*val; } } return sqrt(normval); } /* returns the dot product of two vectors */ double vector_dot_product(vec *u, vec *v){ int i; double dotval = 0; #pragma omp parallel shared(u,v,dotval) private(i) { #pragma omp for reduction(+:dotval) for(i=0; i<u->nrows; i++){ dotval += (u->d[i])*(v->d[i]); } } return dotval; } /* matrix frobenius norm */ double get_matrix_frobenius_norm(mat *M){ int i; double val, normval = 0; #pragma omp parallel shared(M,normval) private(i,val) { #pragma omp for reduction(+:normval) for(i=0; i<((M->nrows)*(M->ncols)); i++){ val = M->d[i]; normval += val*val; } } return sqrt(normval); } /* matrix max abs val */ double get_matrix_max_abs_element(mat *M){ int i; double val, max = 0; for(i=0; i<((M->nrows)*(M->ncols)); i++){ val = M->d[i]; if( fabs(val) > max ) max = val; } return max; } /* calculate percent error between A and B in terms of Frobenius norm: 100*norm(A - B)/norm(A) */ double get_percent_error_between_two_mats(mat *A, mat *B){ int m,n; double normA, normB, normA_minus_B; mat *A_minus_B; m = A->nrows; n = A->ncols; A_minus_B = matrix_new(m,n); matrix_copy(A_minus_B, A); matrix_sub(A_minus_B, B); normA = get_matrix_frobenius_norm(A); normB = get_matrix_frobenius_norm(B); normA_minus_B = get_matrix_frobenius_norm(A_minus_B); matrix_delete(A_minus_B); return 100.0*normA_minus_B/normA; } double get_matrix_column_norm_squared(mat *M, int colnum){ int i, m, n; double val,colnorm; m = M->nrows; n = M->ncols; colnorm = 0; for(i=0; i<m; i++){ val = 
matrix_get_element(M,i,colnum); colnorm += val*val; } return colnorm; } double matrix_getmaxcolnorm(mat *M){ int i,m,n; vec *col_vec; double vecnorm, maxnorm; m = M->nrows; n = M->ncols; col_vec = vector_new(m); maxnorm = 0; #pragma omp parallel for for(i=0; i<n; i++){ matrix_get_col(M,i,col_vec); vecnorm = vector_get2norm(col_vec); #pragma omp critical if(vecnorm > maxnorm){ maxnorm = vecnorm; } } vector_delete(col_vec); return maxnorm; } void compute_matrix_column_norms(mat *M, vec *column_norms){ int j; #pragma omp parallel shared(column_norms,M) private(j) { #pragma omp parallel for for(j=0; j<(M->ncols); j++){ vector_set_element(column_norms,j, get_matrix_column_norm_squared(M,j)); } } } /* initialize a random matrix */ void initialize_random_matrix(mat *M){ int i,m,n; double val; m = M->nrows; n = M->ncols; float a=0.0,sigma=1.0; int N = m*n; float *r; VSLStreamStatePtr stream; r = (float*)malloc(N*sizeof(float)); vslNewStream( &stream, BRNG, time(NULL) ); //vslNewStream( &stream, BRNG, SEED ); vsRngGaussian( METHOD, stream, N, r, a, sigma ); // read and set elements #pragma omp parallel shared(M,N,r) private(i,val) { #pragma omp parallel for for(i=0; i<N; i++){ val = r[i]; M->d[i] = val; } } free(r); } /* initialize diagonal matrix from vector data */ void initialize_diagonal_matrix(mat *D, vec *data){ int i; #pragma omp parallel shared(D) private(i) { #pragma omp parallel for for(i=0; i<(D->nrows); i++){ matrix_set_element(D,i,i,data->d[i]); } } } /* initialize identity */ void initialize_identity_matrix(mat *D){ int i; matrix_scale(D, 0); #pragma omp parallel shared(D) private(i) { #pragma omp parallel for for(i=0; i<(D->nrows); i++){ matrix_set_element(D,i,i,1.0); } } } /* invert diagonal matrix */ void invert_diagonal_matrix(mat *Dinv, mat *D){ int i; #pragma omp parallel shared(D,Dinv) private(i) { #pragma omp parallel for for(i=0; i<(D->nrows); i++){ matrix_set_element(Dinv,i,i,1.0/(matrix_get_element(D,i,i))); } } } /* C = A*B ; column major */ void 
matrix_matrix_mult(mat *A, mat *B, mat *C){ double alpha, beta; alpha = 1.0; beta = 0.0; //cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, A->nrows, B->ncols, A->ncols, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols); cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, A->nrows, B->ncols, A->ncols, alpha, A->d, A->nrows, B->d, B->nrows, beta, C->d, C->nrows); } /* C = A^T*B ; column major */ void matrix_transpose_matrix_mult(mat *A, mat *B, mat *C){ double alpha, beta; alpha = 1.0; beta = 0.0; //cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, A->ncols, B->ncols, A->nrows, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols); cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, A->ncols, B->ncols, A->nrows, alpha, A->d, A->nrows, B->d, B->nrows, beta, C->d, C->nrows); } /* C = A*B^T ; column major */ void matrix_matrix_transpose_mult(mat *A, mat *B, mat *C){ double alpha, beta; alpha = 1.0; beta = 0.0; //cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, A->nrows, B->nrows, A->ncols, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols); cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, A->nrows, B->nrows, A->ncols, alpha, A->d, A->nrows, B->d, B->nrows, beta, C->d, C->nrows); } /* y = M*x ; column major */ void matrix_vector_mult(mat *M, vec *x, vec *y){ double alpha, beta; alpha = 1.0; beta = 0.0; cblas_dgemv (CblasColMajor, CblasNoTrans, M->nrows, M->ncols, alpha, M->d, M->nrows, x->d, 1, beta, y->d, 1); } /* y = M^T*x ; column major */ void matrix_transpose_vector_mult(mat *M, vec *x, vec *y){ double alpha, beta; alpha = 1.0; beta = 0.0; cblas_dgemv (CblasColMajor, CblasTrans, M->nrows, M->ncols, alpha, M->d, M->nrows, x->d, 1, beta, y->d, 1); } /* set column of matrix to vector */ void matrix_set_col(mat *M, int j, vec *column_vec){ int i; #pragma omp parallel shared(column_vec,M,j) private(i) { #pragma omp for for(i=0; i<M->nrows; i++){ matrix_set_element(M,i,j,vector_get_element(column_vec,i)); } } } /* extract 
column of a matrix into a vector */ void matrix_get_col(mat *M, int j, vec *column_vec){ int i; #pragma omp parallel shared(column_vec,M,j) private(i) { #pragma omp parallel for for(i=0; i<M->nrows; i++){ vector_set_element(column_vec,i,matrix_get_element(M,i,j)); } } } /* extract row i of a matrix into a vector */ void matrix_get_row(mat *M, int i, vec *row_vec){ int j; #pragma omp parallel shared(row_vec,M,i) private(j) { #pragma omp parallel for for(j=0; j<M->ncols; j++){ vector_set_element(row_vec,j,matrix_get_element(M,i,j)); } } } /* put vector row_vec as row i of a matrix */ void matrix_set_row(mat *M, int i, vec *row_vec){ int j; #pragma omp parallel shared(row_vec,M,i) private(j) { #pragma omp parallel for for(j=0; j<M->ncols; j++){ matrix_set_element(M,i,j,vector_get_element(row_vec,j)); } } } /* Mc = M(:,inds) */ /*void matrix_get_selected_columns(mat *M, int *inds, mat *Mc){ int i; vec *col_vec = vector_new(M->nrows); for(i=0; i<(Mc->ncols); i++){ matrix_get_col(M,inds[i],col_vec); matrix_set_col(Mc,i,col_vec); } vector_delete(col_vec); }*/ /* Mc = M(:,inds) */ void matrix_get_selected_columns(mat *M, int *inds, mat *Mc){ int i; vec *col_vec; //printf("%d %d\n", M->ncols, Mc->ncols); #pragma omp parallel shared(M,Mc,inds) private(i,col_vec) { #pragma omp parallel for for(i=0; i<(Mc->ncols); i++){ //printf("line:%d\n", i); col_vec = vector_new(M->nrows); matrix_get_col(M,inds[i],col_vec); matrix_set_col(Mc,i,col_vec); vector_delete(col_vec); } } } /* M(:,inds) = Mc */ /*void matrix_set_selected_columns(mat *M, int *inds, mat *Mc){ int i; vec *col_vec = vector_new(M->nrows); for(i=0; i<(Mc->ncols); i++){ matrix_get_col(Mc,i,col_vec); matrix_set_col(M,inds[i],col_vec); } vector_delete(col_vec); }*/ /* M(:,inds) = Mc */ void matrix_set_selected_columns(mat *M, int *inds, mat *Mc){ int i; vec *col_vec; #pragma omp parallel shared(M,Mc,inds) private(i,col_vec) { #pragma omp parallel for for(i=0; i<(Mc->ncols); i++){ col_vec = vector_new(M->nrows); 
matrix_get_col(Mc,i,col_vec); matrix_set_col(M,inds[i],col_vec); vector_delete(col_vec); } } } /* Mr = M(inds,:) */ /*void matrix_get_selected_rows(mat *M, int *inds, mat *Mr){ int i; vec *row_vec = vector_new(M->ncols); for(i=0; i<(Mr->nrows); i++){ matrix_get_row(M,inds[i],row_vec); matrix_set_row(Mr,i,row_vec); } vector_delete(row_vec); }*/ /* Mr = M(inds,:) */ void matrix_get_selected_rows(mat *M, int *inds, mat *Mr){ int i; vec *row_vec; #pragma omp parallel shared(M,Mr,inds) private(i,row_vec) { #pragma omp parallel for for(i=0; i<(Mr->nrows); i++){ row_vec = vector_new(M->ncols); matrix_get_row(M,inds[i],row_vec); matrix_set_row(Mr,i,row_vec); vector_delete(row_vec); } } } /* M(inds,:) = Mr */ /*void matrix_set_selected_rows(mat *M, int *inds, mat *Mr){ int i; vec *row_vec = vector_new(M->ncols); for(i=0; i<(Mr->nrows); i++){ matrix_get_row(Mr,i,row_vec); matrix_set_row(M,inds[i],row_vec); } vector_delete(row_vec); }*/ /* M(inds,:) = Mr */ void matrix_set_selected_rows(mat *M, int *inds, mat *Mr){ int i; vec *row_vec; #pragma omp parallel shared(M,Mr,inds) private(i,row_vec) { #pragma omp parallel for for(i=0; i<(Mr->nrows); i++){ row_vec = vector_new(M->ncols); matrix_get_row(Mr,i,row_vec); matrix_set_row(M,inds[i],row_vec); vector_delete(row_vec); } } } /* copy only upper triangular matrix part as for symmetric matrix */ void matrix_copy_symmetric(mat *S, mat *M){ int i,j,n,m; m = M->nrows; n = M->ncols; for(i=0; i<m; i++){ for(j=0; j<n; j++){ if(j>=i){ matrix_set_element(S,i,j,matrix_get_element(M,i,j)); } } } } /* copy only upper triangular matrix part as for symmetric matrix */ void matrix_keep_only_upper_triangular(mat *M){ int i,j,n,m; m = M->nrows; n = M->ncols; for(i=0; i<m; i++){ for(j=0; j<n; j++){ if(j<i){ matrix_set_element(M,i,j,0); } } } } /* % project v in direction of u function p=project_vec(v,u) p = (dot(v,u)/norm(u)^2)*u; */ void project_vector(vec *v, vec *u, vec *p){ double dot_product_val, vec_norm, scalar_val; dot_product_val = 
vector_dot_product(v, u); vec_norm = vector_get2norm(u); scalar_val = dot_product_val/(vec_norm*vec_norm); vector_copy(p, u); vector_scale(p, scalar_val); } /* build orthonormal basis matrix Q = Y; for j=1:k vj = Q(:,j); for i=1:(j-1) vi = Q(:,i); vj = vj - project_vec(vj,vi); end vj = vj/norm(vj); Q(:,j) = vj; end */ void build_orthonormal_basis_from_mat(mat *A, mat *Q){ int m,n,i,j,ind,num_ortos=2; double vec_norm; vec *vi,*vj,*p; m = A->nrows; n = A->ncols; vi = vector_new(m); vj = vector_new(m); p = vector_new(m); matrix_copy(Q, A); for(ind=0; ind<num_ortos; ind++){ for(j=0; j<n; j++){ matrix_get_col(Q, j, vj); for(i=0; i<j; i++){ matrix_get_col(Q, i, vi); project_vector(vj, vi, p); vector_sub(vj, p); } vec_norm = vector_get2norm(vj); vector_scale(vj, 1.0/vec_norm); matrix_set_col(Q, j, vj); } } vector_delete(vi); vector_delete(vj); vector_delete(p); } /* output = input[inds] */ void fill_vector_from_row_list(vec *input, vec *inds, vec *output){ int i,col_num; for(i=0; i<(input->nrows); i++){ vector_set_element(output,i,vector_get_element(input,vector_get_element(inds,i))); } } /* copy the first k rows of M into M_out where k = M_out->nrows (M_out pre-initialized) */ void matrix_copy_first_rows(mat *M_out, mat *M){ int i,k; k = M_out->nrows; vec * row_vec; for(i=0; i<k; i++){ row_vec = vector_new(M->ncols); matrix_get_row(M,i,row_vec); matrix_set_row(M_out,i,row_vec); vector_delete(row_vec); } } /* copy the first k columns of M into M_out where k = M_out->ncols (M_out pre-initialized) */ void matrix_copy_first_columns(mat *M_out, mat *M){ int i,k; k = M_out->ncols; vec * col_vec; for(i=0; i<k; i++){ col_vec = vector_new(M->nrows); matrix_get_col(M,i,col_vec); matrix_set_col(M_out,i,col_vec); vector_delete(col_vec); } } /* copy contents of mat S to D */ void matrix_copy_first_columns_with_param(mat *D, mat *S, int num_columns){ int i,j; for(i=0; i<(S->nrows); i++){ for(j=0; j<num_columns; j++){ matrix_set_element(D,i,j,matrix_get_element(S,i,j)); } } } /* copy 
the first k rows and columns of M into M_out is kxk where k = M_out->ncols (M_out pre-initialized) M_out = M(1:k,1:k) */ void matrix_copy_first_k_rows_and_columns(mat *M_out, mat *M){ int i,j,k; k = M_out->ncols; vec * col_vec; for(i=0; i<k; i++){ for(j=0; j<k; j++){ matrix_set_element(M_out,i,j,matrix_get_element(M,i,j)); } } } /* M_out = M(:,k+1:end) */ void matrix_copy_all_rows_and_last_columns_from_indexk(mat *M_out, mat *M, int k){ int i,j,i_out,j_out; vec * col_vec; for(i=0; i<(M->nrows); i++){ for(j=k; j<(M->ncols); j++){ i_out = i; j_out = j - k; matrix_set_element(M_out,i_out,j_out,matrix_get_element(M,i,j)); } } } void fill_matrix_from_first_rows(mat *M, int k, mat *M_k){ int i; vec *row_vec; //#pragma omp parallel shared(M,M_k,k) private(i,row_vec) { //#pragma omp for for(i=0; i<k; i++){ row_vec = vector_new(M->ncols); matrix_get_row(M,i,row_vec); matrix_set_row(M_k,i,row_vec); vector_delete(row_vec); } } } void fill_matrix_from_first_columns(mat *M, int k, mat *M_k){ int i; vec *col_vec; //#pragma omp parallel shared(M,M_k,k) private(i,col_vec) { //#pragma omp for for(i=0; i<k; i++){ col_vec = vector_new(M->nrows); matrix_get_col(M,i,col_vec); matrix_set_col(M_k,i,col_vec); vector_delete(col_vec); } } } void fill_matrix_from_last_columns(mat *M, int k, mat *M_k){ int i,ind; vec *col_vec; ind = 0; for(i=k; i<M->ncols; i++){ col_vec = vector_new(M->nrows); matrix_get_col(M,i,col_vec); matrix_set_col(M_k,ind,col_vec); vector_delete(col_vec); ind++; } } /* Mout = M((k+1):end,(k+1):end) in matlab notation */ void fill_matrix_from_lower_right_corner(mat *M, int k, mat *M_out){ int i,j,i_out,j_out; for(i=k; i<M->nrows; i++){ for(j=k; j<M->ncols; j++){ i_out = i-k; j_out = j-k; //printf("setting element %d, %d of M_out\n", i_out, j_out); matrix_set_element(M_out,i_out,j_out,matrix_get_element(M,i,j)); } } } /* append matrices side by side: C = [A, B] */ void append_matrices_horizontally(mat *A, mat *B, mat *C){ int i,j; #pragma omp parallel shared(C,A) 
private(i) { #pragma omp for for(i=0; i<((A->nrows)*(A->ncols)); i++){ C->d[i] = A->d[i]; } } #pragma omp parallel shared(C,B,A) private(i) { #pragma omp for for(i=0; i<((B->nrows)*(B->ncols)); i++){ C->d[i + (A->nrows)*(A->ncols)] = B->d[i]; } } /* for(i=0; i<A->nrows; i++){ for(j=0; j<A->ncols; j++){ matrix_set_element(C,i,j,matrix_get_element(A,i,j)); } } for(i=0; i<B->nrows; i++){ for(j=0; j<B->ncols; j++){ matrix_set_element(C,i,A->ncols + j,matrix_get_element(B,i,j)); } }*/ } /* append matrices vertically: C = [A; B] */ void append_matrices_vertically(mat *A, mat *B, mat *C){ int i,j; for(i=0; i<A->nrows; i++){ for(j=0; j<A->ncols; j++){ matrix_set_element(C,i,j,matrix_get_element(A,i,j)); } } for(i=0; i<B->nrows; i++){ for(j=0; j<B->ncols; j++){ matrix_set_element(C,A->nrows+i,j,matrix_get_element(B,i,j)); } } } /* compute eigendecomposition of symmetric matrix M */ void compute_evals_and_evecs_of_symm_matrix(mat *S, vec *evals){ //LAPACKE_dsyev( LAPACK_ROW_MAJOR, 'V', 'U', S->nrows, S->d, S->nrows, evals->d); LAPACKE_dsyev( LAPACK_COL_MAJOR, 'V', 'U', S->nrows, S->d, S->ncols, evals->d); } /* Performs [Q,R] = qr(M,'0') compact QR factorization M is mxn ; Q is mxn ; R is min(m,n) x min(m,n) */ void compact_QR_factorization(mat *M, mat *Q, mat *R){ int i,j,m,n,k; m = M->nrows; n = M->ncols; k = min(m,n); // printf("doing QR with m = %d, n = %d, k = %d\n", m,n,k); mat *R_full = matrix_new(m,n); matrix_copy(R_full,M); //vec *tau = vector_new(n); vec *tau = vector_new(k); // get R //printf("get R..\n"); //LAPACKE_dgeqrf(CblasColMajor, m, n, R_full->d, n, tau->d); LAPACKE_dgeqrf(LAPACK_COL_MAJOR, R_full->nrows, R_full->ncols, R_full->d, R_full->nrows, tau->d); for(i=0; i<k; i++){ for(j=0; j<k; j++){ if(j>=i){ matrix_set_element(R,i,j,matrix_get_element(R_full,i,j)); } } } // get Q matrix_copy(Q,R_full); //printf("dorgqr..\n"); LAPACKE_dorgqr(LAPACK_COL_MAJOR, Q->nrows, Q->ncols, min(Q->ncols,Q->nrows), Q->d, Q->nrows, tau->d); // clean up matrix_delete(R_full); 
vector_delete(tau); } /* returns Q from [Q,R] = qr(M,'0') compact QR factorization M is mxn ; Q is mxn ; R is min(m,n) x min(m,n) */ void QR_factorization_getQ(mat *M, mat *Q){ int i,j,m,n,k; m = M->nrows; n = M->ncols; k = min(m,n); matrix_copy(Q,M); vec *tau = vector_new(k); LAPACKE_dgeqrf(LAPACK_COL_MAJOR, m, n, Q->d, m, tau->d); LAPACKE_dorgqr(LAPACK_COL_MAJOR, m, n, n, Q->d, m, tau->d); // clean up vector_delete(tau); } void QR_factorization_getQ_inplace(mat *Q) { // printf("k1\n"); int i,j,m,n,k; m = Q->nrows; n = Q->ncols; k = min(m,n); int *jpvt = (int*)malloc(n*sizeof(int)); vec *tau = vector_new(k); // check memory allocation // printf("k1b\n"); // for (i=0; i++; i<m) { // for (j=0; j++; j<n) { // matrix_set_element(Q, i, j, matrix_get_element(Q, i, j)); // } // } /* BUG DETECTED! the dgeqrf call raises segmentation fault occasionally. the arguments passed to it seems to be fine. probably it's due to bug internal to MKL. To reproduce the bug: call qr_bug_reproduce() in main.c */ // printf("k2 m=%d,n=%d,size=%d,tau=%d\n", m, n, sizeof(Q->d), k); // LAPACKE_dgeqrf(LAPACK_COL_MAJOR, m, n, Q->d, m, tau->d); LAPACKE_dgeqpf(LAPACK_COL_MAJOR, m, n, Q->d, m, jpvt, tau->d); // printf("k2b\n"); LAPACKE_dorgqr(LAPACK_COL_MAJOR, m, n, n, Q->d, m, tau->d); // printf("k3\n"); // clean up vector_delete(tau); free(jpvt); // printf("k4\n"); } /* computes SVD: M = U*S*Vt; note Vt = V^T */ void singular_value_decomposition(mat *M, mat *U, mat *S, mat *Vt){ int m,n,k; m = M->nrows; n = M->ncols; k = min(m,n); vec * work = vector_new(2*max(3*min(m, n)+max(m, n), 5*min(m,n))); vec * svals = vector_new(k); LAPACKE_dgesvd( LAPACK_COL_MAJOR, 'S', 'S', m, n, M->d, m, svals->d, U->d, m, Vt->d, k, work->d ); initialize_diagonal_matrix(S, svals); vector_delete(work); vector_delete(svals); } void form_svd_product_matrix(mat *U, mat *S, mat *V, mat *P){ int k,m,n; double alpha, beta; alpha = 1.0; beta = 0.0; m = P->nrows; n = P->ncols; k = S->nrows; mat * SVt = matrix_new(k,n); // form 
SVt = S*V^T matrix_matrix_transpose_mult(S,V,SVt); // form P = U*S*V^T matrix_matrix_mult(U,SVt,P); } void estimate_rank_and_buildQ(mat *M, double frac_of_max_rank, double TOL, mat **Q, int *good_rank){ int m,n,i,j,ind,maxdim; double vec_norm; mat *RN,*Y,*Qbig,*Qsmall; vec *vi,*vj,*p,*p1; m = M->nrows; n = M->ncols; maxdim = round(min(m,n)*frac_of_max_rank); vi = vector_new(m); vj = vector_new(m); p = vector_new(m); p1 = vector_new(m); // build random matrix printf("form RN..\n"); RN = matrix_new(n, maxdim); initialize_random_matrix(RN); // multiply to get matrix of random samples Y printf("form Y: %d x %d..\n",m,maxdim); Y = matrix_new(m, maxdim); matrix_matrix_mult(M, RN, Y); // estimate rank k and build Q from Y printf("form Qbig..\n"); Qbig = matrix_new(m, maxdim); matrix_copy(Qbig, Y); printf("estimate rank with TOL = %f..\n", TOL); *good_rank = maxdim; int forbreak = 0; for(j=0; !forbreak && j<maxdim; j++){ matrix_get_col(Qbig, j, vj); for(i=0; i<j; i++){ matrix_get_col(Qbig, i, vi); project_vector(vj, vi, p); vector_sub(vj, p); if(vector_get2norm(p) < TOL && vector_get2norm(p1) < TOL){ *good_rank = j; forbreak = 1; break; } vector_copy(p1,p); } vec_norm = vector_get2norm(vj); vector_scale(vj, 1.0/vec_norm); matrix_set_col(Qbig, j, vj); } printf("estimated rank = %d\n", *good_rank); Qsmall = matrix_new(m, *good_rank); *Q = matrix_new(m, *good_rank); matrix_copy_first_columns(Qsmall, Qbig); QR_factorization_getQ(Qsmall, *Q); matrix_delete(RN); matrix_delete(Y); matrix_delete(Qsmall); matrix_delete(Qbig); } void estimate_rank_and_buildQ2(mat *M, int kblock, double TOL, mat **Y, mat **Q, int *good_rank){ int m,n,i,j,ind,exit_loop = 0; double error_norm; mat *RN,*Y_new,*Y_big,*QtM,*QQtM; vec *vi,*vj,*p,*p1; m = M->nrows; n = M->ncols; // build random matrix printf("form RN..\n"); RN = matrix_new(n,kblock); initialize_random_matrix(RN); // multiply to get matrix of random samples Y printf("form Y: %d x %d..\n",m,kblock); *Y = matrix_new(m, kblock); 
matrix_matrix_mult(M, RN, *Y); ind = 0; while(!exit_loop){ printf("form Q..\n"); if(ind > 0){ matrix_delete(*Q); } *Q = matrix_new((*Y)->nrows, (*Y)->ncols); QR_factorization_getQ(*Y, *Q); // compute QtM QtM = matrix_new((*Q)->ncols, M->ncols); matrix_transpose_matrix_mult(*Q,M,QtM); // compute QQtM QQtM = matrix_new(M->nrows, M->ncols); matrix_matrix_mult(*Q,QtM,QQtM); error_norm = 0.01*get_percent_error_between_two_mats(QQtM, M); printf("Y is of size %d x %d and error_norm = %f\n", (*Y)->nrows, (*Y)->ncols, error_norm); *good_rank = (*Y)->ncols; // add more samples if needed if(error_norm > TOL){ Y_new = matrix_new(m, kblock); initialize_random_matrix(RN); matrix_matrix_mult(M, RN, Y_new); Y_big = matrix_new((*Y)->nrows, (*Y)->ncols + Y_new->ncols); append_matrices_horizontally(*Y, Y_new, Y_big); matrix_delete(*Y); *Y = matrix_new(Y_big->nrows,Y_big->ncols); matrix_copy(*Y,Y_big); matrix_delete(Y_big); matrix_delete(Y_new); matrix_delete(QtM); matrix_delete(QQtM); ind++; } else{ matrix_delete(RN); exit_loop = 1; } } } double get_seconds_frac(struct timeval start_timeval, struct timeval end_timeval){ long secs_used, micros_used; secs_used=(end_timeval.tv_sec - start_timeval.tv_sec); micros_used= ((secs_used*1000000) + end_timeval.tv_usec) - (start_timeval.tv_usec); return (micros_used/1e6); } /*********************Lijian***********************/ /* initialize new matrix and set all entries to zero for float*/ void matrix_matrix_mult_row(mat *A, mat* B, mat* C){ double alpha, beta; alpha = 1.0; beta = 0.0; cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, A->nrows, B->ncols, A->ncols, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols); } void matrix_transpose_matrix_mult_row(mat *A, mat* B, mat* C){ double alpha, beta; alpha = 1.0; beta = 0.0; cblas_dgemm(CblasRowMajor, CblasTrans, CblasNoTrans, A->ncols, B->ncols, A->nrows, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols); } /*********************Lijian***********************/
GB_unop__isnan_bool_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__isnan_bool_fp32)
// op(A') function:  GB (_unop_tran__isnan_bool_fp32)

// C type:   bool
// A type:   float
// cast:     float cij = (aij)
// unaryop:  cij = isnan (aij)

// The macros below define the kernel for this one type combination.  They are
// referenced both by the apply function in this file and (presumably) by the
// transpose template included in GB (_unop_tran__isnan_bool_fp32) below, so
// they must be defined before that #include.

// A's element type
#define GB_ATYPE \
    float

// C's element type
#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// access the entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator: z = isnan (x)
#define GB_OP(z, x) \
    z = isnan (x) ;

// casting: the float input is kept as float; isnan produces the bool result
#define GB_CAST(z, aij) \
    float z = (aij) ;

// cij = op (aij): full read-cast-apply-store sequence for one entry
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (aij) ;               \
    Cx [pC] = isnan (z) ;           \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = isnan (Ax [p]) for all anz entries, in parallel.  When Ab
// is non-NULL (bitmap case), entries with Ab [p] == 0 are skipped and their
// Cx slots are left untouched.

GrB_Info GB (_unop_apply__isnan_bool_fp32)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = (aij) ;
            Cx [p] = isnan (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = (aij) ;
            Cx [p] = isnan (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The entire body is provided by the GB_unop_transpose.c template, which uses
// the GB_* macros defined above.

GrB_Info GB (_unop_tran__isnan_bool_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_to_hyper.c
//------------------------------------------------------------------------------
// GB_to_hyper: convert a matrix to hypersparse
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// On input, the matrix may have shallow A->p content; it is safely removed.
// On output, the matrix is always hypersparse (even if out of memory).  If the
// input matrix is non-hypersparse, it is given new A->p and A->h that are not
// shallow.  If the input matrix is already hypersparse, nothing is changed
// (and in that case A->p and A->h remain shallow on output if shallow on
// input).  The A->x and A->i content is not changed; it remains in whatever
// shallow/non-shallow state that it had on input).

// If an out-of-memory condition occurs, all content of the matrix is cleared.

// The input matrix may be jumbled; this is not an error condition.

#include "GB.h"

GrB_Info GB_to_hyper            // convert a matrix to hypersparse
(
    GrB_Matrix A,               // matrix to convert to hypersparse
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT_OK_OR_JUMBLED (GB_check (A, "A converting to hypersparse", GB0)) ;
    int64_t anz = GB_NNZ (A) ;
    ASSERT (GB_ZOMBIES_OK (A)) ;

    //--------------------------------------------------------------------------
    // convert A to hypersparse form
    //--------------------------------------------------------------------------

    if (!A->is_hyper)
    {

        //----------------------------------------------------------------------
        // determine the number of threads and tasks to use
        //----------------------------------------------------------------------

        int64_t n = A->vdim ;
        GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
        int nthreads = GB_nthreads (n, chunk, nthreads_max) ;
        // over-decompose into 8 tasks per thread for load balance, but never
        // more tasks than vectors, and at least one task
        int ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
        ntasks = GB_IMIN (ntasks, n) ;
        ntasks = GB_IMAX (ntasks, 1) ;

        //----------------------------------------------------------------------
        // count the number of non-empty vectors in A in each slice
        //----------------------------------------------------------------------

        A->is_hyper = true ;    // A becomes hypersparse
        ASSERT (A->h == NULL) ;
        ASSERT (A->nvec == A->plen && A->plen == n) ;

        // keep the old vector-pointer array; it is read below and freed at the
        // end (unless it is shallow, i.e. owned by another matrix)
        const int64_t *restrict Ap_old = A->p ;
        bool Ap_old_shallow = A->p_shallow ;

        int64_t *restrict Count ;
        GB_MALLOC_MEMORY (Count, ntasks+1, sizeof (int64_t)) ;
        if (Count == NULL)
        {
            // out of memory
            GB_PHIX_FREE (A) ;
            return (GB_OUT_OF_MEMORY) ;
        }

        // Count [tid] = # of non-empty vectors in task tid's slice of 0..n-1
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (int tid = 0 ; tid < ntasks ; tid++)
        {
            // NOTE(review): stray empty statement (";") after the
            // initializer below; harmless, preserved verbatim
            int64_t jstart, jend, my_nvec_nonempty = 0 ; ;
            GB_PARTITION (jstart, jend, n, tid, ntasks) ;
            for (int64_t j = jstart ; j < jend ; j++)
            {
                // vector j is non-empty iff its pointer range is non-empty
                if (Ap_old [j] < Ap_old [j+1]) my_nvec_nonempty++ ;
            }
            Count [tid] = my_nvec_nonempty ;
        }

        //----------------------------------------------------------------------
        // compute cumulative sum of Counts and nvec_nonempty
        //----------------------------------------------------------------------

        // after the cumsum, Count [tid] is the position in Ah_new/Ap_new at
        // which task tid writes its first non-empty vector, and Count [ntasks]
        // is the total number of non-empty vectors
        GB_cumsum (Count, ntasks, NULL, 1) ;
        int64_t nvec_nonempty = Count [ntasks] ;
        A->nvec_nonempty = nvec_nonempty ;

        //----------------------------------------------------------------------
        // allocate the new A->p and A->h
        //----------------------------------------------------------------------

        int64_t *restrict Ap_new ;
        int64_t *restrict Ah_new ;
        GB_MALLOC_MEMORY (Ap_new, nvec_nonempty+1, sizeof (int64_t)) ;
        GB_MALLOC_MEMORY (Ah_new, nvec_nonempty, sizeof (int64_t)) ;
        if (Ap_new == NULL || Ah_new == NULL)
        {
            // out of memory: free whichever allocations succeeded, then
            // clear all content of A (per the contract in the file header)
            GB_FREE_MEMORY (Count, ntasks+1, sizeof (int64_t)) ;
            GB_FREE_MEMORY (Ap_new, nvec_nonempty+1, sizeof (int64_t)) ;
            GB_FREE_MEMORY (Ah_new, nvec_nonempty, sizeof (int64_t)) ;
            GB_PHIX_FREE (A) ;
            return (GB_OUT_OF_MEMORY) ;
        }

        //----------------------------------------------------------------------
        // transplant the new A->p and A->h into the matrix
        //----------------------------------------------------------------------

        A->plen = nvec_nonempty ;
        A->nvec = nvec_nonempty ;
        A->p = Ap_new ;
        A->h = Ah_new ;
        A->p_shallow = false ;
        A->h_shallow = false ;

        //----------------------------------------------------------------------
        // construct the new hyperlist in the new A->p and A->h
        //----------------------------------------------------------------------

        // each task re-scans its slice and writes its non-empty vectors at
        // the offset computed by the cumulative sum above
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (int tid = 0 ; tid < ntasks ; tid++)
        {
            int64_t jstart, jend, k = Count [tid] ;
            GB_PARTITION (jstart, jend, n, tid, ntasks) ;
            for (int64_t j = jstart ; j < jend ; j++)
            {
                if (Ap_old [j] < Ap_old [j+1])
                {
                    // vector index j is the kth vector in the new Ah
                    Ap_new [k] = Ap_old [j] ;
                    Ah_new [k] = j ;
                    k++ ;
                }
            }
            ASSERT (k == Count [tid+1]) ;
        }

        // finalize the vector pointers and mark the matrix as valid
        Ap_new [nvec_nonempty] = anz ;
        A->magic = GB_MAGIC ;
        ASSERT (A->nvec_nonempty == GB_nvec_nonempty (A, Context)) ;

        //----------------------------------------------------------------------
        // free workspace, and free the old A->p unless it's shallow
        //----------------------------------------------------------------------

        GB_FREE_MEMORY (Count, ntasks+1, sizeof (int64_t)) ;
        if (!Ap_old_shallow)
        {
            GB_FREE_MEMORY (Ap_old, n+1, sizeof (int64_t)) ;
        }
    }

    //--------------------------------------------------------------------------
    // A is now in hypersparse form
    //--------------------------------------------------------------------------

    ASSERT (anz == GB_NNZ (A)) ;
    ASSERT_OK_OR_JUMBLED (GB_check (A, "A converted to hypersparse", GB0)) ;
    ASSERT (A->is_hyper) ;
    return (GrB_SUCCESS) ;
}
GB_unaryop__abs_uint16_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint16_uint64
// op(A') function:  GB_tran__abs_uint16_uint64

// C type:   uint16_t
// A type:   uint64_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = aij

// The macros below define the kernel for this type combination; they are used
// by the apply loop in this file and by the GB_unaryop_transpose.c template
// included below, so they must be defined before that #include.

// A's element type
#define GB_ATYPE \
    uint64_t

// C's element type
#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

// access the entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator (abs is the identity for unsigned types)
#define GB_OP(z, x) \
    z = x ;

// casting: truncate the uint64_t input to uint16_t
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij)): full read-cast-apply-store sequence for one entry
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT16 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = (uint16_t) Ax [p] for all anz entries, in parallel.

GrB_Info GB_unop__abs_uint16_uint64
(
    uint16_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The entire body is provided by the GB_unaryop_transpose.c template (phase 2
// only), which uses the GB_* macros defined above.

GrB_Info GB_tran__abs_uint16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ams.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" #include "float.h" #include "ams.h" #include "_hypre_utilities.hpp" /*-------------------------------------------------------------------------- * hypre_ParCSRRelax * * Relaxation on the ParCSR matrix A with right-hand side f and * initial guess u. Possible values for relax_type are: * * 1 = l1-scaled (or weighted) Jacobi * 2 = l1-scaled block Gauss-Seidel/SSOR * 3 = Kaczmarz * 4 = truncated version of 2 (Remark 6.2 in smoothers paper) * x = BoomerAMG relaxation with relax_type = |x| * (16 = Cheby) * * The default value of relax_type is 2. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRRelax( hypre_ParCSRMatrix *A, /* matrix to relax with */ hypre_ParVector *f, /* right-hand side */ HYPRE_Int relax_type, /* relaxation type */ HYPRE_Int relax_times, /* number of sweeps */ HYPRE_Real *l1_norms, /* l1 norms of the rows of A */ HYPRE_Real relax_weight, /* damping coefficient (usually <= 1) */ HYPRE_Real omega, /* SOR parameter (usually in (0,2) */ HYPRE_Real max_eig_est, /* for cheby smoothers */ HYPRE_Real min_eig_est, HYPRE_Int cheby_order, HYPRE_Real cheby_fraction, hypre_ParVector *u, /* initial/updated approximation */ hypre_ParVector *v, /* temporary vector */ hypre_ParVector *z /* temporary vector */ ) { HYPRE_Int sweep; for (sweep = 0; sweep < relax_times; sweep++) { if (relax_type == 1) /* l1-scaled Jacobi */ { hypre_BoomerAMGRelax(A, f, NULL, 7, 0, relax_weight, 1.0, l1_norms, u, v, z); } else if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */ { /* !!! Note: relax_weight and omega flipped !!! 
*/ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, NULL, 0, omega, relax_weight, l1_norms, u, v, z, 1, 1 /* symm */); } else #endif { hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, NULL, 0, omega, relax_weight, l1_norms, u, v, z, 1, 1 /* symm */, 0 /* skip diag */, 1, 0); } } else if (relax_type == 3) /* Kaczmarz */ { hypre_BoomerAMGRelax(A, f, NULL, 20, 0, relax_weight, omega, l1_norms, u, v, z); } else /* call BoomerAMG relaxation */ { if (relax_type == 16) { hypre_ParCSRRelax_Cheby(A, f, max_eig_est, min_eig_est, cheby_fraction, cheby_order, 1, 0, u, v, z); } else { hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight, omega, l1_norms, u, v, z); } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorInRangeOf * * Return a vector that belongs to the range of a given matrix. *--------------------------------------------------------------------------*/ hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A) { hypre_ParVector *x; x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixRowStarts(A)); hypre_ParVectorInitialize(x); hypre_ParVectorOwnsData(x) = 1; return x; } /*-------------------------------------------------------------------------- * hypre_ParVectorInDomainOf * * Return a vector that belongs to the domain of a given matrix. 
 *--------------------------------------------------------------------------*/

hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *x;

   /* Domain vectors are partitioned like the columns of A */
   x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumCols(A),
                             hypre_ParCSRMatrixColStarts(A));
   hypre_ParVectorInitialize(x);
   hypre_ParVectorOwnsData(x) = 1;

   return x;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockSplit
 *
 * Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel
 * block vector x. It is assumed that &x[i] = [x_0[i],...,x_{dim-1}[i]].
 *--------------------------------------------------------------------------*/

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* One kernel for both directions: dir == 0 scatters the interleaved x into
 * the component vectors (split); dir == 1 gathers the components back into
 * x (gather). At most 3 components are supported (x0..x2). */
template<HYPRE_Int dir>
__global__ void
hypreCUDAKernel_ParVectorBlockSplitGather(HYPRE_Int size, HYPRE_Int dim, HYPRE_Real *x0,
                                          HYPRE_Real *x1, HYPRE_Real *x2, HYPRE_Real *x)
{
   const HYPRE_Int i = hypre_cuda_get_grid_thread_id<1, 1>();

   if (i >= size * dim)
   {
      return;
   }

   HYPRE_Real *xx[3];
   xx[0] = x0;
   xx[1] = x1;
   xx[2] = x2;

   /* entry i of x is component d of node k */
   const HYPRE_Int d = i % dim;
   const HYPRE_Int k = i / dim;

   if (dir == 0)
   {
      xx[d][k] = x[i];
   }
   else if (dir == 1)
   {
      x[i] = xx[d][k];
   }
}
#endif

HYPRE_Int hypre_ParVectorBlockSplit(hypre_ParVector *x,
                                    hypre_ParVector *x_[3],
                                    HYPRE_Int dim)
{
   HYPRE_Int i, d, size_;
   HYPRE_Real *x_data, *x_data_[3];

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParVectorMemoryLocation(x) );
#endif

   /* local size of each component vector (x has size_ * dim entries) */
   size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
   {
      x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (exec == HYPRE_EXEC_DEVICE)
   {
      dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
      dim3 gDim = hypre_GetDefaultDeviceGridDimension(size_ * dim, "thread", bDim);
      HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ParVectorBlockSplitGather<0>, gDim, bDim,
                         size_, dim, x_data_[0], x_data_[1], x_data_[2], x_data);
   }
   else
#endif
   {
      for (i = 0; i < size_; i++)
         for (d = 0; d < dim; d++)
         {
            x_data_[d][i] = x_data[dim * i + d];
         }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockGather
 *
 * Compose a parallel block vector x from dim given sub-vectors
 * x_0,...,x_{dim-1}, such that &x[i] = [x_0[i],...,x_{dim-1}[i]].
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParVectorBlockGather(hypre_ParVector *x,
                                     hypre_ParVector *x_[3],
                                     HYPRE_Int dim)
{
   HYPRE_Int i, d, size_;
   HYPRE_Real *x_data, *x_data_[3];

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParVectorMemoryLocation(x) );
#endif

   size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
   {
      x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (exec == HYPRE_EXEC_DEVICE)
   {
      dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
      dim3 gDim = hypre_GetDefaultDeviceGridDimension(size_ * dim, "thread", bDim);
      HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ParVectorBlockSplitGather<1>, gDim, bDim,
                         size_, dim, x_data_[0], x_data_[1], x_data_[2], x_data);
   }
   else
#endif
   {
      for (i = 0; i < size_; i++)
         for (d = 0; d < dim; d++)
         {
            x_data[dim * i + d] = x_data_[d][i];
         }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBlockSolve
 *
 * Apply the block-diagonal solver diag(B) to the system diag(A) x = b.
 * Here B is a given BoomerAMG solver for A, while x and b are "block"
 * parallel vectors.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBlockSolve(void *B, hypre_ParCSRMatrix *A, hypre_ParVector *b, hypre_ParVector *x) { HYPRE_Int d, dim = 1; hypre_ParVector *b_[3]; hypre_ParVector *x_[3]; dim = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A); if (dim == 1) { hypre_BoomerAMGSolve(B, A, b, x); return hypre_error_flag; } for (d = 0; d < dim; d++) { b_[d] = hypre_ParVectorInRangeOf(A); x_[d] = hypre_ParVectorInRangeOf(A); } hypre_ParVectorBlockSplit(b, b_, dim); hypre_ParVectorBlockSplit(x, x_, dim); for (d = 0; d < dim; d++) { hypre_BoomerAMGSolve(B, A, b_[d], x_[d]); } hypre_ParVectorBlockGather(x, x_, dim); for (d = 0; d < dim; d++) { hypre_ParVectorDestroy(b_[d]); hypre_ParVectorDestroy(x_[d]); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixFixZeroRows * * For every zero row in the matrix: set the diagonal element to 1. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixFixZeroRowsHost(hypre_ParCSRMatrix *A) { HYPRE_Int i, j; HYPRE_Real l1_norm; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); /* a row will be considered zero if its l1 norm is less than eps */ HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */ for (i = 0; i < num_rows; i++) { l1_norm = 0.0; for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++) { l1_norm += fabs(A_diag_data[j]); } if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++) { l1_norm += 
fabs(A_offd_data[j]); } if (l1_norm <= eps) { for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++) if (A_diag_J[j] == i) { A_diag_data[j] = 1.0; } else { A_diag_data[j] = 0.0; } if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++) { A_offd_data[j] = 0.0; } } } return hypre_error_flag; } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) __global__ void hypreCUDAKernel_ParCSRMatrixFixZeroRows( HYPRE_Int nrows, HYPRE_Int *A_diag_i, HYPRE_Int *A_diag_j, HYPRE_Complex *A_diag_data, HYPRE_Int *A_offd_i, HYPRE_Complex *A_offd_data, HYPRE_Int num_cols_offd) { HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1, 1>(); if (row_i >= nrows) { return; } HYPRE_Int lane = hypre_cuda_get_lane_id<1>(); HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */ HYPRE_Real l1_norm = 0.0; HYPRE_Int p1, q1, p2 = 0, q2 = 0; if (lane < 2) { p1 = read_only_load(A_diag_i + row_i + lane); if (num_cols_offd) { p2 = read_only_load(A_offd_i + row_i + lane); } } q1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 1); p1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 0); if (num_cols_offd) { q2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 1); p2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 0); } for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE) { l1_norm += fabs(A_diag_data[j]); } for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE) { l1_norm += fabs(A_offd_data[j]); } l1_norm = warp_allreduce_sum(l1_norm); if (l1_norm <= eps) { for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE) { if (row_i == read_only_load(&A_diag_j[j])) { A_diag_data[j] = 1.0; } else { A_diag_data[j] = 0.0; } } for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE) { A_offd_data[j] = 0.0; } } } HYPRE_Int hypre_ParCSRMatrixFixZeroRowsDevice(hypre_ParCSRMatrix *A) { HYPRE_Int nrows = hypre_ParCSRMatrixNumRows(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); 
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); dim3 bDim, gDim; bDim = hypre_GetDefaultDeviceBlockDimension(); gDim = hypre_GetDefaultDeviceGridDimension(nrows, "warp", bDim); HYPRE_CUDA_LAUNCH(hypreCUDAKernel_ParCSRMatrixFixZeroRows, gDim, bDim, nrows, A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_data, num_cols_offd); //hypre_SyncCudaComputeStream(hypre_handle()); return hypre_error_flag; } #endif HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { return hypre_ParCSRMatrixFixZeroRowsDevice(A); } else #endif { return hypre_ParCSRMatrixFixZeroRowsHost(A); } } /*-------------------------------------------------------------------------- * hypre_ParCSRComputeL1Norms * * Compute the l1 norms of the rows of a given matrix, depending on * the option parameter: * * option 1 = Compute the l1 norm of the rows * option 2 = Compute the l1 norm of the (processor) off-diagonal * part of the rows plus the diagonal of A * option 3 = Compute the l2 norm^2 of the rows * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid * Smoothers for Ultra-Parallel Computing" * * The above computations are done in a CF manner, whenever the provided * cf_marker is not NULL. *--------------------------------------------------------------------------*/ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) struct l1_norm_op1 : public thrust::binary_function<HYPRE_Complex, HYPRE_Complex, HYPRE_Complex> { __host__ __device__ HYPRE_Complex operator()(HYPRE_Complex &x, HYPRE_Complex &y) const { return x <= 4.0 / 3.0 * y ? 
y : x; } }; #endif HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A, HYPRE_Int option, HYPRE_Int *cf_marker, HYPRE_Real **l1_norm_ptr) { HYPRE_Int i; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_MemoryLocation memory_location_l1 = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( memory_location_l1 ); if (exec == HYPRE_EXEC_HOST) { HYPRE_Int num_threads = hypre_NumThreads(); if (num_threads > 1) { return hypre_ParCSRComputeL1NormsThreads(A, option, num_threads, cf_marker, l1_norm_ptr); } } HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_l1); HYPRE_MemoryLocation memory_location_tmp = exec == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE; HYPRE_Real *diag_tmp = NULL; HYPRE_Int *cf_marker_offd = NULL; /* collect the cf marker data from other procs */ if (cf_marker != NULL) { HYPRE_Int num_sends; HYPRE_Int *int_buf_data = NULL; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; if (num_cols_offd) { cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, memory_location_tmp); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), memory_location_tmp); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg); HYPRE_THRUST_CALL( gather, hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg), hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) + hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), cf_marker, int_buf_data ); } else #endif { HYPRE_Int index = 0; HYPRE_Int start; HYPRE_Int j; for (i = 0; i < num_sends; i++) { start = 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } } comm_handle = hypre_ParCSRCommHandleCreate_v2(11, comm_pkg, memory_location_tmp, int_buf_data, memory_location_tmp, cf_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, memory_location_tmp); } if (option == 1) { /* Set the l1 norm of the diag part */ hypre_CSRMatrixComputeRowSum(A_diag, cf_marker, cf_marker, l1_norm, 1, 1.0, "set"); /* Add the l1 norm of the offd part */ if (num_cols_offd) { hypre_CSRMatrixComputeRowSum(A_offd, cf_marker, cf_marker_offd, l1_norm, 1, 1.0, "add"); } } else if (option == 2) { /* Set the abs(diag) element */ hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1); /* Add the l1 norm of the offd part */ if (num_cols_offd) { hypre_CSRMatrixComputeRowSum(A_offd, cf_marker, cf_marker, l1_norm, 1, 1.0, "add"); } } else if (option == 3) { /* Set the CF l2 norm of the diag part */ hypre_CSRMatrixComputeRowSum(A_diag, NULL, NULL, l1_norm, 2, 1.0, "set"); /* Add the CF l2 norm of the offd part */ if (num_cols_offd) { hypre_CSRMatrixComputeRowSum(A_offd, NULL, NULL, l1_norm, 2, 1.0, "add"); } } else if (option == 4) { /* Set the abs(diag) element */ hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1); diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp); hypre_TMemcpy(diag_tmp, l1_norm, HYPRE_Real, num_rows, memory_location_tmp, memory_location_l1); /* Add the scaled l1 norm of the offd part */ if (num_cols_offd) { hypre_CSRMatrixComputeRowSum(A_offd, cf_marker, cf_marker_offd, l1_norm, 1, 0.5, "add"); } /* Truncate according to Remark 6.2 */ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { HYPRE_THRUST_CALL( transform, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm, l1_norm_op1() ); } else #endif { for (i = 0; i < num_rows; i++) { if (l1_norm[i] <= 4.0 / 
3.0 * diag_tmp[i]) { l1_norm[i] = diag_tmp[i]; } } } } else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */ { /* Set the diag element */ hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 0); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if ( exec == HYPRE_EXEC_DEVICE) { thrust::identity<HYPRE_Complex> identity; HYPRE_THRUST_CALL( replace_if, l1_norm, l1_norm + num_rows, thrust::not1(identity), 1.0 ); } else #endif { for (i = 0; i < num_rows; i++) { if (l1_norm[i] == 0.0) { l1_norm[i] = 1.0; } } } *l1_norm_ptr = l1_norm; return hypre_error_flag; } /* Handle negative definite matrices */ if (!diag_tmp) { diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp); } /* Set the diag element */ hypre_CSRMatrixExtractDiagonal(A_diag, diag_tmp, 0); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { HYPRE_THRUST_CALL( transform_if, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm, thrust::negate<HYPRE_Real>(), is_negative<HYPRE_Real>() ); //bool any_zero = HYPRE_THRUST_CALL( any_of, l1_norm, l1_norm + num_rows, thrust::not1(thrust::identity<HYPRE_Complex>()) ); bool any_zero = 0.0 == HYPRE_THRUST_CALL( reduce, l1_norm, l1_norm + num_rows, 1.0, thrust::minimum<HYPRE_Real>() ); if ( any_zero ) { hypre_error_in_arg(1); } } else #endif { for (i = 0; i < num_rows; i++) { if (diag_tmp[i] < 0.0) { l1_norm[i] = -l1_norm[i]; } } for (i = 0; i < num_rows; i++) { /* if (fabs(l1_norm[i]) < DBL_EPSILON) */ if (fabs(l1_norm[i]) == 0.0) { hypre_error_in_arg(1); break; } } } hypre_TFree(cf_marker_offd, memory_location_tmp); hypre_TFree(diag_tmp, memory_location_tmp); *l1_norm_ptr = l1_norm; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDiagRows * * For every row containing only a diagonal element: set it to d. 
*--------------------------------------------------------------------------*/ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) __global__ void hypreCUDAKernel_ParCSRMatrixSetDiagRows(HYPRE_Int nrows, HYPRE_Int *A_diag_I, HYPRE_Int *A_diag_J, HYPRE_Complex *A_diag_data, HYPRE_Int *A_offd_I, HYPRE_Int num_cols_offd, HYPRE_Real d) { const HYPRE_Int i = hypre_cuda_get_grid_thread_id<1, 1>(); if (i >= nrows) { return; } HYPRE_Int j = read_only_load(&A_diag_I[i]); if ( (read_only_load(&A_diag_I[i + 1]) == j + 1) && (read_only_load(&A_diag_J[j]) == i) && (!num_cols_offd || (read_only_load(&A_offd_I[i + 1]) == read_only_load(&A_offd_I[i]))) ) { A_diag_data[j] = d; } } #endif HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d) { HYPRE_Int i, j; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { dim3 bDim = hypre_GetDefaultDeviceBlockDimension(); dim3 gDim = hypre_GetDefaultDeviceGridDimension(num_rows, "thread", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ParCSRMatrixSetDiagRows, gDim, bDim, num_rows, A_diag_I, A_diag_J, A_diag_data, A_offd_I, num_cols_offd, d); } else #endif { for (i = 0; i < num_rows; i++) { j = A_diag_I[i]; if ((A_diag_I[i + 1] == j + 1) && (A_diag_J[j] == i) && (!num_cols_offd || (A_offd_I[i + 1] == A_offd_I[i]))) { A_diag_data[j] = d; } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSCreate * * Allocate the AMS solver 
structure.
 * All options are set to their defaults; the problem data (A, G, vertex
 * coordinates, etc.) must be supplied via the Set functions before Setup.
 *--------------------------------------------------------------------------*/

void * hypre_AMSCreate()
{
   hypre_AMSData *ams_data;

   ams_data = hypre_CTAlloc(hypre_AMSData, 1, HYPRE_MEMORY_HOST);

   /* Default parameters */

   ams_data -> dim = 3;                /* 3D problem */
   ams_data -> maxit = 20;             /* perform at most 20 iterations */
   ams_data -> tol = 1e-6;             /* convergence tolerance */
   ams_data -> print_level = 1;        /* print residual norm at each step */
   ams_data -> cycle_type = 1;         /* a 3-level multiplicative solver */
   ams_data -> A_relax_type = 2;       /* offd-l1-scaled GS */
   ams_data -> A_relax_times = 1;      /* one relaxation sweep */
   ams_data -> A_relax_weight = 1.0;   /* damping parameter */
   ams_data -> A_omega = 1.0;          /* SSOR coefficient */
   ams_data -> A_cheby_order = 2;      /* Cheby: order (1-4 are valid) */
   ams_data -> A_cheby_fraction = .3;  /* Cheby: fraction of spectrum to smooth */

   ams_data -> B_G_coarsen_type = 10;  /* HMIS coarsening */
   ams_data -> B_G_agg_levels = 1;     /* Levels of aggressive coarsening */
   ams_data -> B_G_relax_type = 3;     /* hybrid G-S/Jacobi */
   ams_data -> B_G_theta = 0.25;       /* strength threshold */
   ams_data -> B_G_interp_type = 0;    /* interpolation type */
   ams_data -> B_G_Pmax = 0;           /* max nonzero elements in interp. rows */
   ams_data -> B_Pi_coarsen_type = 10; /* HMIS coarsening */
   ams_data -> B_Pi_agg_levels = 1;    /* Levels of aggressive coarsening */
   ams_data -> B_Pi_relax_type = 3;    /* hybrid G-S/Jacobi */
   ams_data -> B_Pi_theta = 0.25;      /* strength threshold */
   ams_data -> B_Pi_interp_type = 0;   /* interpolation type */
   ams_data -> B_Pi_Pmax = 0;          /* max nonzero elements in interp. rows */

   ams_data -> beta_is_zero = 0;       /* the problem has a mass term */

   /* By default, do l1-GS smoothing on the coarsest grid */
   ams_data -> B_G_coarse_relax_type  = 8;
   ams_data -> B_Pi_coarse_relax_type = 8;

   /* The rest of the fields are initialized using the Set functions */

   ams_data -> A    = NULL;
   ams_data -> G    = NULL;
   ams_data -> A_G  = NULL;
   ams_data -> B_G  = 0;
   ams_data -> Pi   = NULL;
   ams_data -> A_Pi = NULL;
   ams_data -> B_Pi = 0;

   ams_data -> x = NULL;
   ams_data -> y = NULL;
   ams_data -> z = NULL;

   ams_data -> Gx = NULL;
   ams_data -> Gy = NULL;
   ams_data -> Gz = NULL;

   /* temporary work vectors, allocated during Setup */
   ams_data -> r0 = NULL;
   ams_data -> g0 = NULL;
   ams_data -> r1 = NULL;
   ams_data -> g1 = NULL;
   ams_data -> r2 = NULL;
   ams_data -> g2 = NULL;
   ams_data -> zz = NULL;

   ams_data -> Pix = NULL;
   ams_data -> Piy = NULL;
   ams_data -> Piz = NULL;

   ams_data -> A_Pix = NULL;
   ams_data -> A_Piy = NULL;
   ams_data -> A_Piz = NULL;

   ams_data -> B_Pix = 0;
   ams_data -> B_Piy = 0;
   ams_data -> B_Piz = 0;

   /* zero-conductivity-region handling */
   ams_data -> interior_nodes = NULL;
   ams_data -> G0 = NULL;
   ams_data -> A_G0 = NULL;
   ams_data -> B_G0 = 0;
   ams_data -> projection_frequency = 5;

   ams_data -> A_l1_norms = NULL;
   ams_data -> A_max_eig_est = 0;
   ams_data -> A_min_eig_est = 0;

   /* ownership flags: what AMSDestroy is allowed to free */
   ams_data -> owns_Pi = 1;
   ams_data -> owns_A_G = 0;
   ams_data -> owns_A_Pi = 0;

   return (void *) ams_data;
}

/*--------------------------------------------------------------------------
 * hypre_AMSDestroy
 *
 * Deallocate the AMS solver structure. Note that the input data (given
 * through the Set functions) is not destroyed.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (!ams_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Only objects AMS owns (per the owns_* flags or because Setup built
      them internally) are destroyed here. */
   if (ams_data -> owns_A_G)
      if (ams_data -> A_G)
      {
         hypre_ParCSRMatrixDestroy(ams_data -> A_G);
      }
   if (!ams_data -> beta_is_zero)
      if (ams_data -> B_G)
      {
         HYPRE_BoomerAMGDestroy(ams_data -> B_G);
      }

   if (ams_data -> owns_Pi && ams_data -> Pi)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> Pi);
   }
   if (ams_data -> owns_A_Pi)
      if (ams_data -> A_Pi)
      {
         hypre_ParCSRMatrixDestroy(ams_data -> A_Pi);
      }
   if (ams_data -> B_Pi)
   {
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pi);
   }

   if (ams_data -> owns_Pi && ams_data -> Pix)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> Pix);
   }
   if (ams_data -> A_Pix)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> A_Pix);
   }
   if (ams_data -> B_Pix)
   {
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pix);
   }
   if (ams_data -> owns_Pi && ams_data -> Piy)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> Piy);
   }
   if (ams_data -> A_Piy)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piy);
   }
   if (ams_data -> B_Piy)
   {
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piy);
   }
   if (ams_data -> owns_Pi && ams_data -> Piz)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> Piz);
   }
   if (ams_data -> A_Piz)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piz);
   }
   if (ams_data -> B_Piz)
   {
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piz);
   }

   if (ams_data -> r0)
   {
      hypre_ParVectorDestroy(ams_data -> r0);
   }
   if (ams_data -> g0)
   {
      hypre_ParVectorDestroy(ams_data -> g0);
   }
   if (ams_data -> r1)
   {
      hypre_ParVectorDestroy(ams_data -> r1);
   }
   if (ams_data -> g1)
   {
      hypre_ParVectorDestroy(ams_data -> g1);
   }
   if (ams_data -> r2)
   {
      hypre_ParVectorDestroy(ams_data -> r2);
   }
   if (ams_data -> g2)
   {
      hypre_ParVectorDestroy(ams_data -> g2);
   }
   if (ams_data -> zz)
   {
      hypre_ParVectorDestroy(ams_data -> zz);
   }

   /* NOTE(review): when G0 is set, A is destroyed here too — this assumes
      Setup replaced the user's A with an internally-built (filtered) matrix
      in the zero-conductivity case; confirm against hypre_AMSSetup. */
   if (ams_data -> G0)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> A);
   }
   if (ams_data -> G0)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> G0);
   }
   if (ams_data -> A_G0)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> A_G0);
   }
   if (ams_data -> B_G0)
   {
      HYPRE_BoomerAMGDestroy(ams_data -> B_G0);
   }

   hypre_SeqVectorDestroy(ams_data -> A_l1_norms);

   /* G, x, y ,z, Gx, Gy and Gz are not destroyed */

   /* (the guard is redundant — ams_data was checked non-NULL above) */
   if (ams_data)
   {
      hypre_TFree(ams_data, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetDimension
 *
 * Set problem dimension (2 or 3). By default we assume dim = 3.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetDimension(void *solver, HYPRE_Int dim)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (dim != 1 && dim != 2 && dim != 3)
   {
      hypre_error_in_arg(2);
   }
   /* NOTE(review): dim is stored even when the error flag was raised above —
      the invalid value still takes effect; confirm this is intended. */
   ams_data -> dim = dim;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetDiscreteGradient
 *
 * Set the discrete gradient matrix G.
 * This function should be called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver,
                                       hypre_ParCSRMatrix *G)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* ownership stays with the caller; G is not destroyed by AMSDestroy */
   ams_data -> G = G;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetCoordinateVectors
 *
 * Set the x, y and z coordinates of the vertices in the mesh.
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver,
                                        hypre_ParVector *x,
                                        hypre_ParVector *y,
                                        hypre_ParVector *z)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* ownership stays with the caller; these are not destroyed by AMSDestroy */
   ams_data -> x = x;
   ams_data -> y = y;
   ams_data -> z = z;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetEdgeConstantVectors
 *
 * Set the vectors Gx, Gy and Gz which give the representations of
 * the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the
 * edge element basis.
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver,
                                          hypre_ParVector *Gx,
                                          hypre_ParVector *Gy,
                                          hypre_ParVector *Gz)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* ownership stays with the caller; these are not destroyed by AMSDestroy */
   ams_data -> Gx = Gx;
   ams_data -> Gy = Gy;
   ams_data -> Gz = Gz;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetInterpolations
 *
 * Set the (components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz].
 *
 * This function is generally intended to be used only for high-order Nedelec
 * discretizations (in the lowest order case, Pi is constructed internally in
 * AMS from the discrete gradient matrix and the coordinates of the vertices),
 * though it can also be used in the lowest-order case or for other types of
 * discretizations (e.g. ones based on the second family of Nedelec elements).
 *
 * By definition, Pi is the matrix representation of the linear operator that
 * interpolates (high-order) vector nodal finite elements into the (high-order)
 * Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0)
 * and similarly for Piy and Piz. Note that all these operators depend on the
 * choice of the basis and degrees of freedom in the high-order spaces.
 *
 * The column numbering of Pi should be node-based, i.e. the x/y/z components of
 * the first node (vertex or high-order dof) should be listed first, followed by
 * the x/y/z components of the second node and so on (see the documentation of
 * HYPRE_BoomerAMGSetDofFunc).
 *
 * If used, this function should be called before hypre_AMSSetup() and there is
 * no need to provide the vertex coordinates. Furthermore, only one of the sets
 * {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to provide
 * both). If Pix is NULL, then scalar Pi-based AMS cycles, i.e. those with
 * cycle_type > 10, will be unavailable. Similarly, AMS cycles based on
 * monolithic Pi (cycle_type < 10) require that Pi is not NULL.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetInterpolations(void *solver,
                                     hypre_ParCSRMatrix *Pi,
                                     hypre_ParCSRMatrix *Pix,
                                     hypre_ParCSRMatrix *Piy,
                                     hypre_ParCSRMatrix *Piz)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> Pi = Pi;
   ams_data -> Pix = Pix;
   ams_data -> Piy = Piy;
   ams_data -> Piz = Piz;
   /* user-supplied interpolations are never freed by AMSDestroy */
   ams_data -> owns_Pi = 0;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaPoissonMatrix
 *
 * Set the matrix corresponding to the Poisson problem with coefficient
 * alpha (the curl-curl term coefficient in the Maxwell problem).
 *
 * If this function is called, the coarse space solver on the range
 * of Pi^T is a block-diagonal version of A_Pi. If this function is not
 * called, the coarse space solver on the range of Pi^T is constructed
 * as Pi^T A Pi in hypre_AMSSetup().
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver, hypre_ParCSRMatrix *A_Pi) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_Pi = A_Pi; /* Penalize the eliminated degrees of freedom */ hypre_ParCSRMatrixSetDiagRows(A_Pi, HYPRE_REAL_MAX); /* Make sure that the first entry in each row is the diagonal one. */ /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */ return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetBetaPoissonMatrix * * Set the matrix corresponding to the Poisson problem with coefficient * beta (the mass term coefficient in the Maxwell problem). * * This function call is optional - if not given, the Poisson matrix will * be computed in hypre_AMSSetup(). If the given matrix is NULL, we assume * that beta is 0 and use two-level (instead of three-level) methods. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver, hypre_ParCSRMatrix *A_G) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_G = A_G; if (!A_G) { ams_data -> beta_is_zero = 1; } else { /* Penalize the eliminated degrees of freedom */ hypre_ParCSRMatrixSetDiagRows(A_G, HYPRE_REAL_MAX); /* Make sure that the first entry in each row is the diagonal one. */ /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */ } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetInteriorNodes * * Set the list of nodes which are interior to the zero-conductivity region. * A node is interior if interior_nodes[i] == 1.0. * * Should be called before hypre_AMSSetup()! 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetInteriorNodes(void *solver, hypre_ParVector *interior_nodes) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> interior_nodes = interior_nodes; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetProjectionFrequency * * How often to project the r.h.s. onto the compatible sub-space Ker(G0^T), * when iterating with the solver. * * The default value is every 5th iteration. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver, HYPRE_Int projection_frequency) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> projection_frequency = projection_frequency; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetMaxIter * * Set the maximum number of iterations in the three-level method. * The default value is 20. To use the AMS solver as a preconditioner, * set maxit to 1, tol to 0.0 and print_level to 0. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetMaxIter(void *solver, HYPRE_Int maxit) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> maxit = maxit; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetTol * * Set the convergence tolerance (if the method is used as a solver). * The default value is 1e-6. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetTol(void *solver, HYPRE_Real tol) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> tol = tol; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetCycleType * * Choose which three-level solver to use. 
Possible values are: * * 1 = 3-level multipl. solver (01210) <-- small solution time * 2 = 3-level additive solver (0+1+2) * 3 = 3-level multipl. solver (02120) * 4 = 3-level additive solver (010+2) * 5 = 3-level multipl. solver (0102010) <-- small solution time * 6 = 3-level additive solver (1+020) * 7 = 3-level multipl. solver (0201020) <-- small number of iterations * 8 = 3-level additive solver (0(1+2)0) <-- small solution time * 9 = 3-level multipl. solver (01210) with discrete divergence * 11 = 5-level multipl. solver (013454310) <-- small solution time, memory * 12 = 5-level additive solver (0+1+3+4+5) * 13 = 5-level multipl. solver (034515430) <-- small solution time, memory * 14 = 5-level additive solver (01(3+4+5)10) * 20 = 2-level multipl. solver (0[12]0) * * 0 = a Hiptmair-like smoother (010) * * The default value is 1. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetCycleType(void *solver, HYPRE_Int cycle_type) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> cycle_type = cycle_type; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetPrintLevel * * Control how much information is printed during the solution iterations. * The defaut values is 1 (print residual norm at each step). *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetPrintLevel(void *solver, HYPRE_Int print_level) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> print_level = print_level; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetSmoothingOptions * * Set relaxation parameters for A. Default values: 2, 1, 1.0, 1.0. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver, HYPRE_Int A_relax_type, HYPRE_Int A_relax_times, HYPRE_Real A_relax_weight, HYPRE_Real A_omega) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_relax_type = A_relax_type; ams_data -> A_relax_times = A_relax_times; ams_data -> A_relax_weight = A_relax_weight; ams_data -> A_omega = A_omega; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetChebySmoothingOptions * AB: note: this could be added to the above, * but I didn't want to change parameter list) * Set parameters for chebyshev smoother for A. Default values: 2,.3. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver, HYPRE_Int A_cheby_order, HYPRE_Int A_cheby_fraction) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_cheby_order = A_cheby_order; ams_data -> A_cheby_fraction = A_cheby_fraction; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetAlphaAMGOptions * * Set AMG parameters for B_Pi. Default values: 10, 1, 3, 0.25, 0, 0. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver, HYPRE_Int B_Pi_coarsen_type, HYPRE_Int B_Pi_agg_levels, HYPRE_Int B_Pi_relax_type, HYPRE_Real B_Pi_theta, HYPRE_Int B_Pi_interp_type, HYPRE_Int B_Pi_Pmax) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> B_Pi_coarsen_type = B_Pi_coarsen_type; ams_data -> B_Pi_agg_levels = B_Pi_agg_levels; ams_data -> B_Pi_relax_type = B_Pi_relax_type; ams_data -> B_Pi_theta = B_Pi_theta; ams_data -> B_Pi_interp_type = B_Pi_interp_type; ams_data -> B_Pi_Pmax = B_Pi_Pmax; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetAlphaAMGCoarseRelaxType * * Set the AMG coarsest level relaxation for B_Pi. Default value: 8. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver, HYPRE_Int B_Pi_coarse_relax_type) { hypre_AMSData *ams_data = (hypre_AMSData *)solver; ams_data -> B_Pi_coarse_relax_type = B_Pi_coarse_relax_type; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetBetaAMGOptions * * Set AMG parameters for B_G. Default values: 10, 1, 3, 0.25, 0, 0. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver, HYPRE_Int B_G_coarsen_type, HYPRE_Int B_G_agg_levels, HYPRE_Int B_G_relax_type, HYPRE_Real B_G_theta, HYPRE_Int B_G_interp_type, HYPRE_Int B_G_Pmax) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> B_G_coarsen_type = B_G_coarsen_type; ams_data -> B_G_agg_levels = B_G_agg_levels; ams_data -> B_G_relax_type = B_G_relax_type; ams_data -> B_G_theta = B_G_theta; ams_data -> B_G_interp_type = B_G_interp_type; ams_data -> B_G_Pmax = B_G_Pmax; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetBetaAMGCoarseRelaxType * * Set the AMG coarsest level relaxation for B_G. Default value: 8. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver, HYPRE_Int B_G_coarse_relax_type) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> B_G_coarse_relax_type = B_G_coarse_relax_type; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSComputePi * * Construct the Pi interpolation matrix, which maps the space of vector * linear finite elements to the space of edge finite elements. * * The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z], * where each block has the same sparsity structure as G, and the entries * can be computed from the vectors Gx, Gy, Gz. 
*--------------------------------------------------------------------------*/

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)

/* Expand each column index of G into 'dim' consecutive column indices of Pi:
 * G column j becomes Pi columns dim*j, ..., dim*j + dim - 1. */
__global__ void
hypreCUDAKernel_AMSComputePi_copy1(HYPRE_Int nnz,
                                   HYPRE_Int dim,
                                   HYPRE_Int *j_in,
                                   HYPRE_Int *j_out)
{
   const HYPRE_Int i = hypre_cuda_get_grid_thread_id<1, 1>();

   if (i < nnz)
   {
      const HYPRE_Int j = dim * i;

      for (HYPRE_Int d = 0; d < dim; d++)
      {
         j_out[j + d] = dim * read_only_load(&j_in[i]) + d;
      }
   }
}

/* One warp per row of G: for every nonzero G_ij in the row, write the dim
 * values 0.5*|G_ij|*G{x,y,z}[i] (or G{x,y,z}[i] when data_in is NULL) into
 * consecutive entries of data_out.  Only the first 'dim' pointers in
 * {Gx,Gy,Gz}_data are dereferenced. */
__global__ void
hypreCUDAKernel_AMSComputePi_copy2(HYPRE_Int nrows,
                                   HYPRE_Int dim,
                                   HYPRE_Int *i_in,
                                   HYPRE_Real *data_in,
                                   HYPRE_Real *Gx_data,
                                   HYPRE_Real *Gy_data,
                                   HYPRE_Real *Gz_data,
                                   HYPRE_Real *data_out)
{
   const HYPRE_Int i = hypre_cuda_get_grid_warp_id<1, 1>();

   if (i >= nrows)
   {
      return;
   }

   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int j, istart, iend;
   HYPRE_Real t, G[3], *Gdata[3];

   Gdata[0] = Gx_data;
   Gdata[1] = Gy_data;
   Gdata[2] = Gz_data;

   /* lanes 0 and 1 load the row's [start, end) offsets, then broadcast */
   if (lane_id < 2)
   {
      j = read_only_load(i_in + i + lane_id);
   }
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend   = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   /* lane d (d < dim) loads G{x,y,z}[i]; broadcast to the whole warp */
   if (lane_id < dim)
   {
      t = read_only_load(Gdata[lane_id] + i);
   }
   for (HYPRE_Int d = 0; d < dim; d++)
   {
      G[d] = __shfl_sync(HYPRE_WARP_FULL_MASK, t, d);
   }

   for (j = istart + lane_id; j < iend; j += HYPRE_WARP_SIZE)
   {
      const HYPRE_Real v = data_in ? fabs(read_only_load(&data_in[j])) * 0.5 : 1.0;
      const HYPRE_Int k = j * dim;

      for (HYPRE_Int d = 0; d < dim; d++)
      {
         data_out[k + d] = v * G[d];
      }
   }
}

#endif

/* Construct Pi = [Pi_x, Pi_y, Pi_z], mapping vector linear finite elements
 * to edge finite elements.  Each block shares the sparsity of G; the entry
 * for nonzero G_ij in block d is 0.5*|G_ij| * G{x,y,z}[i].
 *
 * A      - unused here; kept for interface compatibility.
 * G      - discrete gradient matrix (edges x vertices).
 * Gx/Gy/Gz - nodal coordinate-gradient vectors; Gy is read only when
 *          dim >= 2 and Gz only when dim == 3.
 * dim    - spatial dimension (1, 2 or 3).
 * Pi_ptr - output: the newly created Pi matrix (caller owns it). */
HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
                             hypre_ParCSRMatrix *G,
                             hypre_ParVector *Gx,
                             hypre_ParVector *Gy,
                             hypre_ParVector *Gz,
                             HYPRE_Int dim,
                             hypre_ParCSRMatrix **Pi_ptr)
{
   hypre_ParCSRMatrix *Pi;

   /* Compute Pi = [Pi_x, Pi_y, Pi_z] */
   {
      HYPRE_Int i, j, d;

      /* FIX(review): initialize Gy_data/Gz_data to NULL.  They are assigned
         only when dim >= 2 / dim == 3, but the device branch below passes
         both to hypreCUDAKernel_AMSComputePi_copy2 unconditionally; without
         the initialization that passes indeterminate pointer values (UB)
         for dim < 3.  The kernel dereferences only the first 'dim' of them,
         so NULL is safe. */
      HYPRE_Real *Gx_data, *Gy_data = NULL, *Gz_data = NULL;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = dim * hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      /* Pi has dim times the columns/nonzeros of G, same rows */
      HYPRE_Int num_cols_offd = dim * hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim * hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim * hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);

      col_starts_size = 2;
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
      {
         col_starts[i] = (HYPRE_BigInt)dim * col_starts_G[i];
      }

      Pi = hypre_ParCSRMatrixCreate(comm,
                                    global_num_rows,
                                    global_num_cols,
                                    row_starts,
                                    col_starts,
                                    num_cols_offd,
                                    num_nonzeros_diag,
                                    num_nonzeros_offd);

      hypre_ParCSRMatrixOwnsData(Pi) = 1;
      hypre_ParCSRMatrixInitialize(Pi);

      hypre_TFree(col_starts, HYPRE_MEMORY_HOST);

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      if (dim >= 2)
      {
         Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      }
      if (dim == 3)
      {
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
      }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(G),
                                                         hypre_ParCSRMatrixMemoryLocation(Pi) );
#endif

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
         HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
         HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
         HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* row pointers scale by dim; column indices expand by copy1;
               values are filled one warp per row by copy2 */
            HYPRE_THRUST_CALL( transform,
                               G_diag_I, G_diag_I + G_diag_nrows + 1, Pi_diag_I,
                               dim * _1 );

            dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
            dim3 gDim = hypre_GetDefaultDeviceGridDimension(G_diag_nnz, "thread", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim,
                               G_diag_nnz, dim, G_diag_J, Pi_diag_J );

            gDim = hypre_GetDefaultDeviceGridDimension(G_diag_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy2, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data,
                               Gx_data, Gy_data, Gz_data, Pi_diag_data );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows + 1; i++)
            {
               Pi_diag_I[i] = dim * G_diag_I[i];
            }

            for (i = 0; i < G_diag_nnz; i++)
               for (d = 0; d < dim; d++)
               {
                  Pi_diag_J[dim * i + d] = dim * G_diag_J[i] + d;
               }

            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i + 1]; j++)
               {
                  *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
                  if (dim >= 2)
                  {
                     *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
                  }
                  if (dim == 3)
                  {
                     *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
                  }
               }
         }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
         HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
         HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
         HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* row pointers only meaningful when there are off-proc columns */
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( transform,
                                  G_offd_I, G_offd_I + G_offd_nrows + 1, Pi_offd_I,
                                  dim * _1 );
            }

            dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
            dim3 gDim = hypre_GetDefaultDeviceGridDimension(G_offd_nnz, "thread", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim,
                               G_offd_nnz, dim, G_offd_J, Pi_offd_J );

            gDim = hypre_GetDefaultDeviceGridDimension(G_offd_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy2, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data,
                               Gx_data, Gy_data, Gz_data, Pi_offd_data );
         }
         else
#endif
         {
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows + 1; i++)
               {
                  Pi_offd_I[i] = dim * G_offd_I[i];
               }

            for (i = 0; i < G_offd_nnz; i++)
               for (d = 0; d < dim; d++)
               {
                  Pi_offd_J[dim * i + d] = dim * G_offd_J[i] + d;
               }

            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i + 1]; j++)
               {
                  *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
                  if (dim >= 2)
                  {
                     *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
                  }
                  if (dim == 3)
                  {
                     *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
                  }
               }
         }

         /* expand the off-diagonal column map the same way as the indices */
         for (i = 0; i < G_offd_ncols; i++)
            for (d = 0; d < dim; d++)
            {
               Pi_cmap[dim * i + d] = (HYPRE_BigInt)dim * G_cmap[i] + (HYPRE_BigInt)d;
            }
      }
   }

   *Pi_ptr = Pi;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSComputePixyz
 *
 * Construct the components Pix, Piy, Piz of the interpolation matrix Pi,
 * which maps the space of vector
linear finite elements to the space of
 * edge finite elements.
 *
 * The construction is based on the fact that each component has the same
 * sparsity structure as G, and the entries can be computed from the vectors
 * Gx, Gy, Gz.
 *--------------------------------------------------------------------------*/

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)

/* One warp per row of G: for every nonzero G_ij in the row, write
 * v * G{x,y,z}[i] (v = 0.5*|G_ij|, or 1.0 when data_in is NULL) into the
 * same position j of the dim separate output arrays Odata[0..dim-1].
 * Only the first 'dim' of the G*_data / data_*_out pointers are used. */
__global__ void
hypreCUDAKernel_AMSComputePixyz_copy(HYPRE_Int nrows,
                                     HYPRE_Int dim,
                                     HYPRE_Int *i_in,
                                     HYPRE_Real *data_in,
                                     HYPRE_Real *Gx_data,
                                     HYPRE_Real *Gy_data,
                                     HYPRE_Real *Gz_data,
                                     HYPRE_Real *data_x_out,
                                     HYPRE_Real *data_y_out,
                                     HYPRE_Real *data_z_out )
{
   const HYPRE_Int i = hypre_cuda_get_grid_warp_id<1, 1>();

   if (i >= nrows)
   {
      return;
   }

   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int j, istart, iend;
   HYPRE_Real t, G[3], *Gdata[3], *Odata[3];

   Gdata[0] = Gx_data;
   Gdata[1] = Gy_data;
   Gdata[2] = Gz_data;
   Odata[0] = data_x_out;
   Odata[1] = data_y_out;
   Odata[2] = data_z_out;

   /* lanes 0/1 read the row's [start, end) offsets, then broadcast */
   if (lane_id < 2)
   {
      j = read_only_load(i_in + i + lane_id);
   }
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   /* lane d (d < dim) loads G{x,y,z}[i]; broadcast to the whole warp */
   if (lane_id < dim)
   {
      t = read_only_load(Gdata[lane_id] + i);
   }
   for (HYPRE_Int d = 0; d < dim; d++)
   {
      G[d] = __shfl_sync(HYPRE_WARP_FULL_MASK, t, d);
   }

   for (j = istart + lane_id; j < iend; j += HYPRE_WARP_SIZE)
   {
      const HYPRE_Real v = data_in ? fabs(read_only_load(&data_in[j])) * 0.5 : 1.0;

      for (HYPRE_Int d = 0; d < dim; d++)
      {
         Odata[d][j] = v * G[d];
      }
   }
}

#endif

/* Build the separate components Pix, Piy, Piz (each with the sparsity of G).
 * Outputs: *Pix_ptr always; *Piy_ptr only when dim >= 2; *Piz_ptr only when
 * dim == 3 (the other output pointers are left untouched).  A is unused. */
HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A,
                                hypre_ParCSRMatrix *G,
                                hypre_ParVector *Gx,
                                hypre_ParVector *Gy,
                                hypre_ParVector *Gz,
                                HYPRE_Int dim,
                                hypre_ParCSRMatrix **Pix_ptr,
                                hypre_ParCSRMatrix **Piy_ptr,
                                hypre_ParCSRMatrix **Piz_ptr)
{
   hypre_ParCSRMatrix *Pix, *Piy, *Piz;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(G) );
#endif

   /* Compute Pix, Piy, Piz */
   {
      HYPRE_Int i, j;
      /* NOTE(review): Gy_data/Gz_data (and Piy/Piz) are assigned only for
         dim >= 2 / dim == 3; every use below is inside a branch that
         guarantees the corresponding assignment happened. */
      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(G);
      HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));

      Pix = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);
      hypre_ParCSRMatrixOwnsData(Pix) = 1;
      hypre_ParCSRMatrixInitialize(Pix);

      if (dim >= 2)
      {
         Piy = hypre_ParCSRMatrixCreate(comm,
                                        global_num_rows,
                                        global_num_cols,
                                        row_starts,
                                        col_starts,
                                        num_cols_offd,
                                        num_nonzeros_diag,
                                        num_nonzeros_offd);
         hypre_ParCSRMatrixOwnsData(Piy) = 1;
         hypre_ParCSRMatrixInitialize(Piy);
      }

      if (dim == 3)
      {
         Piz = hypre_ParCSRMatrixCreate(comm,
                                        global_num_rows,
                                        global_num_cols,
                                        row_starts,
                                        col_starts,
                                        num_cols_offd,
                                        num_nonzeros_diag,
                                        num_nonzeros_offd);
         hypre_ParCSRMatrixOwnsData(Piz) = 1;
         hypre_ParCSRMatrixInitialize(Piz);
      }

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      if (dim >= 2)
      {
         Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      }
      if (dim == 3)
      {
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
      }

      /* Fill-in the diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

         hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz);
         HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag);
         HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag);
         HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* copy G's row pointers and column indices into all three
               components at once via zipped iterators */
            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_I, G_diag_I, G_diag_I)),
                               G_diag_nrows + 1,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_I, Piy_diag_I, Piz_diag_I)) );

            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_J, G_diag_J, G_diag_J)),
                               G_diag_nnz,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_J, Piy_diag_J, Piz_diag_J)) );

            dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
            dim3 gDim = hypre_GetDefaultDeviceGridDimension(G_diag_nrows, "warp", bDim);

            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data, Gx_data, Gy_data, Gz_data,
                               Pix_diag_data, Piy_diag_data, Piz_diag_data );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows + 1; i++)
            {
               Pix_diag_I[i] = G_diag_I[i];
               Piy_diag_I[i] = G_diag_I[i];
               Piz_diag_I[i] = G_diag_I[i];
            }

            for (i = 0; i < G_diag_nnz; i++)
            {
               Pix_diag_J[i] = G_diag_J[i];
               Piy_diag_J[i] = G_diag_J[i];
               Piz_diag_J[i] = G_diag_J[i];
            }

            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i + 1]; j++)
               {
                  *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
                  *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
                  *Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
               }
         }
      }
      else if (dim == 2)
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_I, G_diag_I)),
                               G_diag_nrows + 1,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_I, Piy_diag_I)) );

            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_J, G_diag_J)),
                               G_diag_nnz,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_J, Piy_diag_J)) );

            dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
            dim3 gDim = hypre_GetDefaultDeviceGridDimension(G_diag_nrows, "warp", bDim);

            /* dim == 2: the z slots are passed as NULL and never touched */
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data, Gx_data, Gy_data, NULL,
                               Pix_diag_data, Piy_diag_data, NULL );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows + 1; i++)
            {
               Pix_diag_I[i] = G_diag_I[i];
               Piy_diag_I[i] = G_diag_I[i];
            }

            for (i = 0; i < G_diag_nnz; i++)
            {
               Pix_diag_J[i] = G_diag_J[i];
               Piy_diag_J[i] = G_diag_J[i];
            }

            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i + 1]; j++)
               {
                  *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
                  *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               }
         }
      }
      else
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            HYPRE_THRUST_CALL( copy_n,
                               G_diag_I,
                               G_diag_nrows + 1,
                               Pix_diag_I );

            HYPRE_THRUST_CALL( copy_n,
                               G_diag_J,
                               G_diag_nnz,
                               Pix_diag_J );

            dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
            dim3 gDim = hypre_GetDefaultDeviceGridDimension(G_diag_nrows, "warp", bDim);

            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data, Gx_data, NULL, NULL,
                               Pix_diag_data, NULL, NULL );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows + 1; i++)
            {
               Pix_diag_I[i] = G_diag_I[i];
            }

            for (i = 0; i < G_diag_nnz; i++)
            {
               Pix_diag_J[i] = G_diag_J[i];
            }

            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i + 1]; j++)
               {
                  *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               }
         }
      }

      /* Fill-in the off-diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz);
         HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd);
         HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd);
         HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
         HYPRE_BigInt *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* row pointers copied only when there are off-proc columns */
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( copy_n,
                                  thrust::make_zip_iterator(thrust::make_tuple(G_offd_I, G_offd_I, G_offd_I)),
                                  G_offd_nrows + 1,
                                  thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_I, Piy_offd_I, Piz_offd_I)) );
            }

            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_offd_J, G_offd_J, G_offd_J)),
                               G_offd_nnz,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_J, Piy_offd_J, Piz_offd_J)) );

            dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
            dim3 gDim = hypre_GetDefaultDeviceGridDimension(G_offd_nrows, "warp", bDim);

            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data, Gx_data, Gy_data, Gz_data,
                               Pix_offd_data, Piy_offd_data, Piz_offd_data );
         }
         else
#endif
         {
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows + 1; i++)
               {
                  Pix_offd_I[i] = G_offd_I[i];
                  Piy_offd_I[i] = G_offd_I[i];
                  Piz_offd_I[i] = G_offd_I[i];
               }

            for (i = 0; i < G_offd_nnz; i++)
            {
               Pix_offd_J[i] = G_offd_J[i];
               Piy_offd_J[i] = G_offd_J[i];
               Piz_offd_J[i] = G_offd_J[i];
            }

            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i + 1]; j++)
               {
                  *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
                  *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
                  *Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
               }
         }

         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
            Piz_cmap[i] = G_cmap[i];
         }
      }
      else if (dim == 2)
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( copy_n,
                                  thrust::make_zip_iterator(thrust::make_tuple(G_offd_I, G_offd_I)),
                                  G_offd_nrows + 1,
                                  thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_I, Piy_offd_I)) );
            }

            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_offd_J, G_offd_J)),
                               G_offd_nnz,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_J, Piy_offd_J)) );

            dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
            dim3 gDim = hypre_GetDefaultDeviceGridDimension(G_offd_nrows, "warp", bDim);

            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data, Gx_data, Gy_data, NULL,
                               Pix_offd_data, Piy_offd_data, NULL );
         }
         else
#endif
         {
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows + 1; i++)
               {
                  Pix_offd_I[i] = G_offd_I[i];
                  Piy_offd_I[i] = G_offd_I[i];
               }

            for (i = 0; i < G_offd_nnz; i++)
            {
               Pix_offd_J[i] = G_offd_J[i];
               Piy_offd_J[i] = G_offd_J[i];
            }

            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i + 1]; j++)
               {
                  *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
                  *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               }
         }

         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
         }
      }
      else
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( copy_n,
                                  G_offd_I,
                                  G_offd_nrows + 1,
                                  Pix_offd_I );
            }

            HYPRE_THRUST_CALL( copy_n,
                               G_offd_J,
                               G_offd_nnz,
                               Pix_offd_J );

            dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
            dim3 gDim = hypre_GetDefaultDeviceGridDimension(G_offd_nrows, "warp", bDim);

            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data, Gx_data, NULL, NULL,
                               Pix_offd_data, NULL, NULL );
         }
         else
#endif
         {
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows + 1; i++)
               {
                  Pix_offd_I[i] = G_offd_I[i];
               }

            for (i = 0; i < G_offd_nnz; i++)
            {
               Pix_offd_J[i] = G_offd_J[i];
            }

            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i + 1]; j++)
               {
                  *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               }
         }

         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
         }
      }
   }

   *Pix_ptr = Pix;
   if (dim >= 2)
   {
      *Piy_ptr = Piy;
   }
   if (dim == 3)
   {
      *Piz_ptr = Piz;
   }

   return hypre_error_flag;
}

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)

/* One warp per row of G: for every nonzero G_ij, emit dim consecutive
 * output entries: the raw value G_ij first (the G block of [G,Pi]), then
 * 0.5*|G_ij|*G{x,y,z}[i] for the remaining dim-1 slots.  Here 'dim' is the
 * augmented dimension (spatial dim + 1), so only dim-1 gradient vectors
 * are read. */
__global__ void
hypreCUDAKernel_AMSComputeGPi_copy2(HYPRE_Int nrows,
                                    HYPRE_Int dim,
                                    HYPRE_Int *i_in,
                                    HYPRE_Real *data_in,
                                    HYPRE_Real *Gx_data,
                                    HYPRE_Real *Gy_data,
                                    HYPRE_Real *Gz_data,
                                    HYPRE_Real *data_out)
{
   const HYPRE_Int i = hypre_cuda_get_grid_warp_id<1, 1>();

   if (i >= nrows)
   {
      return;
   }

   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int j, istart, iend;
   HYPRE_Real t, G[3], *Gdata[3];

   Gdata[0] = Gx_data;
   Gdata[1] = Gy_data;
   Gdata[2] = Gz_data;

   /* lanes 0/1 read the row's [start, end) offsets, then broadcast */
   if (lane_id < 2)
   {
      j = read_only_load(i_in + i + lane_id);
   }
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   /* lane d (d < dim-1) loads G{x,y,z}[i]; broadcast to the whole warp */
   if (lane_id < dim - 1)
   {
      t = read_only_load(Gdata[lane_id] + i);
   }
   for (HYPRE_Int d = 0; d < dim - 1; d++)
   {
      G[d] = __shfl_sync(HYPRE_WARP_FULL_MASK, t, d);
   }

   for (j = istart + lane_id; j < iend; j += HYPRE_WARP_SIZE)
   {
      const HYPRE_Real u = read_only_load(&data_in[j]);
      const HYPRE_Real v = fabs(u) * 0.5;
      const HYPRE_Int k = j * dim;

      data_out[k] = u;

      for (HYPRE_Int d = 0; d < dim - 1; d++)
      {
         data_out[k + d + 1] = v * G[d];
      }
   }
}
#endif

/*--------------------------------------------------------------------------
 * hypre_AMSComputeGPi
* * Construct the matrix [G,Pi] which can be considered an interpolation * matrix from S_h^4 (4 copies of the scalar linear finite element space) * to the edge finite elements space. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *G, hypre_ParVector *Gx, hypre_ParVector *Gy, hypre_ParVector *Gz, HYPRE_Int dim, hypre_ParCSRMatrix **GPi_ptr) { hypre_ParCSRMatrix *GPi; /* Take into account G */ dim++; /* Compute GPi = [Pi_x, Pi_y, Pi_z, G] */ { HYPRE_Int i, j, d; HYPRE_Real *Gx_data, *Gy_data, *Gz_data; MPI_Comm comm = hypre_ParCSRMatrixComm(G); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G); HYPRE_BigInt global_num_cols = dim * hypre_ParCSRMatrixGlobalNumCols(G); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G); HYPRE_BigInt *col_starts; HYPRE_Int col_starts_size; HYPRE_Int num_cols_offd = dim * hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G)); HYPRE_Int num_nonzeros_diag = dim * hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G)); HYPRE_Int num_nonzeros_offd = dim * hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G)); HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G); col_starts_size = 2; col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST); for (i = 0; i < col_starts_size; i++) { col_starts[i] = (HYPRE_BigInt) dim * col_starts_G[i]; } GPi = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixOwnsData(GPi) = 1; hypre_ParCSRMatrixInitialize(GPi); Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx)); if (dim >= 3) { Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy)); } if (dim == 4) { Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz)); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( 
hypre_ParCSRMatrixMemoryLocation(G), hypre_ParCSRMatrixMemoryLocation(GPi) ); #endif /* Fill-in the diagonal part */ { hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G); HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag); HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag); HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag); HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag); HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag); hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi); HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag); HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag); HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { HYPRE_THRUST_CALL( transform, G_diag_I, G_diag_I + G_diag_nrows + 1, GPi_diag_I, dim * _1 ); dim3 bDim = hypre_GetDefaultDeviceBlockDimension(); dim3 gDim = hypre_GetDefaultDeviceGridDimension(G_diag_nnz, "thread", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim, G_diag_nnz, dim, G_diag_J, GPi_diag_J ); gDim = hypre_GetDefaultDeviceGridDimension(G_diag_nrows, "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputeGPi_copy2, gDim, bDim, G_diag_nrows, dim, G_diag_I, G_diag_data, Gx_data, Gy_data, Gz_data, GPi_diag_data ); } else #endif { for (i = 0; i < G_diag_nrows + 1; i++) { GPi_diag_I[i] = dim * G_diag_I[i]; } for (i = 0; i < G_diag_nnz; i++) for (d = 0; d < dim; d++) { GPi_diag_J[dim * i + d] = dim * G_diag_J[i] + d; } for (i = 0; i < G_diag_nrows; i++) for (j = G_diag_I[i]; j < G_diag_I[i + 1]; j++) { *GPi_diag_data++ = G_diag_data[j]; *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i]; if (dim >= 3) { *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i]; } if (dim == 4) { *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i]; } } } } /* Fill-in the off-diagonal part */ { hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G); HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd); 
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd); HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd); HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd); HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd); HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd); hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi); HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd); HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd); HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd); HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G); HYPRE_BigInt *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { if (G_offd_ncols) { HYPRE_THRUST_CALL( transform, G_offd_I, G_offd_I + G_offd_nrows + 1, GPi_offd_I, dim * _1 ); } dim3 bDim = hypre_GetDefaultDeviceBlockDimension(); dim3 gDim = hypre_GetDefaultDeviceGridDimension(G_offd_nnz, "thread", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim, G_offd_nnz, dim, G_offd_J, GPi_offd_J ); gDim = hypre_GetDefaultDeviceGridDimension(G_offd_nrows, "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputeGPi_copy2, gDim, bDim, G_offd_nrows, dim, G_offd_I, G_offd_data, Gx_data, Gy_data, Gz_data, GPi_offd_data ); } else #endif { if (G_offd_ncols) for (i = 0; i < G_offd_nrows + 1; i++) { GPi_offd_I[i] = dim * G_offd_I[i]; } for (i = 0; i < G_offd_nnz; i++) for (d = 0; d < dim; d++) { GPi_offd_J[dim * i + d] = dim * G_offd_J[i] + d; } for (i = 0; i < G_offd_nrows; i++) for (j = G_offd_I[i]; j < G_offd_I[i + 1]; j++) { *GPi_offd_data++ = G_offd_data[j]; *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i]; if (dim >= 3) { *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i]; } if (dim == 4) { *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i]; } } } for (i = 0; i < G_offd_ncols; i++) for (d = 0; d < dim; d++) { GPi_cmap[dim * i + d] = dim * G_cmap[i] + d; } } } *GPi_ptr = GPi; return hypre_error_flag; 
} /*-------------------------------------------------------------------------- * hypre_AMSSetup * * Construct the AMS solver components. * * The following functions need to be called before hypre_AMSSetup(): * - hypre_AMSSetDimension() (if solving a 2D problem) * - hypre_AMSSetDiscreteGradient() * - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors *--------------------------------------------------------------------------*/ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) __global__ void hypreCUDAKernel_FixInterNodes( HYPRE_Int nrows, HYPRE_Int *G0t_diag_i, HYPRE_Complex *G0t_diag_data, HYPRE_Int *G0t_offd_i, HYPRE_Complex *G0t_offd_data, HYPRE_Real *interior_nodes_data) { HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1, 1>(); if (row_i >= nrows) { return; } HYPRE_Int lane = hypre_cuda_get_lane_id<1>(); HYPRE_Int not1 = 0; if (lane == 0) { not1 = read_only_load(&interior_nodes_data[row_i]) != 1.0; } not1 = __shfl_sync(HYPRE_WARP_FULL_MASK, not1, 0); if (!not1) { return; } HYPRE_Int p1, q1, p2 = 0, q2 = 0; bool nonempty_offd = G0t_offd_data != NULL; if (lane < 2) { p1 = read_only_load(G0t_diag_i + row_i + lane); if (nonempty_offd) { p2 = read_only_load(G0t_offd_i + row_i + lane); } } q1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 1); p1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 0); if (nonempty_offd) { q2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 1); p2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 0); } for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE) { G0t_diag_data[j] = 0.0; } for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE) { G0t_offd_data[j] = 0.0; } } __global__ void hypreCUDAKernel_AMSSetupScaleGGt( HYPRE_Int Gt_num_rows, HYPRE_Int *Gt_diag_i, HYPRE_Int *Gt_diag_j, HYPRE_Real *Gt_diag_data, HYPRE_Int *Gt_offd_i, HYPRE_Real *Gt_offd_data, HYPRE_Real *Gx_data, HYPRE_Real *Gy_data, HYPRE_Real *Gz_data ) { HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1, 1>(); if (row_i >= Gt_num_rows) { return; } HYPRE_Int lane = 
hypre_cuda_get_lane_id<1>(); HYPRE_Real h2 = 0.0; HYPRE_Int ne, p1, q1, p2 = 0, q2 = 0; if (lane < 2) { p1 = read_only_load(Gt_diag_i + row_i + lane); } q1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 1); p1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 0); ne = q1 - p1; if (ne == 0) { return; } if (Gt_offd_data != NULL) { if (lane < 2) { p2 = read_only_load(Gt_offd_i + row_i + lane); } q2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 1); p2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 0); } for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE) { const HYPRE_Int k = read_only_load(&Gt_diag_j[j]); const HYPRE_Real Gx = read_only_load(&Gx_data[k]); const HYPRE_Real Gy = read_only_load(&Gy_data[k]); const HYPRE_Real Gz = read_only_load(&Gz_data[k]); h2 += Gx * Gx + Gy * Gy + Gz * Gz; } h2 = warp_allreduce_sum(h2) / ne; for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE) { Gt_diag_data[j] *= h2; } for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE) { Gt_offd_data[j] *= h2; } } #endif HYPRE_Int hypre_AMSSetup(void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *b, hypre_ParVector *x) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); #endif hypre_AMSData *ams_data = (hypre_AMSData *) solver; HYPRE_Int input_info = 0; ams_data -> A = A; /* Modifications for problems with zero-conductivity regions */ if (ams_data -> interior_nodes) { hypre_ParCSRMatrix *G0t, *Aorig = A; /* Make sure that multiple Setup()+Solve() give identical results */ ams_data -> solve_counter = 0; /* Construct the discrete gradient matrix for the zero-conductivity region by eliminating the zero-conductivity nodes from G^t. The range of G0 represents the kernel of A, i.e. the gradients of nodal basis functions supported in zero-conductivity regions. 
*/ hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1); { HYPRE_Int i, j; HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G); hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t); HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td); HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td); hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t); HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to); HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to); HYPRE_Real *interior_nodes_data = hypre_VectorData( hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes)); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { dim3 bDim = hypre_GetDefaultDeviceBlockDimension(); dim3 gDim = hypre_GetDefaultDeviceGridDimension(nv, "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_FixInterNodes, gDim, bDim, nv, G0tdI, G0tdA, G0toI, G0toA, interior_nodes_data ); } else #endif { for (i = 0; i < nv; i++) { if (interior_nodes_data[i] != 1) { for (j = G0tdI[i]; j < G0tdI[i + 1]; j++) { G0tdA[j] = 0.0; } if (G0toI) for (j = G0toI[i]; j < G0toI[i + 1]; j++) { G0toA[j] = 0.0; } } } } } hypre_ParCSRMatrixTranspose(G0t, & ams_data -> G0, 1); /* Construct the subspace matrix A_G0 = G0^T G0 */ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_G0 = hypre_ParCSRMatMat(G0t, ams_data -> G0); } else #endif { ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0); } hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0); /* Create AMG solver for A_G0 */ HYPRE_BoomerAMGCreate(&ams_data -> B_G0); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few 
V-cycles */ HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0, ams_data -> B_G_coarse_relax_type, 3); HYPRE_BoomerAMGSetup(ams_data -> B_G0, (HYPRE_ParCSRMatrix)ams_data -> A_G0, 0, 0); /* Construct the preconditioner for ams_data->A = A + G0 G0^T. NOTE: this can be optimized significantly by taking into account that the sparsity pattern of A is subset of the sparsity pattern of G0 G0^T */ { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_ParCSRMatrix *A; if (exec == HYPRE_EXEC_DEVICE) { A = hypre_ParCSRMatMat(ams_data -> G0, G0t); } else #endif { A = hypre_ParMatmul(ams_data -> G0, G0t); } hypre_ParCSRMatrix *B = Aorig; hypre_ParCSRMatrix **C_ptr = &ams_data -> A; hypre_ParCSRMatrix *C; HYPRE_Real factor, lfactor; /* scale (penalize) G0 G0^T before adding it to the matrix */ { HYPRE_Int i; HYPRE_Int B_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(B)); HYPRE_Real *B_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(B)); HYPRE_Real *B_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(B)); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(B)); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(B)); lfactor = -1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { HYPRE_Int nnz_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int nnz_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); #if defined(HYPRE_DEBUG) HYPRE_Int nnz; hypre_TMemcpy(&nnz, &B_diag_i[B_num_rows], HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_assert(nnz == nnz_diag); 
hypre_TMemcpy(&nnz, &B_offd_i[B_num_rows], HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_assert(nnz == nnz_offd); #endif if (nnz_diag) { lfactor = HYPRE_THRUST_CALL( reduce, thrust::make_transform_iterator(B_diag_data, absolute_value<HYPRE_Real>()), thrust::make_transform_iterator(B_diag_data + nnz_diag, absolute_value<HYPRE_Real>()), -1.0, thrust::maximum<HYPRE_Real>() ); } if (nnz_offd) { lfactor = HYPRE_THRUST_CALL( reduce, thrust::make_transform_iterator(B_offd_data, absolute_value<HYPRE_Real>()), thrust::make_transform_iterator(B_offd_data + nnz_offd, absolute_value<HYPRE_Real>()), lfactor, thrust::maximum<HYPRE_Real>() ); } } else #endif { for (i = 0; i < B_diag_i[B_num_rows]; i++) if (fabs(B_diag_data[i]) > lfactor) { lfactor = fabs(B_diag_data[i]); } for (i = 0; i < B_offd_i[B_num_rows]; i++) if (fabs(B_offd_data[i]) > lfactor) { lfactor = fabs(B_offd_data[i]); } } lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */ hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A)); } hypre_ParCSRMatrixAdd(factor, A, 1.0, B, &C); /*hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)); HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)); HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)); HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B)); HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); A_local = hypre_MergeDiagAndOffd(A); 
B_local = hypre_MergeDiagAndOffd(B);*/ /* scale (penalize) G0 G0^T before adding it to the matrix */ /*{ HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local); HYPRE_Real *data = hypre_CSRMatrixData(A_local); HYPRE_Real *dataB = hypre_CSRMatrixData(B_local); HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local); HYPRE_Real factor, lfactor; lfactor = -1; for (i = 0; i < nnzB; i++) if (fabs(dataB[i]) > lfactor) lfactor = fabs(dataB[i]); lfactor *= 1e-10; hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A)); for (i = 0; i < nnz; i++) data[i] *= factor; } C_tmp = hypre_CSRMatrixBigAdd(A_local, B_local); C_local = hypre_CSRMatrixBigDeleteZeros(C_tmp,0.0); if (C_local) hypre_CSRMatrixDestroy(C_tmp); else C_local = C_tmp; C = hypre_ParCSRMatrixCreate (comm, global_num_rows, global_num_cols, row_starts, col_starts, A_num_cols_offd + B_num_cols_offd, A_num_nonzeros_diag + B_num_nonzeros_diag, A_num_nonzeros_offd + B_num_nonzeros_offd); GenerateDiagAndOffd(C_local, C, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A)); hypre_CSRMatrixDestroy(A_local); hypre_CSRMatrixDestroy(B_local); hypre_CSRMatrixDestroy(C_local); */ hypre_ParCSRMatrixDestroy(A); *C_ptr = C; } hypre_ParCSRMatrixDestroy(G0t); } /* Make sure that the first entry in each row is the diagonal one. */ /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */ /* Compute the l1 norm of the rows of A */ if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4) { HYPRE_Real *l1_norm_data = NULL; hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type, NULL, &l1_norm_data); ams_data -> A_l1_norms = hypre_SeqVectorCreate(hypre_ParCSRMatrixNumRows(ams_data -> A)); hypre_VectorData(ams_data -> A_l1_norms) = l1_norm_data; hypre_SeqVectorInitialize_v2(ams_data -> A_l1_norms, hypre_ParCSRMatrixMemoryLocation(ams_data -> A)); } /* Chebyshev? 
*/ if (ams_data -> A_relax_type == 16) { hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10, &ams_data->A_max_eig_est, &ams_data->A_min_eig_est); } /* If not given, compute Gx, Gy and Gz */ { if (ams_data -> x != NULL && (ams_data -> dim == 1 || ams_data -> y != NULL) && (ams_data -> dim <= 2 || ams_data -> z != NULL)) { input_info = 1; } if (ams_data -> Gx != NULL && (ams_data -> dim == 1 || ams_data -> Gy != NULL) && (ams_data -> dim <= 2 || ams_data -> Gz != NULL)) { input_info = 2; } if (input_info == 1) { ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx); if (ams_data -> dim >= 2) { ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy); } if (ams_data -> dim == 3) { ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz); } } } if (ams_data -> Pi == NULL && ams_data -> Pix == NULL) { if (ams_data -> cycle_type == 20) /* Construct the combined interpolation matrix [G,Pi] */ hypre_AMSComputeGPi(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pi); else if (ams_data -> cycle_type > 10) /* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */ hypre_AMSComputePixyz(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pix, &ams_data -> Piy, &ams_data -> Piz); else /* Construct the Pi interpolation matrix */ hypre_AMSComputePi(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pi); } /* Keep Gx, Gy and Gz only if use the method with discrete divergence stabilization (where we use them to compute the local mesh size). 
*/ if (input_info == 1 && ams_data -> cycle_type != 9) { hypre_ParVectorDestroy(ams_data -> Gx); if (ams_data -> dim >= 2) { hypre_ParVectorDestroy(ams_data -> Gy); } if (ams_data -> dim == 3) { hypre_ParVectorDestroy(ams_data -> Gz); } } /* Create the AMG solver on the range of G^T */ if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20) { HYPRE_BoomerAMGCreate(&ams_data -> B_G); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G, ams_data -> B_G_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) { HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2); } /* If not given, construct the coarse space matrix by RAP */ if (!ams_data -> A_G) { if (!hypre_ParCSRMatrixCommPkg(ams_data -> G)) { hypre_MatvecCommPkgCreate(ams_data -> G); } if (!hypre_ParCSRMatrixCommPkg(ams_data -> A)) { hypre_MatvecCommPkgCreate(ams_data -> A); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_G = hypre_ParCSRMatrixRAPKT(ams_data -> G, ams_data -> A, ams_data -> G, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> G, ams_data -> A, ams_data -> G, &ams_data -> A_G); } /* Make sure that A_G has no zero rows (this can happen if 
beta is zero in part of the domain). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G); ams_data -> owns_A_G = 1; } HYPRE_BoomerAMGSetup(ams_data -> B_G, (HYPRE_ParCSRMatrix)ams_data -> A_G, NULL, NULL); } if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20) /* Create the AMG solvers on the range of Pi{x,y,z}^T */ { HYPRE_BoomerAMGCreate(&ams_data -> B_Pix); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2); HYPRE_BoomerAMGCreate(&ams_data -> B_Piy); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2); HYPRE_BoomerAMGCreate(&ams_data -> B_Piz); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type); 
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2); /* Generally, don't use exact solve on the coarsest level (matrices may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_coarse_relax_type, 3); HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_coarse_relax_type, 3); HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) { HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2); } /* Construct the coarse space matrices by RAP */ if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix)) { hypre_MatvecCommPkgCreate(ams_data -> Pix); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Pix = hypre_ParCSRMatrixRAPKT(ams_data -> Pix, ams_data -> A, ams_data -> Pix, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix, ams_data -> A, ams_data -> Pix, &ams_data -> A_Pix); } /* Make sure that A_Pix has no zero rows (this can happen for some kinds of boundary conditions with contact). 
*/ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix); HYPRE_BoomerAMGSetup(ams_data -> B_Pix, (HYPRE_ParCSRMatrix)ams_data -> A_Pix, NULL, NULL); if (ams_data -> Piy) { if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy)) { hypre_MatvecCommPkgCreate(ams_data -> Piy); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Piy = hypre_ParCSRMatrixRAPKT(ams_data -> Piy, ams_data -> A, ams_data -> Piy, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy, ams_data -> A, ams_data -> Piy, &ams_data -> A_Piy); } /* Make sure that A_Piy has no zero rows (this can happen for some kinds of boundary conditions with contact). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy); HYPRE_BoomerAMGSetup(ams_data -> B_Piy, (HYPRE_ParCSRMatrix)ams_data -> A_Piy, NULL, NULL); } if (ams_data -> Piz) { if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz)) { hypre_MatvecCommPkgCreate(ams_data -> Piz); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Piz = hypre_ParCSRMatrixRAPKT(ams_data -> Piz, ams_data -> A, ams_data -> Piz, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz, ams_data -> A, ams_data -> Piz, &ams_data -> A_Piz); } /* Make sure that A_Piz has no zero rows (this can happen for some kinds of boundary conditions with contact). 
*/ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz); HYPRE_BoomerAMGSetup(ams_data -> B_Piz, (HYPRE_ParCSRMatrix)ams_data -> A_Piz, NULL, NULL); } } else /* Create the AMG solver on the range of Pi^T */ { HYPRE_BoomerAMGCreate(&ams_data -> B_Pi); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) { HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2); } /* If not given, construct the coarse space matrix by RAP and notify BoomerAMG that this is a dim x dim block system. 
*/ if (!ams_data -> A_Pi) { if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi)) { hypre_MatvecCommPkgCreate(ams_data -> Pi); } if (!hypre_ParCSRMatrixCommPkg(ams_data -> A)) { hypre_MatvecCommPkgCreate(ams_data -> A); } if (ams_data -> cycle_type == 9) { /* Add a discrete divergence term to A before computing Pi^t A Pi */ { hypre_ParCSRMatrix *Gt, *GGt, *ApGGt; hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1); /* scale GGt by h^2 */ { HYPRE_Real h2; HYPRE_Int i, j, k, ne; hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt); HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag); HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag); HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag); HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag); hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt); HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd); HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd); HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx)); HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy)); HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz)); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { dim3 bDim = hypre_GetDefaultDeviceBlockDimension(); dim3 gDim = hypre_GetDefaultDeviceGridDimension(Gt_num_rows, "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSSetupScaleGGt, gDim, bDim, Gt_num_rows, Gt_diag_I, Gt_diag_J, Gt_diag_data, Gt_offd_I, Gt_offd_data, Gx_data, Gy_data, Gz_data ); } else #endif { for (i = 0; i < Gt_num_rows; i++) { /* determine the characteristic mesh size for vertex i */ h2 = 0.0; ne = 0; for (j = Gt_diag_I[i]; j < Gt_diag_I[i + 1]; j++) { k = Gt_diag_J[j]; h2 += Gx_data[k] * Gx_data[k] + Gy_data[k] * Gy_data[k] + Gz_data[k] * Gz_data[k]; ne++; } if (ne != 0) { h2 /= ne; for (j = Gt_diag_I[i]; j < Gt_diag_I[i + 1]; j++) { Gt_diag_data[j] *= h2; } for (j = Gt_offd_I[i]; j < Gt_offd_I[i + 1]; j++) { 
Gt_offd_data[j] *= h2; } } } } } /* we only needed Gx, Gy and Gz to compute the local mesh size */ if (input_info == 1) { hypre_ParVectorDestroy(ams_data -> Gx); if (ams_data -> dim >= 2) { hypre_ParVectorDestroy(ams_data -> Gy); } if (ams_data -> dim == 3) { hypre_ParVectorDestroy(ams_data -> Gz); } } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { GGt = hypre_ParCSRMatMat(ams_data -> G, Gt); } #endif else { GGt = hypre_ParMatmul(ams_data -> G, Gt); } hypre_ParCSRMatrixDestroy(Gt); /* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */ hypre_ParCSRMatrixAdd(1.0, GGt, 1.0, ams_data -> A, &ApGGt); /*{ hypre_ParCSRMatrix *A = GGt; hypre_ParCSRMatrix *B = ams_data -> A; hypre_ParCSRMatrix **C_ptr = &ApGGt; hypre_ParCSRMatrix *C; hypre_CSRMatrix *A_local, *B_local, *C_local; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)); HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)); HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)); HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B)); HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); A_local = hypre_MergeDiagAndOffd(A); B_local = hypre_MergeDiagAndOffd(B); C_local = hypre_CSRMatrixBigAdd(A_local, B_local); hypre_CSRMatrixBigJtoJ(C_local); C = hypre_ParCSRMatrixCreate (comm, global_num_rows, global_num_cols, row_starts, col_starts, A_num_cols_offd + B_num_cols_offd, A_num_nonzeros_diag + B_num_nonzeros_diag, A_num_nonzeros_offd + B_num_nonzeros_offd); 
GenerateDiagAndOffd(C_local, C, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A)); hypre_CSRMatrixDestroy(A_local); hypre_CSRMatrixDestroy(B_local); hypre_CSRMatrixDestroy(C_local); *C_ptr = C; }*/ hypre_ParCSRMatrixDestroy(GGt); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Pi = hypre_ParCSRMatrixRAPKT(ams_data -> Pi, ApGGt, ams_data -> Pi, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi, ApGGt, ams_data -> Pi, &ams_data -> A_Pi); } } } else { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Pi = hypre_ParCSRMatrixRAPKT(ams_data -> Pi, ams_data -> A, ams_data -> Pi, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi, ams_data -> A, ams_data -> Pi, &ams_data -> A_Pi); } } ams_data -> owns_A_Pi = 1; if (ams_data -> cycle_type != 20) { HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim); } else { HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1); } /* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */ } /* Make sure that A_Pi has no zero rows (this can happen for some kinds of boundary conditions with contact). 
*/
      hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi);
      HYPRE_BoomerAMGSetup(ams_data -> B_Pi,
                           (HYPRE_ParCSRMatrix)ams_data -> A_Pi,
                           0, 0);
   }

   /* Allocate temporary vectors */
   ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A);
   ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A);
   if (ams_data -> A_G)
   {
      ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
      ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
   }
   /* fall back to the range of A_Pix when no gradient-space matrix exists */
   if (ams_data -> r1 == NULL && ams_data -> A_Pix)
   {
      ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
      ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
   }
   if (ams_data -> Pi)
   {
      ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
      ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSolve
 *
 * Solve the system A x = b.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSolve(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, my_id = -1;
   HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid;
   char cycle[30];
   hypre_ParCSRMatrix *Ai[5], *Pi[5];
   HYPRE_Solver Bi[5];
   HYPRE_PtrToSolverFcn HBi[5];
   hypre_ParVector *ri[5], *gi[5];
   HYPRE_Int needZ = 0;

   hypre_ParVector *z = ams_data -> zz;

   /* Subspace tables used by hypre_ParCSRSubspacePrec: index 0 is the
      gradient space (G), 1 the full nodal Pi space, 2-4 the scalar
      Pix/Piy/Piz spaces. */
   Ai[0] = ams_data -> A_G;   Pi[0] = ams_data -> G;
   Ai[1] = ams_data -> A_Pi;  Pi[1] = ams_data -> Pi;
   Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix;
   Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy;
   Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz;

   /* Only the (vector) Pi space uses the block-AMG solve */
   Bi[0] = ams_data -> B_G;   HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[1] = ams_data -> B_Pi;  HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve;
   Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;

   /* Temporary vectors: r1/g1 are shared by all scalar subspaces */
   ri[0] = ams_data -> r1; gi[0] = ams_data -> g1;
   ri[1] = ams_data -> r2; gi[1] = ams_data -> g2;
   ri[2] = ams_data -> r1; gi[2] = ams_data -> g1;
   ri[3] = ams_data -> r1; gi[3] = ams_data -> g1;
   ri[4] = ams_data -> r1; gi[4] = ams_data -> g1;

   /* may need to create an additional temporary vector for relaxation */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      needZ = ams_data -> A_relax_type == 2 || ams_data -> A_relax_type == 4 ||
              ams_data -> A_relax_type == 16;
   }
   else
#endif
   {
      needZ = hypre_NumThreads() > 1 || ams_data -> A_relax_type == 16;
   }

   if (needZ && !z)
   {
      z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(z);
      /* cache the vector for reuse in subsequent solves */
      ams_data -> zz = z;
   }

   if (ams_data -> print_level > 0)
   {
      hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);
   }

   /* Compatible subspace projection for problems with zero-conductivity
      regions. Note that this modifies the input (r.h.s.) vector b! */
   if ( (ams_data -> B_G0) &&
        (++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) )
   {
      /* hypre_printf("Projecting onto the compatible subspace...\n"); */
      hypre_AMSProjectOutGradients(ams_data, b);
   }

   /* Build the cycle string interpreted by hypre_ParCSRSubspacePrec:
      '0' = fine-grid smoothing, '1'..'5' = subspace corrections,
      '(' saves the residual, '+' makes the next correction additive. */
   if (ams_data -> beta_is_zero)
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle, "%s", "0");
            break;
         case 1: case 3: case 5: case 7: default:
            hypre_sprintf(cycle, "%s", "020");
            break;
         case 2: case 4: case 6: case 8:
            hypre_sprintf(cycle, "%s", "(0+2)");
            break;
         case 11: case 13:
            hypre_sprintf(cycle, "%s", "0345430");
            break;
         case 12:
            hypre_sprintf(cycle, "%s", "(0+3+4+5)");
            break;
         case 14:
            hypre_sprintf(cycle, "%s", "0(+3+4+5)0");
            break;
      }
   }
   else
   {
      switch (ams_data -> cycle_type)
      {
         case 0: hypre_sprintf(cycle, "%s", "010"); break;
         case 1: default: hypre_sprintf(cycle, "%s", "01210"); break;
         case 2: hypre_sprintf(cycle, "%s", "(0+1+2)"); break;
         case 3: hypre_sprintf(cycle, "%s", "02120"); break;
         case 4: hypre_sprintf(cycle, "%s", "(010+2)"); break;
         case 5: hypre_sprintf(cycle, "%s", "0102010"); break;
         case 6: hypre_sprintf(cycle, "%s", "(020+1)"); break;
         case 7: hypre_sprintf(cycle, "%s", "0201020"); break;
         case 8: hypre_sprintf(cycle, "%s", "0(+1+2)0"); break;
         case 9: hypre_sprintf(cycle, "%s", "01210"); break;
         case 11: hypre_sprintf(cycle, "%s", "013454310"); break;
         case 12: hypre_sprintf(cycle, "%s", "(0+1+3+4+5)"); break;
         case 13: hypre_sprintf(cycle, "%s", "034515430"); break;
         case 14: hypre_sprintf(cycle, "%s", "01(+3+4+5)10"); break;
         case 20: hypre_sprintf(cycle, "%s", "020"); break;
      }
   }

   for (i = 0; i < ams_data -> maxit; i++)
   {
      /* Compute initial residual norms */
      if (ams_data -> maxit > 1 && i == 0)
      {
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0, ams_data -> r0));
         r0_norm = r_norm;
         b_norm = sqrt(hypre_ParVectorInnerProd(b, b));
         if (b_norm)
         {
            relative_resid = r_norm / b_norm;
         }
         else
         {
            relative_resid = r_norm;
         }
         if (my_id == 0 && ams_data -> print_level > 0)
         {
            hypre_printf(" relative\n");
            hypre_printf(" residual factor residual\n");
            hypre_printf(" -------- ------ --------\n");
            hypre_printf(" Initial %e %e\n", r_norm, relative_resid);
         }
      }

      /* Apply the preconditioner */
      hypre_ParCSRSubspacePrec(ams_data -> A,
                               ams_data -> A_relax_type,
                               ams_data -> A_relax_times,
                               ams_data -> A_l1_norms ? hypre_VectorData(ams_data -> A_l1_norms) : NULL,
                               ams_data -> A_relax_weight,
                               ams_data -> A_omega,
                               ams_data -> A_max_eig_est,
                               ams_data -> A_min_eig_est,
                               ams_data -> A_cheby_order,
                               ams_data -> A_cheby_fraction,
                               Ai, Bi, HBi, Pi, ri, gi,
                               b, x,
                               ams_data -> r0,
                               ams_data -> g0,
                               cycle, z);

      /* Compute new residual norms */
      if (ams_data -> maxit > 1)
      {
         old_resid = r_norm;
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0, ams_data -> r0));
         if (b_norm) { relative_resid = r_norm / b_norm; }
         else { relative_resid = r_norm; }
         if (my_id == 0 && ams_data -> print_level > 0)
            hypre_printf(" Cycle %2d %e %f %e \n",
                         i + 1, r_norm, r_norm / old_resid, relative_resid);
      }

      if (relative_resid < ams_data -> tol)
      {
         i++;
         break;
      }
   }

   if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1)
      hypre_printf("\n\n Average Convergence Factor = %f\n\n",
                   pow((r_norm / r0_norm), (1.0 / (HYPRE_Real) i)));

   ams_data -> num_iterations = i;
   ams_data -> rel_resid_norm = relative_resid;

   /* flag non-convergence when the iteration limit was reached */
   if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0)
   {
      hypre_error(HYPRE_ERROR_CONV);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRSubspacePrec
 *
 * General subspace preconditioner for A0 y = x, based on ParCSR storage.
 *
 * P[i] and A[i] are the interpolation and coarse grid matrices for
 * the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i]
 * are temporary vectors. A0_* are the fine grid smoothing parameters.
 *
 * The default mode is multiplicative, '+' changes the next correction
 * to additive, based on residual computed at '('.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */
                                   hypre_ParCSRMatrix *A0,
                                   /* relaxation parameters */
                                   HYPRE_Int A0_relax_type,
                                   HYPRE_Int A0_relax_times,
                                   HYPRE_Real *A0_l1_norms,
                                   HYPRE_Real A0_relax_weight,
                                   HYPRE_Real A0_omega,
                                   HYPRE_Real A0_max_eig_est,
                                   HYPRE_Real A0_min_eig_est,
                                   HYPRE_Int A0_cheby_order,
                                   HYPRE_Real A0_cheby_fraction,
                                   /* subspace matrices */
                                   hypre_ParCSRMatrix **A,
                                   /* subspace preconditioners */
                                   HYPRE_Solver *B,
                                   /* hypre solver functions for B */
                                   HYPRE_PtrToSolverFcn *HB,
                                   /* subspace interpolations */
                                   hypre_ParCSRMatrix **P,
                                   /* temporary subspace vectors */
                                   hypre_ParVector **r,
                                   hypre_ParVector **g,
                                   /* right-hand side */
                                   hypre_ParVector *x,
                                   /* current approximation */
                                   hypre_ParVector *y,
                                   /* current residual */
                                   hypre_ParVector *r0,
                                   /* temporary vector */
                                   hypre_ParVector *g0,
                                   char *cycle,
                                   /* temporary vector */
                                   hypre_ParVector *z)
{
   char *op;
   HYPRE_Int use_saved_residual = 0;

   /* Interpret the cycle string one character at a time */
   for (op = cycle; *op != '\0'; op++)
   {
      /* do nothing */
      if (*op == ')')
      {
         continue;
      }
      /* compute the residual: r = x - Ay */
      else if (*op == '(')
      {
         hypre_ParVectorCopy(x, r0);
         hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0);
      }
      /* switch to additive correction */
      else if (*op == '+')
      {
         use_saved_residual = 1;
         continue;
      }
      /* smooth: y += S (x - Ay) */
      else if (*op == '0')
      {
         hypre_ParCSRRelax(A0, x,
                           A0_relax_type,
                           A0_relax_times,
                           A0_l1_norms,
                           A0_relax_weight,
                           A0_omega,
                           A0_max_eig_est,
                           A0_min_eig_est,
                           A0_cheby_order,
                           A0_cheby_fraction,
                           y, g0, z);
      }
      /* subspace correction: y += P B^{-1} P^t r */
      else
      {
         /* digits map to subspace indices: '1' -> 0, '2' -> 1, ... */
         HYPRE_Int i = *op - '1';
         if (i < 0)
         {
            hypre_error_in_arg(16);
         }

         /* skip empty subspaces */
         if (!A[i])
         {
            continue;
         }

         /* compute the residual? */
         if (use_saved_residual)
         {
            /* additive mode: reuse the residual saved at '(' */
            use_saved_residual = 0;
            hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]);
         }
         else
         {
            /* multiplicative mode: restrict the current residual x - A0 y */
            hypre_ParVectorCopy(x, g0);
            hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0);
            hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]);
         }

         /* solve in the subspace with a zero initial guess, then prolongate */
         hypre_ParVectorSetConstantValues(g[i], 0.0);
         (*HB[i]) (B[i], (HYPRE_Matrix)A[i],
                   (HYPRE_Vector)r[i], (HYPRE_Vector)g[i]);
         hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0);
         hypre_ParVectorAxpy(1.0, g0, y);
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSGetNumIterations
 *
 * Get the number of AMS iterations.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSGetNumIterations(void *solver,
                                    HYPRE_Int *num_iterations)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   *num_iterations = ams_data -> num_iterations;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSGetFinalRelativeResidualNorm
 *
 * Get the final relative residual norm in AMS.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver,
                                                HYPRE_Real *rel_resid_norm)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   *rel_resid_norm = ams_data -> rel_resid_norm;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSProjectOutGradients
 *
 * For problems with zero-conductivity regions, project the vector onto the
 * compatible subspace: x = (I - G0 (G0^t G0)^{-1} G0^T) x, where G0 is the
 * discrete gradient restricted to the interior nodes of the regions with
 * zero conductivity. This ensures that x is orthogonal to the gradients in
 * the range of G0.
 *
 * This function is typically called after the solution iteration is complete,
 * in order to facilitate the visualization of the computed field.
 Without it
 * the values in the zero-conductivity regions contain kernel components.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSProjectOutGradients(void *solver,
                                       hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (ams_data -> B_G0)
   {
      /* r1 = G0^t x */
      hypre_ParCSRMatrixMatvecT(1.0, ams_data -> G0, x, 0.0, ams_data -> r1);
      /* g1 ~ (G0^t G0)^{-1} r1, approximated by one AMG solve with zero guess */
      hypre_ParVectorSetConstantValues(ams_data -> g1, 0.0);
      hypre_BoomerAMGSolve(ams_data -> B_G0, ams_data -> A_G0, ams_data -> r1, ams_data -> g1);
      /* x -= G0 g1 */
      hypre_ParCSRMatrixMatvec(1.0, ams_data -> G0, ams_data -> g1, 0.0, ams_data -> g0);
      hypre_ParVectorAxpy(-1.0, ams_data -> g0, x);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSConstructDiscreteGradient
 *
 * Construct and return the lowest-order discrete gradient matrix G, based on:
 * - a matrix on the egdes (e.g. the stiffness matrix A)
 * - a vector on the vertices (e.g. the x coordinates)
 * - the array edge_vertex, which lists the global indexes of the
 *   vertices of the local edges.
 *
 * We assume that edge_vertex lists the edge vertices consecutively,
 * and that the orientation of all edges is consistent. More specificaly:
 * If edge_orientation = 1, the edges are already oriented.
 * If edge_orientation = 2, the orientation of edge i depends only on the
 * sign of edge_vertex[2*i+1] - edge_vertex[2*i].
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A,
                                             hypre_ParVector *x_coord,
                                             HYPRE_BigInt *edge_vertex,
                                             HYPRE_Int edge_orientation,
                                             hypre_ParCSRMatrix **G_ptr)
{
   hypre_ParCSRMatrix *G;
   HYPRE_Int nedges;

   nedges = hypre_ParCSRMatrixNumRows(A);

   /* Construct the local part of G based on edge_vertex and the edge
      and vertex partitionings from A and x_coord */
   {
      HYPRE_Int i, *I = hypre_CTAlloc(HYPRE_Int, nedges + 1, HYPRE_MEMORY_HOST);
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2 * nedges, HYPRE_MEMORY_HOST);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges,
                                                      hypre_ParVectorGlobalSize(x_coord),
                                                      2 * nedges);

      /* every edge row has exactly two entries (its two vertices) */
      for (i = 0; i <= nedges; i++)
      {
         I[i] = 2 * i;
      }

      if (edge_orientation == 1)
      {
         /* Assume that the edges are already oriented */
         for (i = 0; i < 2 * nedges; i += 2)
         {
            data[i] = -1.0;
            data[i + 1] = 1.0;
         }
      }
      else if (edge_orientation == 2)
      {
         /* Assume that the edge orientation is based on the vertex indexes */
         for (i = 0; i < 2 * nedges; i += 2)
         {
            if (edge_vertex[i] < edge_vertex[i + 1])
            {
               data[i] = -1.0;
               data[i + 1] = 1.0;
            }
            else
            {
               data[i] = 1.0;
               data[i + 1] = -1.0;
            }
         }
      }
      else
      {
         hypre_error_in_arg(4);
      }

      /* hand the arrays to the CSR matrix; edge_vertex is borrowed as BigJ */
      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixBigJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;
      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = nedges;

      /* Generate the discrete gradient matrix */
      G = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   hypre_ParVectorGlobalSize(x_coord),
                                   hypre_ParCSRMatrixRowStarts(A),
                                   hypre_ParVectorPartitioning(x_coord),
                                   0, 0, 0);
      hypre_CSRMatrixBigJtoJ(local);
      GenerateDiagAndOffd(local, G,
                          hypre_ParVectorFirstIndex(x_coord),
                          hypre_ParVectorLastIndex(x_coord));

      /* Account for empty rows in G. These may appear when A includes only
         the interior (non-Dirichlet b.c.) edges. */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         G_diag->num_cols = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord));
      }

      /* Free the local matrix */
      hypre_CSRMatrixDestroy(local);
   }

   *G_ptr = G;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSFEISetup
 *
 * Construct an AMS solver object based on the following data:
 *
 * A - the edge element stiffness matrix
 * num_vert - number of vertices (nodes) in the processor
 * num_local_vert - number of vertices owned by the processor
 * vert_number - global indexes of the vertices in the processor
 * vert_coord - coordinates of the vertices in the processor
 * num_edges - number of edges owned by the processor
 * edge_vertex - the vertices of the edges owned by the processor.
 *               Vertices are in local numbering (the same as in
 *               vert_number), and edge orientation is always from
 *               the first to the second vertex.
 *
 * Here we distinguish between vertices that belong to elements in the
 * current processor, and the subset of these vertices that is owned by
 * the processor.
 *
 * This function is written specifically for input from the FEI and should
 * be called before hypre_AMSSetup().
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSFEISetup(void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *b,
                            hypre_ParVector *x,
                            HYPRE_Int num_vert,
                            HYPRE_Int num_local_vert,
                            HYPRE_BigInt *vert_number,
                            HYPRE_Real *vert_coord,
                            HYPRE_Int num_edges,
                            HYPRE_BigInt *edge_vertex)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, j;

   hypre_ParCSRMatrix *G;
   hypre_ParVector *x_coord, *y_coord, *z_coord;
   HYPRE_Real *x_data, *y_data, *z_data;

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_BigInt vert_part[2], num_global_vert;
   HYPRE_BigInt vert_start, vert_end;
   HYPRE_BigInt big_local_vert = (HYPRE_BigInt) num_local_vert;

   /* Find the processor partitioning of the vertices */
   hypre_MPI_Scan(&big_local_vert, &vert_part[1], 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
   vert_part[0] = vert_part[1] - big_local_vert;
   hypre_MPI_Allreduce(&big_local_vert, &num_global_vert, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);

   /* Construct hypre parallel vectors for the vertex coordinates */
   x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(x_coord);
   hypre_ParVectorOwnsData(x_coord) = 1;
   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord));

   y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(y_coord);
   hypre_ParVectorOwnsData(y_coord) = 1;
   y_data = hypre_VectorData(hypre_ParVectorLocalVector(y_coord));

   z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(z_coord);
   hypre_ParVectorOwnsData(z_coord) = 1;
   z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord));

   vert_start = hypre_ParVectorFirstIndex(x_coord);
   vert_end = hypre_ParVectorLastIndex(x_coord);

   /* Save coordinates of locally owned vertices */
   for (i = 0; i < num_vert; i++)
   {
      if (vert_number[i] >= vert_start && vert_number[i] <= vert_end)
      {
         /* vert_coord is packed as (x,y,z) triples per vertex */
         j = (HYPRE_Int)(vert_number[i] - vert_start);
         x_data[j] = vert_coord[3 * i];
         y_data[j] = vert_coord[3 * i + 1];
         z_data[j] = vert_coord[3 * i + 2];
      }
   }

   /* Change vertex numbers from local to global */
   for (i = 0; i < 2 * num_edges; i++)
   {
      edge_vertex[i] = vert_number[edge_vertex[i]];
   }

   /* Construct the local part of G based on edge_vertex */
   {
      /* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */
      HYPRE_Int *I = hypre_CTAlloc(HYPRE_Int, num_edges + 1, HYPRE_MEMORY_HOST);
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2 * num_edges, HYPRE_MEMORY_HOST);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges,
                                                      num_global_vert,
                                                      2 * num_edges);

      /* two entries per edge row */
      for (i = 0; i <= num_edges; i++)
      {
         I[i] = 2 * i;
      }

      /* Assume that the edge orientation is based on the vertex indexes */
      for (i = 0; i < 2 * num_edges; i += 2)
      {
         data[i] = 1.0;
         data[i + 1] = -1.0;
      }

      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixBigJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;
      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = num_edges;

      G = hypre_ParCSRMatrixCreate(comm,
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   num_global_vert,
                                   hypre_ParCSRMatrixRowStarts(A),
                                   vert_part,
                                   0, 0, 0);
      hypre_CSRMatrixBigJtoJ(local);
      GenerateDiagAndOffd(local, G, vert_start, vert_end);

      //hypre_CSRMatrixJ(local) = NULL;
      hypre_CSRMatrixDestroy(local);
   }

   ams_data -> G = G;

   ams_data -> x = x_coord;
   ams_data -> y = y_coord;
   ams_data -> z = z_coord;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSFEIDestroy
 *
 * Free the additional memory allocated in hypre_AMSFEISetup().
 *
 * This function is written specifically for input from the FEI and should
 * be called before hypre_AMSDestroy().
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSFEIDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (ams_data -> G)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> G);
   }

   if (ams_data -> x)
   {
      hypre_ParVectorDestroy(ams_data -> x);
   }
   if (ams_data -> y)
   {
      hypre_ParVectorDestroy(ams_data -> y);
   }
   if (ams_data -> z)
   {
      hypre_ParVectorDestroy(ams_data -> z);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRComputeL1Norms Threads
 *
 * Compute the l1 norms of the rows of a given matrix, depending on
 * the option parameter:
 *
 * option 1 = Compute the l1 norm of the rows
 * option 2 = Compute the l1 norm of the (processor) off-diagonal
 *            part of the rows plus the diagonal of A
 * option 3 = Compute the l2 norm^2 of the rows
 * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
 *            Smoothers for Ultra-Parallel Computing"
 *
 * The above computations are done in a CF manner, whenever the provided
 * cf_marker is not NULL.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A,
                                            HYPRE_Int option,
                                            HYPRE_Int num_threads,
                                            HYPRE_Int *cf_marker,
                                            HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j, k;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   /* NOTE(review): `diag` is only assigned inside option 4 when a row's
      diagonal entry (ii == i) passes the filters; the truncation below may
      otherwise read a stale value from a previous row -- confirm that every
      row stores its (CF-matching) diagonal entry. */
   HYPRE_Real diag;
   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, hypre_ParCSRMatrixMemoryLocation(A));
   HYPRE_Int ii, ns, ne, rest, size;

   HYPRE_Int *cf_marker_offd = NULL;
   HYPRE_Int cf_diag;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;

      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
      {
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      }
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);
      /* pack the local cf_marker entries referenced by each send map */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < num_threads; k++)
   {
      /* split rows into num_threads contiguous chunks [ns, ne) */
      size = num_rows / num_threads;
      rest = num_rows - size * num_threads;
      if (k < rest)
      {
         ns = k * size + k;
         ne = (k + 1) * size + k + 1;
      }
      else
      {
         ns = k * size + rest;
         ne = (k + 1) * size + rest;
      }

      if (option == 1)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
               {
                  l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
                  {
                     l1_norm[i] += fabs(A_offd_data[j]);
                  }
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the CF l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
                  if (cf_diag == cf_marker[A_diag_J[j]])
                  {
                     l1_norm[i] += fabs(A_diag_data[j]);
                  }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     {
                        l1_norm[i] += fabs(A_offd_data[j]);
                     }
               }
            }
         }
      }
      else if (option == 2)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                  {
                     l1_norm[i] += fabs(A_diag_data[j]);
                  }
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
                  {
                     l1_norm[i] += fabs(A_offd_data[j]);
                  }
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) && (cf_diag == cf_marker[A_diag_J[j]]))
                  {
                     l1_norm[i] += fabs(A_diag_data[j]);
                  }
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     {
                        l1_norm[i] += fabs(A_offd_data[j]);
                     }
               }
            }
         }
      }
      else if (option == 3)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
            {
               l1_norm[i] += A_diag_data[j] * A_diag_data[j];
            }
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
               {
                  l1_norm[i] += A_offd_data[j] * A_offd_data[j];
               }
         }
      }
      else if (option == 4)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                     {
                        l1_norm[i] += 0.5 * fabs(A_diag_data[j]);
                     }
                  }
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
                  {
                     l1_norm[i] += 0.5 * fabs(A_offd_data[j]);
                  }
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) && (cf_diag == cf_marker[A_diag_J[j]]))
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                     {
                        l1_norm[i] += 0.5 * fabs(A_diag_data[j]);
                     }
                  }
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     {
                        l1_norm[i] += 0.5 * fabs(A_offd_data[j]);
                     }
               }
            }
            /* Truncate according to Remark 6.2 */
            if (l1_norm[i] <= 4.0 / 3.0 * diag)
            {
               l1_norm[i] = diag;
            }
         }
      }
      else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
      {
         /* Set the diag element */
         for (i = ns; i < ne; i++)
         {
            /* first entry of each diag row is the diagonal element */
            l1_norm[i] = A_diag_data[A_diag_I[i]];
            if (l1_norm[i] == 0) { l1_norm[i] = 1.0; }
         }
      }

      if (option < 5)
      {
         /* Handle negative definite matrices */
         for (i = ns; i < ne; i++)
            if (A_diag_data[A_diag_I[i]] < 0)
            {
               l1_norm[i] = -l1_norm[i];
            }
         for (i = ns; i < ne; i++)
            /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
            if (fabs(l1_norm[i]) == 0.0)
            {
               hypre_error_in_arg(1);
               break;
            }
      }
   }

   hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST);

   *l1_norm_ptr = l1_norm;

   return hypre_error_flag;
}
eltwise_layernorm.c
/******************************************************************************
 * Copyright (c) Intel Corporation - All rights reserved.                     *
 * This file is part of the LIBXSMM library.                                  *
 *                                                                            *
 * For information on the license, see the LICENSE file.                      *
 * Further information: https://github.com/hfp/libxsmm/                       *
 * SPDX-License-Identifier: BSD-3-Clause                                      *
 ******************************************************************************/
/* Evangelos Georganas (Intel Corp.)
 ******************************************************************************/
#include <libxsmm.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <immintrin.h>

/* include c-based dnn library */
#include "../deeplearning/common/dnn_common.h"

#if defined(_OPENMP)
# include <omp.h>
#endif

#define EPS 1e-9

/* Fill an ld x n column-major matrix (including padding rows up to ld)
   with uniform random values in (-1, 1); aborts when ld < m. */
LIBXSMM_INLINE void sfill_matrix ( float *matrix, unsigned int ld, unsigned int m, unsigned int n )
{
  unsigned int i, j;
  double dtmp;

  if ( ld < m )
  {
     fprintf(stderr,"Error is sfill_matrix: ld=%u m=%u mismatched!\n",ld,m);
     exit(EXIT_FAILURE);
  }
  for ( j = 1; j <= n; j++ )
  {
     /* Fill through the leading dimension */
     for ( i = 1; i <= ld; i++ )
     {
        dtmp = 1.0 - 2.0*libxsmm_rng_f64();
        matrix [ (j-1)*ld + (i-1) ] = (float) dtmp;
     }
  }
}

/* Reference layernorm over each of the n rows of length m (leading
   dimension ld_in): sout_ref = (x - mean) * rstd * gamma + beta.
   Also stores the per-row mean and reciprocal std-dev. */
LIBXSMM_INLINE void naive_layernorm(int m, int n, int ld_in, float *sinp, float *gamma, float *beta, float *sout_ref, float *mean_data_ref, float *rstd_data_ref)
{
  int i, j;
#if defined(_OPENMP)
/* FIX: `i` must be private too -- with only private(j) the inner-loop index
   was shared across threads, a data race. */
#pragma omp parallel for private(i, j)
#endif
  for (j = 0; j < n; j++) {
    float mean_val_ref = 0, rstd_val_ref = 0, scale_ref = 0, bias_ref = 0, gamma_val_ref = 0, beta_val_ref = 0;
    mean_data_ref[j] = 0;
    rstd_data_ref[j] = 0;
    /* first pass: accumulate sum and sum of squares of row j */
    for (i = 0; i < m; i++) {
      mean_data_ref[j] += sinp[j*ld_in + i];
      rstd_data_ref[j] += sinp[j*ld_in + i] * sinp[j*ld_in + i];
    }
    mean_val_ref = mean_data_ref[j]/m;
    /* var = E[x^2] - E[x]^2; rstd = 1/sqrt(var) (no epsilon guard) */
    rstd_val_ref = (rstd_data_ref[j]/m)-mean_val_ref*mean_val_ref;
    rstd_val_ref = 1/((float)sqrt(rstd_val_ref));
    mean_data_ref[j] = mean_val_ref;
    rstd_data_ref[j] = rstd_val_ref;
    scale_ref = rstd_val_ref;
    bias_ref = -1.f * rstd_val_ref * mean_val_ref;
    for (i = 0; i < m; i++) {
      gamma_val_ref = gamma[i];
      beta_val_ref = beta[i];
      /* FIX: assign (was `+=`) -- layernorm overwrites its output, and the
         optimized scaleout kernel writes (not accumulates) its out_ptr, so
         accumulating into a non-zero buffer would diverge from it. */
      sout_ref[j*ld_in+i] = (sinp[j*ld_in+i] * scale_ref + bias_ref) * gamma_val_ref + beta_val_ref;
    }
  }
}

/* Reference layernorm backward pass: given upstream gradient dY, the saved
   per-row mean/rstd and gamma, produce dX and the parameter gradients
   dgamma/dbeta (both accumulated over all n rows, zeroed first). */
LIBXSMM_INLINE void naive_layernorm_bwd(int m, int n, int ld_in, float *dY, float *X, float *mean, float *rstd, float *gamma, float *dX, float *dgamma, float *dbeta)
{
  float a, b, c, ds, db, scale = (float)(1.0 / m);
  int i, j;

  for (i = 0; i < m; i++) {
    dgamma[i] = 0;
    dbeta[i] = 0;
  }

  for (j = 0; j < n; j++) {
    a = rstd[j];
    b = -1.f * a * mean[j];
    ds = 0;
    db = 0;
    for (i = 0; i < m; i++) {
      /* a*X+b is the normalized input x_hat */
      dgamma[i] += dY[j*ld_in+i] * (a * X[j*ld_in+i] + b);
      dbeta[i] += dY[j*ld_in+i];
      ds += dY[j*ld_in+i] * X[j*ld_in+i] * gamma[i];
      db += dY[j*ld_in+i] * gamma[i];
    }
    /* reuse b and c as the row-constant terms of the dX formula */
    b = (db * mean[j] - ds) * a * a * a * scale;
    c = -1.f * b * mean[j] - db * a * scale;
    for (i = 0; i < m; i++) {
      dX[j*ld_in+i] = a * dY[j*ld_in+i] * gamma[i] + b * X[j*ld_in+i] + c;
    }
  }
}

/* Layernorm using JITed libxsmm kernels: reduce rows to sums/sums-of-squares,
   scale by 1/m, convert to rstd and the auxiliary bias -mean*rstd, then apply
   y = (x * rstd + bias) * gamma + beta via the scaleout kernel. */
LIBXSMM_INLINE void optimized_layernorm(int m, int n, int ld_in, float *sinp, float *gamma, float *beta, float *sout, float *mean_data, float *rstd_data, libxsmm_meltwfunction_reduce reduce_kernel, libxsmm_meltwfunction_scale scalemean_kernel, libxsmm_meltwfunction_scale scaleout_kernel, float * bias_aux)
{
  int i;
  float reverse_m = (float)(1.0 / m);
#if defined(__AVX512F__)
  __m512 minus_ones = _mm512_set1_ps(-1.f);
#endif
  libxsmm_meltw_reduce_param reduce_params;
  libxsmm_meltw_scale_param scalemean_params;
  libxsmm_meltw_scale_param scaleout_params;

  /* per-row sum -> mean_data, per-row sum of squares -> rstd_data */
  reduce_params.in_ptr = sinp;
  reduce_params.out_ptr_0 = mean_data;
  reduce_params.out_ptr_1 = rstd_data;
  reduce_kernel(&reduce_params);

  scalemean_params.in_ptr = mean_data;
  scalemean_params.out_ptr = mean_data;
  scalemean_params.scale_vals_ptr = &reverse_m;
  scalemean_kernel(&scalemean_params);

  scalemean_params.in_ptr = rstd_data;
  scalemean_params.out_ptr = rstd_data;
  scalemean_kernel(&scalemean_params);

  /* Calculate rstd and auxiliary bias vectors*/
#if defined(__AVX512F__)
  for (i = 0; i < n-15; i+= 16) {
    __m512 vrstd = _mm512_loadu_ps(rstd_data+i);
    __m512 vmean = _mm512_loadu_ps(mean_data+i);
    /* rstd = rsqrt(E[x^2] - mean^2), bias = -mean * rstd */
    vrstd = _mm512_rsqrt14_ps(_mm512_sub_ps(vrstd, _mm512_mul_ps(vmean, vmean)));
    _mm512_storeu_ps(rstd_data+i, vrstd);
    _mm512_storeu_ps(bias_aux+i, _mm512_mul_ps(minus_ones, _mm512_mul_ps(vmean, vrstd)));
  }
  if (i < n) {
    int rem = n - i;
    __mmask16 mask = (1 << rem) - 1;
    __m512 vrstd = _mm512_maskz_loadu_ps(mask, rstd_data+i);
    __m512 vmean = _mm512_maskz_loadu_ps(mask, mean_data+i);
    vrstd = _mm512_maskz_rsqrt14_ps(mask, _mm512_sub_ps(vrstd, _mm512_mul_ps(vmean, vmean)));
    _mm512_mask_storeu_ps(rstd_data+i, mask, vrstd );
    _mm512_mask_storeu_ps(bias_aux+i, mask, _mm512_mul_ps(minus_ones, _mm512_mul_ps(vmean, vrstd)));
  }
#else
  for (i = 0; i < n; i++) {
    rstd_data[i] = (float)(1.0 / sqrt(rstd_data[i] - mean_data[i] * mean_data[i]));
    bias_aux[i] = -1.f * mean_data[i] * rstd_data[i];
  }
#endif

  scaleout_params.in_ptr = sinp;
  scaleout_params.out_ptr = sout;
  scaleout_params.scale_vals_ptr = rstd_data;
  scaleout_params.bias_vals_ptr = bias_aux;
  scaleout_params.scale_vals_ptr2 = gamma;
  scaleout_params.bias_vals_ptr2 = beta;
  scaleout_kernel(&scaleout_params);
}

/* In-place blocked layernorm over an (nBlocks x mBlocks x bn x bm) tiling of
   data_in, parallelized with OpenMP and JITed libxsmm reduce/scale kernels. */
LIBXSMM_INLINE void optimized_blocked_layernorm(int m, int n, int bm, int bn, float *data_in, float *gamma_data, float *beta_data, float *mean_data, float *rstd_data)
{
  int ld = bm, ld_vector = bn;
  libxsmm_meltw_redu_flags jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_NONE;
  libxsmm_meltwfunction_reduce reduce_rows_kernel, reduce_cols_kernel;
  libxsmm_meltw_scal_flags jit_scale_flags = 0;
  libxsmm_meltwfunction_scale scale_kernel;
  libxsmm_meltw_scal_flags jit_scaleout_flags = 0;
  libxsmm_meltwfunction_scale scaleout_kernel;
#if defined(_OPENMP)
  int threads = omp_get_max_threads(); /* number of threads */
#else
  int threads = 1; /* number of threads */
#endif
  int nBlocks = n/bn;
  int mBlocks = m/bm;
  /* scratch layout: [sums | sums_sq | aux_bias] */
  float *const scratch = (float*)libxsmm_aligned_scratch((2 * n * mBlocks + n) * sizeof(float),
0/*auto-alignment*/); float *sums_ptr = scratch; float *sums_sq_ptr = scratch + n * mBlocks; float *aux_bias_ptr = scratch + 2 * n * mBlocks; LIBXSMM_VLA_DECL(3, float, sums, sums_ptr, mBlocks, bn); LIBXSMM_VLA_DECL(3, float, sums_sq, sums_sq_ptr, mBlocks, bn); LIBXSMM_VLA_DECL(2, float, mean, mean_data, bn); LIBXSMM_VLA_DECL(2, float, rstd, rstd_data, bn); LIBXSMM_VLA_DECL(2, float, gamma, gamma_data, bm); LIBXSMM_VLA_DECL(2, float, beta, beta_data, bm); LIBXSMM_VLA_DECL(2, float, aux_bias, aux_bias_ptr, bn); LIBXSMM_VLA_DECL(4, float, X, data_in, mBlocks, bn, bm); /*libxsmm_barrier *barrier;*/ /* Generate JITED kernels for optimized code */ jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_ROWS | LIBXSMM_MELTW_FLAG_REDUCE_OP_ADD | LIBXSMM_MELTW_FLAG_REDUCE_ELTS | LIBXSMM_MELTW_FLAG_REDUCE_ELTS_SQUARED; reduce_rows_kernel = libxsmm_dispatch_meltw_reduce(bm, bn, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags); jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_COLS | LIBXSMM_MELTW_FLAG_REDUCE_OP_ADD | LIBXSMM_MELTW_FLAG_REDUCE_ELTS; reduce_cols_kernel = libxsmm_dispatch_meltw_reduce(bn, mBlocks, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags); jit_scale_flags = LIBXSMM_MELTW_FLAG_SCALE_ROWS | LIBXSMM_MELTW_FLAG_SCALE_MULT; scale_kernel = libxsmm_dispatch_meltw_scale(bn, 1, &ld_vector, &ld_vector, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_scale_flags); jit_scaleout_flags = LIBXSMM_MELTW_FLAG_SCALE_ROWS_COLS | LIBXSMM_MELTW_FLAG_SCALE_MULT | LIBXSMM_MELTW_FLAG_SCALE_ADD_BIAS; scaleout_kernel = libxsmm_dispatch_meltw_scale(bm, bn, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_scaleout_flags); #if defined(_OPENMP) # pragma omp parallel #endif { int i, imin, im, in; float reverse_m = (float)(1.0 / m); #if defined(__AVX512F__) __m512 minus_ones = _mm512_set1_ps(-1.f); #endif #if defined(_OPENMP) const int ltid = omp_get_thread_num(); #else const int ltid = 0; #endif const int work_mn = nBlocks * mBlocks; const int 
chunksize_mn = (work_mn % threads == 0) ? (work_mn /threads) : ((work_mn / threads) + 1); const int thr_begin_mn = (ltid * chunksize_mn < work_mn) ? (ltid * chunksize_mn) : work_mn; const int thr_end_mn = ((ltid + 1) * chunksize_mn < work_mn) ? ((ltid + 1) * chunksize_mn) : work_mn; const int work_n = nBlocks; const int chunksize_n = (work_n % threads == 0) ? (work_n /threads) : ((work_n / threads) + 1); const int thr_begin_n = (ltid * chunksize_n < work_n) ? (ltid * chunksize_n) : work_n; const int thr_end_n = ((ltid + 1) * chunksize_n < work_n) ? ((ltid + 1) * chunksize_n) : work_n; libxsmm_meltw_reduce_param reduce_rows_params, reduce_cols_params;; libxsmm_meltw_scale_param scale_params; libxsmm_meltw_scale_param scaleout_params; /*libxsmm_barrier_init(barrier, ltid);*/ for (imin = thr_begin_mn; imin < thr_end_mn; imin++) { in = imin / mBlocks; im = imin % mBlocks; reduce_rows_params.in_ptr = &LIBXSMM_VLA_ACCESS(4, X, in, im, 0, 0, mBlocks, bn, bm); reduce_rows_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(3, sums, in, im, 0, mBlocks, bn); reduce_rows_params.out_ptr_1 = &LIBXSMM_VLA_ACCESS(3, sums_sq, in, im, 0, mBlocks, bn); reduce_rows_kernel(&reduce_rows_params); } #pragma omp barrier /*libxsmm_barrier_wait(barrier, ltid);*/ scale_params.scale_vals_ptr = &reverse_m; for (in = thr_begin_n; in < thr_end_n; in++) { reduce_cols_params.in_ptr = &LIBXSMM_VLA_ACCESS(3, sums, in, 0, 0, mBlocks, bn); reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(2, mean, in, 0, bn); reduce_cols_kernel(&reduce_cols_params); scale_params.in_ptr = &LIBXSMM_VLA_ACCESS(2, mean, in, 0, bn); scale_params.out_ptr = &LIBXSMM_VLA_ACCESS(2, mean, in, 0, bn); scale_kernel(&scale_params); reduce_cols_params.in_ptr = &LIBXSMM_VLA_ACCESS(3, sums_sq, in, 0, 0, mBlocks, bn); reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(2, rstd, in, 0, bn); reduce_cols_kernel(&reduce_cols_params); scale_params.in_ptr = &LIBXSMM_VLA_ACCESS(2, rstd, in, 0, bn); scale_params.out_ptr = &LIBXSMM_VLA_ACCESS(2, rstd, in, 
0, bn); scale_kernel(&scale_params); } #pragma omp barrier /*libxsmm_barrier_wait(barrier, ltid);*/ /* Calculate rstd and auxiliary bias vectors*/ for (in = thr_begin_n; in < thr_end_n; in++) { float *rstd_ptr = &LIBXSMM_VLA_ACCESS(2, rstd, in, 0, bn); float *mean_ptr = &LIBXSMM_VLA_ACCESS(2, mean, in, 0, bn); float *bias_ptr = &LIBXSMM_VLA_ACCESS(2, aux_bias, in, 0, bn); #if defined(__AVX512F__) for (i = 0; i < bn-15; i+= 16) { __m512 vrstd = _mm512_loadu_ps(rstd_ptr+i); __m512 vmean = _mm512_loadu_ps(mean_ptr+i); vrstd = _mm512_rsqrt14_ps(_mm512_sub_ps(vrstd, _mm512_mul_ps(vmean, vmean))); _mm512_storeu_ps(rstd_ptr+i, vrstd); _mm512_storeu_ps(bias_ptr+i, _mm512_mul_ps(minus_ones, _mm512_mul_ps(vmean, vrstd))); } if (i < bn) { int rem = bn - i; __mmask16 mask = (1 << rem) - 1; __m512 vrstd = _mm512_maskz_loadu_ps(mask, rstd_ptr+i); __m512 vmean = _mm512_maskz_loadu_ps(mask, mean_ptr+i); vrstd = _mm512_maskz_rsqrt14_ps(mask, _mm512_sub_ps(vrstd, _mm512_mul_ps(vmean, vmean))); _mm512_mask_storeu_ps(rstd_ptr+i, mask, vrstd ); _mm512_mask_storeu_ps(bias_ptr+i, mask, _mm512_mul_ps(minus_ones, _mm512_mul_ps(vmean, vrstd))); } #else for (i = 0; i < bn; i++) { rstd_ptr[i] = (float)(1.0 / sqrt(rstd_ptr[i] - mean_ptr[i] * mean_ptr[i])); bias_ptr[i] = -1.f * mean_ptr[i] * mean_ptr[i]; } #endif } #pragma omp barrier /*libxsmm_barrier_wait(barrier, ltid);*/ for (imin = thr_begin_mn; imin < thr_end_mn; imin++) { in = imin / mBlocks; im = imin % mBlocks; scaleout_params.in_ptr = &LIBXSMM_VLA_ACCESS(4, X, in, im, 0, 0, mBlocks, bn, bm); scaleout_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, X, in, im, 0, 0, mBlocks, bn, bm); scaleout_params.scale_vals_ptr = &LIBXSMM_VLA_ACCESS(2, rstd, in, 0, bn); scaleout_params.bias_vals_ptr = &LIBXSMM_VLA_ACCESS(2, aux_bias, in, 0, bn); scaleout_params.scale_vals_ptr2 = &LIBXSMM_VLA_ACCESS(2, gamma, im, 0, bm); scaleout_params.bias_vals_ptr2 = &LIBXSMM_VLA_ACCESS(2, beta, im, 0, bm); scaleout_kernel(&scaleout_params); } #pragma omp barrier 
/*libxsmm_barrier_wait(barrier, ltid);*/
  }
  libxsmm_free(scratch);
}

/* Backward pass of blocked layer normalization over an (n x m) activation
 * tensor stored in blocked NCNC layout with (bn x bm) blocks.
 * Inputs:  _dY (upstream gradient), _X (forward input), _mean/_rstd
 *          (per-row statistics saved by the forward pass), _gamma (scale).
 * Outputs: _dX (input gradient), _dgamma/_dbeta (parameter gradients).
 * A single scratch allocation is partitioned into per-block partial sums
 * (db_aux/ds_aux/dgamma_aux/dbeta_aux) plus the final per-row db/ds vectors;
 * partials are produced per block, then combined by a second reduction stage.
 * NOTE(review): the block-preparation, b/c-vector, and final-dX loops below
 * are only compiled under __AVX512F__ and have no scalar #else branch
 * (unlike the forward pass above) -- confirm AVX-512 is a hard requirement,
 * otherwise the reductions consume uninitialized aux data on other targets. */
LIBXSMM_INLINE void optimized_blocked_layernorm_bwd(int m, int n, int bm, int bn, float *_dY, float *_X, float *_mean, float *_rstd, float *_gamma, float *_dX, float *_dgamma, float *_dbeta)
{
  int ld = bm, ld_vector = bn; /* leading dimensions handed to the JIT dispatcher */
  libxsmm_meltw_redu_flags jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_NONE;
  libxsmm_meltwfunction_reduce reduce_rows_kernel, reduce_cols_kernel, reduce_cols_kernel2, reduce_cols_kernel3;
  int nBlocks = n/bn;
  int mBlocks = m/bm;
  /* One scratch buffer carved into six regions (pointers below, in order):
   * dgamma_aux (m*nBlocks), dbeta_aux (m*nBlocks), ds_aux (n*mBlocks),
   * db_aux (n*mBlocks), db (n), ds (n). */
  float *const scratch = (float*)libxsmm_aligned_scratch((2 * n * mBlocks + 2 * m * nBlocks + 2 * n) * sizeof(float), 0/*auto-alignment*/);
  float *dgamma_aux_ptr = scratch;
  float *dbeta_aux_ptr = scratch + m * nBlocks;
  float *ds_aux_ptr = scratch + 2 * m * nBlocks;
  float *db_aux_ptr = scratch + 2 * m * nBlocks + n * mBlocks;
  float *db_ptr = scratch + 2 * m * nBlocks + 2 * n * mBlocks;
  float *ds_ptr = scratch + 2 * m * nBlocks + 2 * n * mBlocks + n;
  LIBXSMM_VLA_DECL(3, float, ds_aux, ds_aux_ptr, mBlocks, bn);
  LIBXSMM_VLA_DECL(3, float, db_aux, db_aux_ptr, mBlocks, bn);
  LIBXSMM_VLA_DECL(3, float, dgamma_aux, dgamma_aux_ptr, nBlocks, bm);
  LIBXSMM_VLA_DECL(3, float, dbeta_aux, dbeta_aux_ptr, nBlocks, bm);
  LIBXSMM_VLA_DECL(4, float, dY, _dY, mBlocks, bn, bm);
  LIBXSMM_VLA_DECL(4, float, X, _X, mBlocks, bn, bm);
  LIBXSMM_VLA_DECL(4, float, dX, _dX, mBlocks, bn, bm);
  LIBXSMM_VLA_DECL(2, float, mean, _mean, bn);
  LIBXSMM_VLA_DECL(2, float, rstd, _rstd, bn);
  LIBXSMM_VLA_DECL(2, float, gamma, _gamma, bm);
  LIBXSMM_VLA_DECL(2, float, dgamma, _dgamma, bm);
  LIBXSMM_VLA_DECL(2, float, dbeta, _dbeta, bm);
  LIBXSMM_VLA_DECL(2, float, ds, ds_ptr, bn);
  LIBXSMM_VLA_DECL(2, float, db, db_ptr, bn);
#if defined(_OPENMP)
  int threads = omp_get_max_threads(); /* number of threads */
#else
  int threads = 1; /* number of threads */
#endif
  /* Generate JITED kernels for optimized code */
  jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_ROWS | LIBXSMM_MELTW_FLAG_REDUCE_OP_ADD | LIBXSMM_MELTW_FLAG_REDUCE_ELTS;
  reduce_rows_kernel = libxsmm_dispatch_meltw_reduce(bm, bn, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags);
  jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_COLS | LIBXSMM_MELTW_FLAG_REDUCE_OP_ADD | LIBXSMM_MELTW_FLAG_REDUCE_ELTS;
  reduce_cols_kernel = libxsmm_dispatch_meltw_reduce(bm, bn, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags);
  /* second-level reducers: over nBlocks partials (bm wide) and mBlocks partials (bn wide) */
  reduce_cols_kernel2 = libxsmm_dispatch_meltw_reduce(bm, nBlocks, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags);
  reduce_cols_kernel3 = libxsmm_dispatch_meltw_reduce(bn, mBlocks, &ld_vector, &ld_vector, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags);
  /* per-thread staging area: three bm*bn tiles (db, ds, dgamma inputs) per thread */
#if !defined(_OPENMP)
  float *const aux = (float*)libxsmm_aligned_scratch((3 * bm * bn) * sizeof(float), 0/*auto-alignment*/);
#else
  float *const aux = (float*)libxsmm_aligned_scratch((3 * bm * bn) * sizeof(float) * omp_get_max_threads(), 0/*auto-alignment*/);
# pragma omp parallel
#endif
  {
    int imin, im, in, ii, jj;
    float reverse_m = (float)(1.0 / m);
#if defined(__AVX512F__)
    __m512 minus_ones = _mm512_set1_ps(-1.f);
    __m512 scale = _mm512_set1_ps(reverse_m);
#endif
#if defined(_OPENMP)
    const int ltid = omp_get_thread_num();
#else
    const int ltid = 0;
#endif
    /* static [begin,end) work partitions per thread for the three loop domains */
    const int work_mn = nBlocks * mBlocks;
    const int chunksize_mn = (work_mn % threads == 0) ? (work_mn /threads) : ((work_mn / threads) + 1);
    const int thr_begin_mn = (ltid * chunksize_mn < work_mn) ? (ltid * chunksize_mn) : work_mn;
    const int thr_end_mn = ((ltid + 1) * chunksize_mn < work_mn) ? ((ltid + 1) * chunksize_mn) : work_mn;
    const int work_n = nBlocks;
    const int chunksize_n = (work_n % threads == 0) ? (work_n /threads) : ((work_n / threads) + 1);
    const int thr_begin_n = (ltid * chunksize_n < work_n) ? (ltid * chunksize_n) : work_n;
    const int thr_end_n = ((ltid + 1) * chunksize_n < work_n) ? ((ltid + 1) * chunksize_n) : work_n;
    const int work_m = mBlocks;
    const int chunksize_m = (work_m % threads == 0) ? (work_m /threads) : ((work_m / threads) + 1);
    const int thr_begin_m = (ltid * chunksize_m < work_m) ? (ltid * chunksize_m) : work_m;
    const int thr_end_m = ((ltid + 1) * chunksize_m < work_m) ? ((ltid + 1) * chunksize_m) : work_m;
    libxsmm_meltw_reduce_param reduce_rows_params, reduce_cols_params;;
    /* Stage 1: per (in,im) block, stage elementwise products and reduce them
     * into block-level partial sums. */
    for (imin = thr_begin_mn; imin < thr_end_mn; imin++) {
      float *const tmp = aux + bm*bn * (ltid*3 + 0); /* aux block for db */
      float *const tmp2 = aux + bm*bn * (ltid*3 + 1); /* aux block for ds */
      float *const tmp3 = aux + bm*bn * (ltid*3 + 2); /* aux block for dgamma */
      in = imin / mBlocks;
      im = imin % mBlocks;
#if defined(__AVX512F__)
      /* Prepare blocks for reductions: tmp = gamma*dY, tmp2 = gamma*dY*X,
       * tmp3 = dY*(rstd*X - rstd*mean) (i.e. dY times the normalized input). */
      for (jj = 0; jj < bn; jj++) {
        __m512 vrstd = _mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, rstd, in, jj, bn));
        __m512 vmean = _mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, mean, in, jj, bn));
        __m512 vb = _mm512_mul_ps(vrstd, _mm512_mul_ps(minus_ones, vmean));
        for (ii = 0; ii < bm-15; ii+=16) {
          __m512 vgamma = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(2, gamma, im, ii, bm));
          __m512 vdY = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, jj, ii, mBlocks, bn, bm));
          __m512 vX = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(4, X, in, im, jj, ii, mBlocks, bn, bm));
          __m512 vaux = _mm512_fmadd_ps(vrstd, vX, vb);
          __m512 vtmp = _mm512_mul_ps(vgamma, vdY);
          _mm512_storeu_ps((float*)tmp+jj*bm+ii, vtmp);
          _mm512_storeu_ps((float*)tmp2+jj*bm+ii, _mm512_mul_ps(vtmp, vX));
          _mm512_storeu_ps((float*)tmp3+jj*bm+ii, _mm512_mul_ps(vdY, vaux));
        }
        /* masked remainder when bm is not a multiple of 16 */
        if (ii < bm) {
          int rem = bm - ii;
          __mmask16 mask = (1 << rem) - 1;
          __m512 vgamma = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(2, gamma, im, ii, bm));
          __m512 vdY = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, jj, ii, mBlocks, bn, bm));
          __m512 vX = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(4, X, in, im, jj, ii, mBlocks, bn, bm));
          __m512 vaux = _mm512_fmadd_ps(vrstd, vX, vb);
          __m512 vtmp = _mm512_mul_ps(vgamma, vdY);
          _mm512_mask_storeu_ps((float*)tmp+jj*bm+ii, mask, vtmp);
          _mm512_mask_storeu_ps((float*)tmp2+jj*bm+ii, mask, _mm512_mul_ps(vtmp, vX));
          _mm512_mask_storeu_ps((float*)tmp3+jj*bm+ii, mask, _mm512_mul_ps(vdY, vaux));
        }
      }
#endif
      /* Now perform reductions */
      reduce_rows_params.in_ptr = tmp;
      reduce_rows_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(3, db_aux, in, im, 0, mBlocks, bn);
      reduce_rows_kernel(&reduce_rows_params);
      reduce_rows_params.in_ptr = tmp2;
      reduce_rows_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(3, ds_aux, in, im, 0, mBlocks, bn);
      reduce_rows_kernel(&reduce_rows_params);
      reduce_cols_params.in_ptr = (float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, 0, 0, mBlocks, bn, bm);
      reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(3, dbeta_aux, im, in, 0, nBlocks, bm);
      reduce_cols_kernel(&reduce_cols_params);
      reduce_cols_params.in_ptr = tmp3;
      reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(3, dgamma_aux, im, in, 0, nBlocks, bm);
      reduce_cols_kernel(&reduce_cols_params);
    }
#pragma omp barrier
    /* Second level of reductions */
    for (in = thr_begin_n; in < thr_end_n; in++) {
      reduce_cols_params.in_ptr = &LIBXSMM_VLA_ACCESS(3, db_aux, in, 0, 0, mBlocks, bn);
      reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(2, db, in, 0, bn);
      reduce_cols_kernel3(&reduce_cols_params);
      reduce_cols_params.in_ptr = &LIBXSMM_VLA_ACCESS(3, ds_aux, in, 0, 0, mBlocks, bn);
      reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(2, ds, in, 0, bn);
      reduce_cols_kernel3(&reduce_cols_params);
    }
    for (im = thr_begin_m; im < thr_end_m; im++) {
      reduce_cols_params.in_ptr = &LIBXSMM_VLA_ACCESS(3, dbeta_aux, im, 0, 0, nBlocks, bm);
      reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(2, dbeta, im, 0, bm);
      reduce_cols_kernel2(&reduce_cols_params);
      reduce_cols_params.in_ptr = &LIBXSMM_VLA_ACCESS(3, dgamma_aux, im, 0, 0, nBlocks, bm);
      reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(2, dgamma, im, 0, bm);
      reduce_cols_kernel2(&reduce_cols_params);
    }
#pragma omp barrier
    /* Calculate auxiliary b/c vectors -- overwritten on db/ds */
    for (in = thr_begin_n; in < thr_end_n; in++) {
#if defined(__AVX512F__)
      for (ii = 0; ii < bn-15; ii+=16) {
        __m512 vmean = _mm512_loadu_ps(&LIBXSMM_VLA_ACCESS(2, mean, in, ii, bn));
        __m512 vrstd = _mm512_loadu_ps(&LIBXSMM_VLA_ACCESS(2, rstd, in, ii, bn));
        __m512 vdb = _mm512_loadu_ps(&LIBXSMM_VLA_ACCESS(2, db, in, ii, bn));
        __m512 vds = _mm512_loadu_ps(&LIBXSMM_VLA_ACCESS(2, ds, in, ii, bn));
        __m512 ascale = _mm512_mul_ps(vrstd, scale);
        __m512 vrstd3 = _mm512_mul_ps(_mm512_mul_ps(vrstd, vrstd), ascale);
        /* b = (db*mean - ds) * rstd^3/m;  c = -b*mean - db*rstd/m */
        __m512 vb = _mm512_mul_ps(_mm512_fmsub_ps(vdb, vmean, vds), vrstd3);
        __m512 vc = _mm512_sub_ps(_mm512_mul_ps(_mm512_mul_ps(minus_ones, vb), vmean), _mm512_mul_ps(vdb, ascale));
        _mm512_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(2, db, in, ii, bn), vb);
        _mm512_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(2, ds, in, ii, bn), vc);
      }
      /* masked remainder when bn is not a multiple of 16 */
      if (ii < bn) {
        int rem = bn - ii;
        __mmask16 mask = (1 << rem) - 1;
        __m512 vmean = _mm512_maskz_loadu_ps(mask, &LIBXSMM_VLA_ACCESS(2, mean, in, ii, bn));
        __m512 vrstd = _mm512_maskz_loadu_ps(mask, &LIBXSMM_VLA_ACCESS(2, rstd, in, ii, bn));
        __m512 vdb = _mm512_maskz_loadu_ps(mask, &LIBXSMM_VLA_ACCESS(2, db, in, ii, bn));
        __m512 vds = _mm512_maskz_loadu_ps(mask, &LIBXSMM_VLA_ACCESS(2, ds, in, ii, bn));
        __m512 ascale = _mm512_mul_ps(vrstd, scale);
        __m512 vrstd3 = _mm512_mul_ps(_mm512_mul_ps(vrstd, vrstd), ascale);
        __m512 vb = _mm512_mul_ps(_mm512_fmsub_ps(vdb, vmean, vds), vrstd3);
        __m512 vc = _mm512_sub_ps(_mm512_mul_ps(_mm512_mul_ps(minus_ones, vb), vmean), _mm512_mul_ps(vdb, ascale));
        _mm512_mask_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(2, db, in, ii, bn), mask, vb);
        _mm512_mask_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(2, ds, in, ii, bn), mask, vc);
      }
#endif
    }
#pragma omp barrier
    /* Final computation of dX */
    for (imin = thr_begin_mn; imin < thr_end_mn; imin++) {
      in = imin / mBlocks;
      im = imin % mBlocks;
#if defined(__AVX512F__)
      for (jj = 0; jj < bn; jj++) {
        __m512 va = _mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, rstd, in, jj, bn));
        __m512 vb =
_mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, db, in, jj, bn)); __m512 vc = _mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, ds, in, jj, bn)); for (ii = 0; ii < bm-15; ii+=16) { __m512 vgamma = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(2, gamma, im, ii, bm)); __m512 vdY = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, jj, ii, mBlocks, bn, bm)); __m512 vX = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(4, X, in, im, jj, ii, mBlocks, bn, bm)); __m512 vaux1 = _mm512_fmadd_ps(vb, vX, vc); __m512 vaux2 = _mm512_mul_ps(va, _mm512_mul_ps(vdY, vgamma)); _mm512_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(4, dX, in, im, jj, ii, mBlocks, bn, bm), _mm512_add_ps(vaux1, vaux2)); } if (ii < bm) { int rem = bm - ii; __mmask16 mask = (1 << rem) - 1; __m512 vgamma = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(2, gamma, im, ii, bm)); __m512 vdY = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, jj, ii, mBlocks, bn, bm)); __m512 vX = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(4, X, in, im, jj, ii, mBlocks, bn, bm)); __m512 vaux1 = _mm512_fmadd_ps(vb, vX, vc); __m512 vaux2 = _mm512_mul_ps(va, _mm512_mul_ps(vdY, vgamma)); _mm512_mask_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(4, dX, in, im, jj, ii, mBlocks, bn, bm), mask, _mm512_add_ps(vaux1, vaux2)); } } #endif } #pragma omp barrier } libxsmm_free(scratch); libxsmm_free(aux); } int main(int argc, char* argv[]) { unsigned int m = 64, n = 64, iters = 10000, k = 0; libxsmm_blasint ld_in = 64, ld_vector = 64, block_size = 64; float *sinp, *gamma, *beta, *sout, *sout_nc, *mean_data, *rstd_data, *sout_ref, *mean_data_ref, *rstd_data_ref, *bias_aux; float *dY_ref, *X_ref, *mean_ref, *rstd_ref, *gamma_ref, *dX_ref, *dgamma_ref, *dbeta_ref; float *dY_bwd, *X_bwd, *dX_bwd, *dgamma_bwd, *dbeta_bwd, *dX_bwd_nc; libxsmm_matdiff_info norms_out, norms_mean, norms_rstd, norms_dx, norms_dbeta, norms_dgamma; unsigned long long l_start, l_end; double l_total = 0, l_total2 = 0; libxsmm_meltw_redu_flags jit_reduce_flags = 
LIBXSMM_MELTW_FLAG_REDUCE_NONE; libxsmm_meltwfunction_reduce reduce_kernel; libxsmm_meltw_scal_flags jit_scalemean_flags = 0; libxsmm_meltwfunction_scale scalemean_kernel; libxsmm_meltw_scal_flags jit_scaleout_flags = 0; libxsmm_meltwfunction_scale scaleout_kernel; libxsmm_init(); libxsmm_matdiff_clear(&norms_out); libxsmm_matdiff_clear(&norms_mean); libxsmm_matdiff_clear(&norms_rstd); libxsmm_matdiff_clear(&norms_dx); libxsmm_matdiff_clear(&norms_dbeta); libxsmm_matdiff_clear(&norms_dgamma); if ( argc > 1 ) m = atoi(argv[1]); if ( argc > 2 ) n = atoi(argv[2]); if ( argc > 3 ) iters = atoi(argv[3]); if ( argc > 4 ) block_size = atoi(argv[4]); libxsmm_init(); ld_in = m; n = LIBXSMM_MAX(n,1); ld_vector = n; ld_in = LIBXSMM_MAX(ld_in,(libxsmm_blasint)m); /* Allocate arrays */ sinp = (float*) malloc(ld_in*n*sizeof(float)); gamma = (float*) malloc(m*sizeof(float) ); beta = (float*) malloc(m*sizeof(float) ); sout = (float*) malloc(ld_in*n*sizeof(float) ); sout_nc = (float*) malloc(ld_in*n*sizeof(float) ); mean_data = (float*) malloc(n*sizeof(float) ); rstd_data = (float*) malloc(n*sizeof(float) ); dY_ref = (float*) malloc(m*n*sizeof(float)); dY_bwd = (float*) malloc(m*n*sizeof(float)); X_ref = (float*) malloc(m*n*sizeof(float)); X_bwd = (float*) malloc(m*n*sizeof(float)); mean_ref = (float*) malloc(n*sizeof(float)); rstd_ref = (float*) malloc(n*sizeof(float)); gamma_ref = (float*) malloc(m*sizeof(float)); dX_ref = (float*) malloc(m*n*sizeof(float)); dX_bwd = (float*) malloc(m*n*sizeof(float)); dX_bwd_nc = (float*) malloc(m*n*sizeof(float)); dgamma_ref= (float*) malloc(m*sizeof(float)); dgamma_bwd= (float*) malloc(m*sizeof(float)); dbeta_ref = (float*) malloc(m*sizeof(float)); dbeta_bwd = (float*) malloc(m*sizeof(float)); /* Allocate reference arrays */ mean_data_ref = (float*) malloc(n*sizeof(float) ); rstd_data_ref = (float*) malloc(n*sizeof(float) ); sout_ref = (float*) malloc(ld_in*n*sizeof(float) ); /* Allocate auxiliary arrays for optimized version */ bias_aux = 
(float*) malloc(n*sizeof(float) ); /* Fill matrices with random data */ sfill_matrix ( sinp, ld_in, m, n ); sfill_matrix ( gamma, ld_in, m, 1 ); sfill_matrix ( beta, ld_in, m, 1 ); sfill_matrix ( dY_ref, ld_in, m, n ); matrix_copy_NC_to_NCNC( dY_ref, dY_bwd, 1, n, m, block_size, block_size ); sfill_matrix ( X_ref, ld_in, m, n ); matrix_copy_NC_to_NCNC( X_ref, X_bwd, 1, n, m, block_size, block_size ); sfill_matrix ( mean_ref, n, n, 1 ); sfill_matrix ( rstd_ref, n, n, 1 ); sfill_matrix ( gamma_ref, m, m, 1 ); /* Calculate reference results... */ naive_layernorm(m, n, ld_in, sinp, gamma, beta, sout_ref, mean_data_ref, rstd_data_ref); naive_layernorm_bwd(m, n, ld_in, dY_ref, X_ref, mean_ref, rstd_ref, gamma_ref, dX_ref, dgamma_ref, dbeta_ref); #if 0 /* Generate JITED kernels for optimized code */ jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_ROWS | LIBXSMM_MELTW_FLAG_REDUCE_OP_ADD | LIBXSMM_MELTW_FLAG_REDUCE_ELTS | LIBXSMM_MELTW_FLAG_REDUCE_ELTS_SQUARED; printf("JITing reduce kernel... \n"); reduce_kernel = libxsmm_dispatch_meltw_reduce(m, n, &ld_in, &ld_in, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags); jit_scalemean_flags = LIBXSMM_MELTW_FLAG_SCALE_ROWS | LIBXSMM_MELTW_FLAG_SCALE_MULT; printf("JITing mean-scale kernel... \n"); scalemean_kernel = libxsmm_dispatch_meltw_scale(n, 1, &ld_vector, &ld_vector, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_scalemean_flags); jit_scaleout_flags = LIBXSMM_MELTW_FLAG_SCALE_ROWS_COLS | LIBXSMM_MELTW_FLAG_SCALE_MULT | LIBXSMM_MELTW_FLAG_SCALE_ADD_BIAS; printf("JITing scaling kernel for output... \n"); scaleout_kernel = libxsmm_dispatch_meltw_scale(m, n, &ld_in, &ld_in, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_scaleout_flags); #endif /* Calculate blocked results... 
*/ #if 0 optimized_layernorm(m, n, ld_in, sinp, gamma, beta, sout, mean_data, rstd_data, reduce_kernel, scalemean_kernel, scaleout_kernel, bias_aux); #else matrix_copy_NC_to_NCNC( sinp, sout, 1, n, m, block_size, block_size ); optimized_blocked_layernorm(m, n, block_size, block_size, sout, gamma, beta, mean_data, rstd_data); matrix_copy_NCNC_to_NC( sout, sout_nc, 1, n, m, block_size, block_size ); optimized_blocked_layernorm_bwd(m, n, block_size, block_size, dY_bwd, X_bwd, mean_ref, rstd_ref, gamma_ref, dX_bwd, dgamma_bwd, dbeta_bwd); matrix_copy_NCNC_to_NC( dX_bwd, dX_bwd_nc, 1, n, m, block_size, block_size ); #endif /* compare */ printf("##########################################\n"); printf("# Correctness FWD - Output #\n"); printf("##########################################\n"); #if 0 libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, ld_in*n, 1, sout_ref, sout, 0, 0); #else libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, ld_in*n, 1, sout_ref, sout_nc, 0, 0); #endif printf("L1 reference : %.25g\n", norms_out.l1_ref); printf("L1 test : %.25g\n", norms_out.l1_tst); printf("L2 abs.error : %.24f\n", norms_out.l2_abs); printf("L2 rel.error : %.24f\n", norms_out.l2_rel); printf("Linf abs.error: %.24f\n", norms_out.linf_abs); printf("Linf rel.error: %.24f\n", norms_out.linf_rel); printf("Check-norm : %.24f\n\n", norms_out.normf_rel); /* compare */ printf("##########################################\n"); printf("# Correctness FWD - Mean #\n"); printf("##########################################\n"); libxsmm_matdiff(&norms_mean, LIBXSMM_DATATYPE_F32, n, 1, mean_data_ref, mean_data, 0, 0); printf("L1 reference : %.25g\n", norms_mean.l1_ref); printf("L1 test : %.25g\n", norms_mean.l1_tst); printf("L2 abs.error : %.24f\n", norms_mean.l2_abs); printf("L2 rel.error : %.24f\n", norms_mean.l2_rel); printf("Linf abs.error: %.24f\n", norms_mean.linf_abs); printf("Linf rel.error: %.24f\n", norms_mean.linf_rel); printf("Check-norm : %.24f\n\n", norms_mean.normf_rel); /* compare */ 
printf("##########################################\n"); printf("# Correctness FWD - Rstd #\n"); printf("##########################################\n"); libxsmm_matdiff(&norms_rstd, LIBXSMM_DATATYPE_F32, n, 1, rstd_data_ref, rstd_data, 0, 0); printf("L1 reference : %.25g\n", norms_rstd.l1_ref); printf("L1 test : %.25g\n", norms_rstd.l1_tst); printf("L2 abs.error : %.24f\n", norms_rstd.l2_abs); printf("L2 rel.error : %.24f\n", norms_rstd.l2_rel); printf("Linf abs.error: %.24f\n", norms_rstd.linf_abs); printf("Linf rel.error: %.24f\n", norms_rstd.linf_rel); printf("Check-norm : %.24f\n\n", norms_rstd.normf_rel); /* compare */ printf("##########################################\n"); printf("# Correctness BWD - dX #\n"); printf("##########################################\n"); libxsmm_matdiff(&norms_dx, LIBXSMM_DATATYPE_F32, ld_in*n, 1, dX_ref, dX_bwd_nc, 0, 0); printf("L1 reference : %.25g\n", norms_dx.l1_ref); printf("L1 test : %.25g\n", norms_dx.l1_tst); printf("L2 abs.error : %.24f\n", norms_dx.l2_abs); printf("L2 rel.error : %.24f\n", norms_dx.l2_rel); printf("Linf abs.error: %.24f\n", norms_dx.linf_abs); printf("Linf rel.error: %.24f\n", norms_dx.linf_rel); printf("Check-norm : %.24f\n\n", norms_dx.normf_rel); /* compare */ printf("##########################################\n"); printf("# Correctness BWD - dbeta #\n"); printf("##########################################\n"); libxsmm_matdiff(&norms_dbeta, LIBXSMM_DATATYPE_F32, m, 1, dbeta_ref, dbeta_bwd, 0, 0); printf("L1 reference : %.25g\n", norms_dbeta.l1_ref); printf("L1 test : %.25g\n", norms_dbeta.l1_tst); printf("L2 abs.error : %.24f\n", norms_dbeta.l2_abs); printf("L2 rel.error : %.24f\n", norms_dbeta.l2_rel); printf("Linf abs.error: %.24f\n", norms_dbeta.linf_abs); printf("Linf rel.error: %.24f\n", norms_dbeta.linf_rel); printf("Check-norm : %.24f\n\n", norms_dbeta.normf_rel); /* compare */ printf("##########################################\n"); printf("# Correctness BWD - dgamma #\n"); 
printf("##########################################\n"); libxsmm_matdiff(&norms_dgamma, LIBXSMM_DATATYPE_F32, m, 1, dgamma_ref, dgamma_bwd, 0, 0); printf("L1 reference : %.25g\n", norms_dgamma.l1_ref); printf("L1 test : %.25g\n", norms_dgamma.l1_tst); printf("L2 abs.error : %.24f\n", norms_dgamma.l2_abs); printf("L2 rel.error : %.24f\n", norms_dgamma.l2_rel); printf("Linf abs.error: %.24f\n", norms_dgamma.linf_abs); printf("Linf rel.error: %.24f\n", norms_dgamma.linf_rel); printf("Check-norm : %.24f\n\n", norms_dgamma.normf_rel); l_start = libxsmm_timer_tick(); /* Calculate reference results... */ for (k = 0; k < iters; k++) { naive_layernorm(m, n, ld_in, sinp, gamma, beta, sout_ref, mean_data_ref, rstd_data_ref); } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("Reference fwd time = %.5g\n", ((double)(l_total))); l_start = libxsmm_timer_tick(); for (k = 0; k < iters; k++) { #if 1 optimized_blocked_layernorm(m, n, block_size, block_size, sout, gamma, beta, mean_data, rstd_data); #else optimized_layernorm(m, n, ld_in, sinp, gamma, beta, sout, mean_data, rstd_data, reduce_kernel, scalemean_kernel, scaleout_kernel, bias_aux); #endif } l_end = libxsmm_timer_tick(); l_total2 = libxsmm_timer_duration(l_start, l_end); printf("Optimized fwd time = %.5g\n", ((double)(l_total2))); printf("Speedup fwd is = %.5g\n", ((double)(l_total/l_total2))); l_start = libxsmm_timer_tick(); /* Calculate reference results... 
*/ for (k = 0; k < iters; k++) { naive_layernorm_bwd(m, n, ld_in, dY_ref, X_ref, mean_ref, rstd_ref, gamma_ref, dX_ref, dgamma_ref, dbeta_ref); } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("Reference bwd time = %.5g\n", ((double)(l_total))); l_start = libxsmm_timer_tick(); for (k = 0; k < iters; k++) { optimized_blocked_layernorm_bwd(m, n, block_size, block_size, dY_bwd, X_bwd, mean_ref, rstd_ref, gamma_ref, dX_bwd, dgamma_bwd, dbeta_bwd); } l_end = libxsmm_timer_tick(); l_total2 = libxsmm_timer_duration(l_start, l_end); printf("Optimized bwd time = %.5g\n", ((double)(l_total2))); printf("Speedup bwd is = %.5g\n", ((double)(l_total/l_total2))); /* Free allocated arrays */ free(sinp); free(gamma); free(beta); free(sout); free(mean_data); free(rstd_data); free(mean_data_ref); free(rstd_data_ref); free(sout_ref); free(bias_aux); free(dY_ref); free(X_ref); free(mean_ref); free(rstd_ref); free(gamma_ref); free(dX_ref); free(dgamma_ref); free(dbeta_ref); free(dY_bwd); free(X_bwd); free(dX_bwd); free(dgamma_bwd); free(dbeta_bwd); free(dX_bwd_nc); return EXIT_SUCCESS; }
/* ===== begin appended file: rumi-64-128-18r.c ===== */
/* * Date: 11 December 2015 * Contact: Thomas Peyrin - thomas.peyrin@gmail.com */ /* * Simulation of boomerang analysis for Skinny * Date: March 21, 2020 * Author: Hosein Hadipour * Contact: hsn.hadipour@gmail.com */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <omp.h> #include <stdint.h> #include <stdbool.h> #include <string.h> // using namespace std; typedef unsigned long long int UINT64; // #define DEBUG 1 #define Nthreads 1 #define STEP ((1 << 10) - 1) #define PROGRAMNUMBER 1 // Table that encodes the parameters of the various Skinny versions: // (block size, key size, number of rounds) //Skinny-64-64: 32 rounds //Skinny-64-128: 36 rounds //Skinny-64-192: 40 rounds //Skinny-128-128: 40 rounds //Skinny-128-256: 48 rounds //Skinny-128-384: 56 rounds int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}}; // Packing of data is done as follows (state[i][j] stands for row i and column j): // 0 1 2 3 // 4 5 6 7 // 8 9 10 11 //12 13 14 15 // 4-bit Sbox const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15}; const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15}; // 8-bit Sbox const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 
0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff}; const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 
0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff}; // ShiftAndSwitchRows permutation const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12}; const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14}; // Tweakey permutation const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7}; const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1}; // round constants const unsigned char RC[62] = { 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A, 0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13, 0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28, 0x10, 0x20}; FILE *fic; void string_state(unsigned char state[16], int ver) { for (int i = 0; i < (versions[ver][0] >> 3); i++) { printf("%02x", state[i]); } } void string_tweak(unsigned char state[16], int ver) { for (int i = 0; i < (versions[ver][1] >> 3); i++) { printf("%02x", state[i]); } } void display_matrix(unsigned char state[4][4], int ver) { int i; unsigned char input[16]; if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); for (i = 0; i < 8; i++) fprintf(fic, "%02x", input[i]); } else if (versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 0x3] & 0xFF; for (i = 0; i < 16; i++) fprintf(fic, 
"%02x", input[i]);
    }
}

// Print the internal state followed by every tweakey state (TK1..TKz, where
// z = tweakey size / block size) to the output file `fic`.
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int k;
    fprintf(fic, "S = ");
    display_matrix(state, ver);
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        fprintf(fic, " - TK%i = ", k + 1);
        display_matrix(keyCells[k], ver);
    }
}

// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state
// The XOR combines TK1 (and TK2/TK3 when the tweakey is 2x/3x the block size);
// afterwards each TK array is advanced: cell permutation TWEAKEY_P, then the
// per-version LFSR applied to the two top rows of TK2 and TK3 only.
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];

    // apply the subtweakey to the internal state
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }

    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the TWEAKEY permutation
                pos = TWEAKEY_P[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }

    // update the subtweakey states with the LFSRs (rows 0-1 only; TK1 has no LFSR)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i <= 1; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of LFSRs for TK updates
                if (k == 1)
                {
                    // TK2 LFSR: left shift with feedback (4-bit or 8-bit variant)
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
                else if (k == 2)
                {
                    // TK3 LFSR: right shift with feedback (4-bit or 8-bit variant)
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
            }
        }
    }

    // commit the updated tweakey states
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }
}

// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function)
// Undoes AddKey: inverse permutation TWEAKEY_P_inv, inverse LFSRs applied to
// rows 2-3 (the rows that held the updated halves before un-permuting), then
// the subtweakey XOR.
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];

    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the inverse TWEAKEY permutation
                pos = TWEAKEY_P_inv[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }

    // update the subtweakey states with the LFSRs
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 2; i <= 3; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of inverse LFSRs for TK updates
                if (k == 1)
                {
                    // inverse TK2 LFSR (right shift; mirrors the forward left shift)
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
                else if (k == 2)
                {
                    // inverse TK3 LFSR (left shift; mirrors the forward right shift)
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
            }
        }
    }

    // commit the updated tweakey states
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }

    // apply the subtweakey to the internal state
    for (i
= 0; i <= 1; i++) { for (j = 0; j < 4; j++) { state[i][j] ^= keyCells[0][i][j]; if (2 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j]; else if (3 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j]; } } } // Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state void AddConstants(unsigned char state[4][4], int r) { state[0][0] ^= (RC[r] & 0xf); state[1][0] ^= ((RC[r] >> 4) & 0x3); state[2][0] ^= 0x2; } // apply the 4-bit Sbox void SubCell4(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_4[state[i][j]]; } // apply the 4-bit inverse Sbox void SubCell4_inv(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_4_inv[state[i][j]]; } // apply the 8-bit Sbox void SubCell8(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_8[state[i][j]]; } // apply the 8-bit inverse Sbox void SubCell8_inv(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_8_inv[state[i][j]]; } // Apply the ShiftRows function void ShiftRows(unsigned char state[4][4]) { int i, j, pos; unsigned char state_tmp[4][4]; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the ShiftRows permutation pos = P[j + 4 * i]; state_tmp[i][j] = state[pos >> 2][pos & 0x3]; } } for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { state[i][j] = state_tmp[i][j]; } } } // Apply the inverse ShiftRows function void ShiftRows_inv(unsigned char state[4][4]) { int i, j, pos; unsigned char state_tmp[4][4]; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the inverse ShiftRows permutation pos = P_inv[j + 4 * i]; state_tmp[i][j] = state[pos >> 2][pos & 0x3]; } } for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { state[i][j] = state_tmp[i][j]; } } } // Apply the 
linear diffusion matrix //M = //1 0 1 1 //1 0 0 0 //0 1 1 0 //1 0 1 0 void MixColumn(unsigned char state[4][4]) { int j; unsigned char temp; for (j = 0; j < 4; j++) { state[1][j] ^= state[2][j]; state[2][j] ^= state[0][j]; state[3][j] ^= state[2][j]; temp = state[3][j]; state[3][j] = state[2][j]; state[2][j] = state[1][j]; state[1][j] = state[0][j]; state[0][j] = temp; } } // Apply the inverse linear diffusion matrix void MixColumn_inv(unsigned char state[4][4]) { int j; unsigned char temp; for (j = 0; j < 4; j++) { temp = state[3][j]; state[3][j] = state[0][j]; state[0][j] = state[1][j]; state[1][j] = state[2][j]; state[2][j] = temp; state[3][j] ^= state[2][j]; state[2][j] ^= state[0][j]; state[1][j] ^= state[2][j]; } } // decryption function of Skinny void dec(unsigned char *input, const unsigned char *userkey, int ver, int r) { unsigned char state[4][4]; unsigned char dummy[4][4] = {{0}}; unsigned char keyCells[3][4][4]; int i; memset(keyCells, 0, 48); for (i = 0; i < 16; i++) { if (versions[ver][0] == 64) { if (i & 1) { state[i >> 2][i & 0x3] = input[i >> 1] & 0xF; keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF; } else { state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF; keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF; } } else if (versions[ver][0] == 128) { state[i >> 2][i & 0x3] = input[i] & 0xFF; keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF; if (versions[ver][1] >= 256) keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF; if (versions[ver][1] >= 384) keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF; } } for (i = r - 1; i >= 0; i--) { AddKey(dummy, keyCells, 
ver); } #ifdef DEBUG fprintf(fic, "DEC - initial state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif for (i = r - 1; i >= 0; i--) { MixColumn_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif ShiftRows_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddKey_inv(state, keyCells, ver); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddConstants(state, i); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) SubCell4_inv(state); else SubCell8_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif } #ifdef DEBUG fprintf(fic, "DEC - final state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); } else if (versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 0x3] & 0xFF; } } // encryption function of Skinny void enc(unsigned char *input, const unsigned char *userkey, int ver, int r) { unsigned char state[4][4]; unsigned char keyCells[3][4][4]; int i; memset(keyCells, 0, 48); for (i = 0; i < 16; i++) { if (versions[ver][0] == 64) { if (i & 1) { state[i >> 2][i & 0x3] = input[i >> 1] & 0xF; keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = userkey[(i + 
32) >> 1] & 0xF; } else { state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF; keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF; } } else if (versions[ver][0] == 128) { state[i >> 2][i & 0x3] = input[i] & 0xFF; keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF; if (versions[ver][1] >= 256) keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF; if (versions[ver][1] >= 384) keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF; } } #ifdef DEBUG fprintf(fic, "ENC - initial state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif for (i = 0; i < r; i++) { if (versions[ver][0] == 64) SubCell4(state); else SubCell8(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after SubCell: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddConstants(state, i); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after AddConstants: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddKey(state, keyCells, ver); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after AddKey: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif ShiftRows(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif MixColumn(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after MixColumn: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif } //The last subtweakey should not be added #ifdef DEBUG fprintf(fic, "ENC - final state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); } else if 
(versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 0x3] & 0xFF; } } // generate test vectors for all the versions of Skinny void TestVectors(int ver) { unsigned char p[16]; unsigned char c[16]; unsigned char k[48]; int n; for (n = 1; n < 10; n++) { int i; for (i = 0; i < (versions[ver][0] >> 3); i++) c[i] = p[i] = rand() & 0xff; for (i = 0; i < (versions[ver][0] >> 3); i++) printf("%02x", p[i]); printf("\n"); for (i = 0; i < (versions[ver][1] >> 3); i++) k[i] = rand() & 0xff; fprintf(fic, "TK = "); for (i = 0; i < (versions[ver][1] >> 3); i++) fprintf(fic, "%02x", k[i]); fprintf(fic, "\n"); fprintf(fic, "P = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", p[i]); fprintf(fic, "\n"); enc(c, k, ver, 10); fprintf(fic, "C = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", c[i]); fprintf(fic, "\n"); dec(c, k, ver, 10); fprintf(fic, "P' = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", c[i]); fprintf(fic, "\n\n"); } } int boomerang(int r, int ver, unsigned long long N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2) { int i; unsigned char p1[16], p2[16]; unsigned char p1_old[16], p2_old[16]; unsigned char c3_old[16], c4_old[16]; unsigned char c3[16], c4[16]; unsigned char k1[48], k2[48], k3[48], k4[48]; // randomly choose k1 for (i = 0; i < (versions[ver][1] >> 3); i++) k1[i] = rand() & 0xff; // derive k2 for (i = 0; i < (versions[ver][1] >> 3); i++) k2[i] = k1[i] ^ dk1[i]; // derive k3 for (i = 0; i < (versions[ver][1] >> 3); i++) k3[i] = k1[i] ^ dk2[i]; // derive k4 for (i = 0; i < (versions[ver][1] >> 3); i++) k4[i] = k2[i] ^ dk2[i]; int num = 0; for (UINT64 t = 0; t < N3; t++) { // randomly choose p1 for (i = 0; i < (versions[ver][0] >> 3); i++) { p1[i] = rand() & 0xff; p1_old[i] = p1[i]; } // derive p2 for (i = 0; i < (versions[ver][0] >> 3); i++) { p2[i] = p1[i] ^ dp[i]; p2_old[i] = p2[i]; } enc(p1, k1, ver, r); enc(p2, k2, ver, r); // 
derive c3 for (i = 0; i < (versions[ver][0] >> 3); i++) { c3[i] = p1[i] ^ dc[i]; c3_old[i] = c3[i]; } // derive c4 for (i = 0; i < (versions[ver][0] >> 3); i++) { c4[i] = p2[i] ^ dc[i]; c4_old[i] = c4[i]; } dec(c3, k3, ver, r); dec(c4, k4, ver, r); bool flag = 1; for (i = 0; i < (versions[ver][0] >> 3); i++) if ((c3[i] ^ c4[i]) != dp[i]) flag = 0; if (flag) { num++; printf("%s\n", "A right quartet found :)\n"); printf("p1: "); string_state(p1_old, ver); printf("\n"); printf("p2: "); string_state(p2_old, ver); printf("\n"); printf("p3: "); string_state(c3, ver); printf("\n"); printf("p4: "); string_state(c4, ver); printf("\n"); printf("c1: "); string_state(p1, ver); printf("\n"); printf("c2: "); string_state(p2, ver); printf("\n"); printf("c3: "); string_state(c3_old, ver); printf("\n"); printf("c4: "); string_state(c4_old, ver); printf("\n"); printf("k1: "); string_tweak(k1, ver); printf("\n"); printf("k2: "); string_tweak(k2, ver); printf("\n"); printf("k3: "); string_tweak(k3, ver); printf("\n"); printf("k4: "); string_tweak(k4, ver); printf("\n"); } } return num; } double send_boomerangs(int R, int ver, int N1, UINT64 N2, UINT64 N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2) { // Parallel execution int NUM[N1]; int counter; printf("#Rounds: %d rounds\n", R); printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %llu * %llu = 2^(%f)\n", N1, N2, N3, log(N1 * N2 * N3) / log(2)); printf("#Queries per thread = (#Bunches per thread) * (#Queries per bunch) = %llu * %llu = 2^(%f)\n", N2, N3, log(N2 * N3) / log(2)); clock_t clock_timer; double wall_timer; clock_timer = clock(); wall_timer = omp_get_wtime(); omp_set_num_threads(N1); #pragma omp parallel for for (counter = 0; counter < N1; counter++) { int num = 0; int ID = omp_get_thread_num(); for (UINT64 j = 0; j < N2; j++) { num += boomerang(R, ver, N3, dp, dc, dk1, dk2); if ((j & STEP) == 0){ printf("PID: %d \t Bunch Number: %llu/%llu\n", 
ID, j, N2); } } NUM[ID] = num; } printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC); printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer); double sum = 0; double sum_temp = 1; for (int i = 0; i < N1; i++) sum += NUM[i]; printf("sum = %f\n", sum); sum_temp = (double)(N1 * N2 * N3) / sum; printf("2^(-%f)\n\n", log(sum_temp) / log(2)); printf("##########################\n"); return sum; } void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16]) { for (int i = 0; i < (versions[ver][0] >> 3); i++) { char hex[2]; hex[0] = hex_str[2 * i]; hex[1] = hex_str[2 * i + 1]; dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff); } } void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48]) { for (int i = 0; i < (versions[ver][1] >> 3); i++) { char hex[2]; hex[0] = hex_str[2 * i]; hex[1] = hex_str[2 * i + 1]; dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff); } } void init_prng(int offset) { //int initial_seed = 0x5EC7F2B0; //int initial_seed = 0x30051991; My birthday! unsigned int initial_seed = time(NULL) + offset*1000000; srand(initial_seed); // Initialization, should only be called once. int r = rand(); printf("[+] PRNG initialized to 0x%08X\n", initial_seed); } int main(int argc, char *argv[]) { //srand((unsigned)time(NULL)); // Initialization, should only be called once. 
int r = rand(); init_prng(atoi(argv[1])); // //test all versions of Skinny // for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++) // { // sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]); // fic = fopen(name, "w"); // fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]); // TestVectors(i); // fclose(fic); // printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]); // } unsigned char dp[16]; unsigned char dc[16]; unsigned char dk1[48]; unsigned char dk2[48]; // ####################################################################################################### // ####################################################################################################### // ############################## User must change only the following lines ############################## int n = 1; // Number of indipendent experiments int R = 18; // Number of rounds int ver = 1; // Determine the version: // [0 = Skinny-64-64] // [1 = Skinny-64-128] // [2 = Skinny-64-192] // [3 = Skinny-128-128] // [4 = Skinny-128-256] // [5 = Skinny-128-384] char dp_str[] = "0000000000000008"; char dc_str[] = "0454000404070404"; char dk1_str[] = "00000000C000000000000000F0000000"; char dk2_str[] = "00000000000040000000000000007000"; // ####################################################################################################### // ####################################################################################################### convert_hexstr_to_statearray(ver, dp_str, dp); convert_hexstr_to_statearray(ver, dc_str, dc); convert_hexstr_to_tweakarray(ver, dk1_str, dk1); convert_hexstr_to_tweakarray(ver, dk2_str, dk2); //########################## Number of queries ######################### int N1 = Nthreads; // Number of paralle threads : N1 int deg = 16; UINT64 N2 = 1 << deg; // Number of bunches per threads : N2 = 2^(deg) UINT64 N3 = 1 
<< 16; // Number of queries per bunches : N3 //################### Number of total queries : N1*N2*N3 ############### double sum = 0; for (int i = 0; i < n; i++) { sum += send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2); } printf("Program number = %d", PROGRAMNUMBER); printf("\nAverage = 2^(-%0.4f)\n", (log(n) + log(N1) + log(N2) + log(N3) - log(sum))/log(2)); // sum = (double)(n * N1 * N2 * N3) / sum; // printf("\nAverage = 2^(-%0.2f)\n", log(sum) / log(2)); return 0; }
// nodal_residualbased_block_builder_and_solver.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // // #if !defined(KRATOS_NODAL_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER) #define KRATOS_NODAL_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER /* System includes */ #include <set> #ifdef _OPENMP #include <omp.h> #endif /* #include <unordered_set> */ /* #ifdef USE_GOOGLE_HASH */ /* #include "sparsehash/dense_hash_set" //included in external libraries */ /* #endif */ #ifdef USE_GOOGLE_HASH #include "sparsehash/dense_hash_set" //included in external libraries #else #include <unordered_set> #endif /* Project includes */ #include "utilities/timer.h" #include "includes/define.h" #include "includes/key_hash.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "includes/model_part.h" #include "utilities/openmp_utils.h" #include "includes/kratos_flags.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class NodalResidualBasedBlockBuilderAndSolver * @ingroup KratosCore * @brief Current class provides an implementation for standard builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual) * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the dirichlet conditions is naturally dealt with as the residual already contains * this information. 
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual * @author Riccardo Rossi */ template <class TSparseSpace, class TDenseSpace, //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class NodalResidualBasedBlockBuilderAndSolver : public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedBlockBuilderAndSolver); typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef Node<3> NodeType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; typedef Vector VectorType; ///@} ///@name Life Cycle ///@{ /** Constructor. */ NodalResidualBasedBlockBuilderAndSolver( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver) { } /** Destructor. */ ~NodalResidualBasedBlockBuilderAndSolver() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Function to perform the build of the RHS. 
The vector could be sized as the total number * of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param b The RHS vector */ void Build( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &b) override { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; // Getting the elements from the model const int nelements = static_cast<int>(rModelPart.Elements().size()); // Getting the array of the conditions const int nconditions = static_cast<int>(rModelPart.Conditions().size()); ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin(); ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin(); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // assemble all elements double start_build = OpenMPUtils::GetCurrentTime(); #pragma omp parallel firstprivate(nelements, nconditions, LHS_Contribution, RHS_Contribution, EquationId) { #pragma omp for schedule(guided, 512) nowait for (int k = 0; k < nelements; k++) { ModelPart::ElementsContainerType::iterator it = el_begin + k; //detect if the element is active or not. 
If the user did not make any choice the element //is active by default bool element_is_active = true; if ((it)->IsDefined(ACTIVE)) element_is_active = (it)->Is(ACTIVE); if (element_is_active) { //calculate elemental contribution pScheme->CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution #ifdef USE_LOCKS_IN_ASSEMBLY Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif // clean local elemental memory pScheme->CleanMemory(*(it.base())); } } //#pragma omp parallel for firstprivate(nconditions, LHS_Contribution, RHS_Contribution, EquationId ) schedule(dynamic, 1024) #pragma omp for schedule(guided, 512) for (int k = 0; k < nconditions; k++) { ModelPart::ConditionsContainerType::iterator it = cond_begin + k; //detect if the element is active or not. If the user did not make any choice the element //is active by default bool condition_is_active = true; if ((it)->IsDefined(ACTIVE)) condition_is_active = (it)->Is(ACTIVE); if (condition_is_active) { //calculate elemental contribution pScheme->Condition_CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution #ifdef USE_LOCKS_IN_ASSEMBLY Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif // clean local elemental memory pScheme->CleanMemory(*(it.base())); } } } const double stop_build = OpenMPUtils::GetCurrentTime(); KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Build time: " << stop_build - start_build << std::endl; //for (int i = 0; i < A_size; i++) // omp_destroy_lock(&lock_array[i]); KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", 
(this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl; KRATOS_CATCH("") } void BuildNodally( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &b) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; std::cout << "Build Nodally Continuity Equation" << std::endl; //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different terms Element::EquationIdVectorType EquationId; ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); /* const double start_build = OpenMPUtils::GetCurrentTime(); */ /* #pragma omp parallel */ { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { VectorType nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); const unsigned int neighSize = nodalSFDneighboursId.size(); if (neighSize > 1) { const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; LHS_Contribution = ZeroMatrix(neighSize, neighSize); RHS_Contribution = ZeroVector(neighSize); if (EquationId.size() != neighSize) EquationId.resize(neighSize, false); double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT); double volumetricCoeff = itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) + 2.0 * deviatoricCoeff / 3.0; const unsigned int xpos = itNode->GetDofPosition(VELOCITY_X); double deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - 
itNode->FastGetSolutionStepValue(PRESSURE, 1); double volumetricDefRate = itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE); LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff; RHS_Contribution[0] += (-deltaPressure / volumetricCoeff + volumetricDefRate) * nodalVolume; bool stabilizationNeeded = false; if ((itNode->Is(FLUID) || (itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(POISSON_RATIO) > 0.49999))) { stabilizationNeeded = true; } else { for (unsigned int i = 0; i < neighSize; i++) { unsigned int idNode = nodalSFDneighboursId[i]; EquationId[i] = rModelPart.Nodes()[idNode].GetDof(PRESSURE, xpos).EquationId(); } } if (stabilizationNeeded == true) { /* Vector& rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); */ unsigned int firstRow = 0; unsigned int firstCol = 0; double meanMeshSize = itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE); double characteristicLength = 2.0 * meanMeshSize; /* double nodalFreesurfaceArea=itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA); */ double density = itNode->FastGetSolutionStepValue(DENSITY); /* double tauStab=1.0/(8.0*deviatoricCoeff/(meanMeshSize*meanMeshSize)+2.0*density/timeInterval); */ double nodalVelocity = 0; if (dimension == 2) { nodalVelocity = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) + itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y)); } else if (dimension == 3) { nodalVelocity = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) + itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y) + itNode->FastGetSolutionStepValue(VELOCITY_Z) * itNode->FastGetSolutionStepValue(VELOCITY_Z)); } double tauStab = 1.0 * (characteristicLength * characteristicLength * timeInterval) / (density * nodalVelocity * timeInterval * characteristicLength + density * characteristicLength * characteristicLength + 8.0 
* deviatoricCoeff * timeInterval); /* tauStab*=10.0; */ /* tauStab=0.0000001; */ /* tauStab=100.0; */ LHS_Contribution(0, 0) += +nodalVolume * tauStab * density / (volumetricCoeff * timeInterval); RHS_Contribution[0] += -nodalVolume * tauStab * density / (volumetricCoeff * timeInterval) * (deltaPressure - itNode->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) * timeInterval); if (itNode->Is(FREE_SURFACE)) { /* LHS_Contribution(0,0) += + 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize; */ /* RHS_Contribution[0] += - 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize * itNode->FastGetSolutionStepValue(PRESSURE,0); */ /* double boundLHScontribution=4.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize); */ /* std::cout<<"boundLHScontribution "<<boundLHScontribution<<std::endl; */ /* if(itNode->IsNot(RIGID)){ */ LHS_Contribution(0, 0) += +4.0 * 2.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize); RHS_Contribution[0] += -4.0 * 2.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize) * itNode->FastGetSolutionStepValue(PRESSURE, 0); /* } */ /* else { */ /* LHS_Contribution(0,0) += + 4.0/3.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize); */ /* RHS_Contribution[0] += - 4.0/3.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize) * itNode->FastGetSolutionStepValue(PRESSURE,0); */ /* } */ const array_1d<double, 3> &Normal = itNode->FastGetSolutionStepValue(NORMAL); Vector &SpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); array_1d<double, 3> nodalAcceleration = 0.5 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 1); /* nodalAcceleration= (itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval; */ double nodalNormalAcceleration = 0; double nodalNormalProjDefRate = 0; if (dimension == 2) { nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * 
SpatialDefRate[1] * Normal[1] + 2 * Normal[0] * SpatialDefRate[2] * Normal[1]; /* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X,1) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1); */ /* nodalNormalAcceleration=(itNode->FastGetSolutionStepValue(VELOCITY_X,0)-itNode->FastGetSolutionStepValue(VELOCITY_X,1))*Normal[0]/timeInterval + */ /* (itNode->FastGetSolutionStepValue(VELOCITY_Y,0)-itNode->FastGetSolutionStepValue(VELOCITY_Y,1))*Normal[1]/timeInterval; */ nodalNormalAcceleration = Normal[0] * nodalAcceleration[0] + Normal[1] * nodalAcceleration[1]; } else if (dimension == 3) { nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + Normal[2] * SpatialDefRate[2] * Normal[2] + 2 * Normal[0] * SpatialDefRate[3] * Normal[1] + 2 * Normal[0] * SpatialDefRate[4] * Normal[2] + 2 * Normal[1] * SpatialDefRate[5] * Normal[2]; /* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y) + Normal[2]*itNode->FastGetSolutionStepValue(ACCELERATION_Z); */ /* nodalNormalAcceleration=Normal[0]*nodalAcceleration[0] + Normal[1]*nodalAcceleration[1] + Normal[2]*nodalAcceleration[2]; */ } /* RHS_Contribution[0] += tauStab * (density*nodalNormalAcceleration - 4.0*deviatoricCoeff*nodalNormalProjDefRate/meanMeshSize) * nodalFreesurfaceArea; */ double accelerationContribution = 2.0 * density * nodalNormalAcceleration / meanMeshSize; double deviatoricContribution = 8.0 * deviatoricCoeff * nodalNormalProjDefRate / (meanMeshSize * meanMeshSize); /* std::cout<<"nodalNormalAcceleration= "<<nodalNormalAcceleration<<std::endl; */ /* std::cout<<"nodalNormalProjDefRate= "<<nodalNormalProjDefRate<<std::endl; */ /* std::cout<<"meanMeshSize "<<meanMeshSize<<std::endl; */ /* accelerationContribution=0; */ /* deviatoricContribution=0; */ /* if(itNode->IsNot(RIGID)){ */ RHS_Contribution[0] += 2.0 * tauStab * 
(accelerationContribution + deviatoricContribution) * nodalVolume; /* }else{ */ /* RHS_Contribution[0] += 1.0/3.0* tauStab * (accelerationContribution - deviatoricContribution) * nodalVolume; */ /* } */ } for (unsigned int i = 0; i < neighSize; i++) { unsigned int idNode = nodalSFDneighboursId[i]; EquationId[i] = rModelPart.Nodes()[idNode].GetDof(PRESSURE, xpos).EquationId(); double Density = rModelPart.Nodes()[idNode].FastGetSolutionStepValue(DENSITY); array_1d<double, 3> &VolumeAcceleration = rModelPart.Nodes()[idNode].FastGetSolutionStepValue(VOLUME_ACCELERATION); double dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol]; double dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1]; double dNdZi = 0; if (dimension == 2) { RHS_Contribution[i] += -tauStab * Density * (dNdXi * VolumeAcceleration[0] + dNdYi * VolumeAcceleration[1]) * nodalVolume; } else if (dimension == 3) { dNdZi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 2]; RHS_Contribution[i] += -tauStab * Density * (dNdXi * VolumeAcceleration[0] + dNdYi * VolumeAcceleration[1] + dNdZi * VolumeAcceleration[2]) * nodalVolume; } firstRow = 0; for (unsigned int j = 0; j < neighSize; j++) { unsigned int idNodeJ = nodalSFDneighboursId[j]; double dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow]; double dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1]; if (dimension == 2) { ////////////////// Laplacian term for LHS LHS_Contribution(i, j) += +tauStab * (dNdXi * dNdXj + dNdYi * dNdYj) * nodalVolume; ////////////////// Laplacian term L_ij*P_j for RHS RHS_Contribution[i] += -tauStab * (dNdXi * dNdXj + dNdYi * dNdYj) * nodalVolume * rModelPart.Nodes()[idNodeJ].FastGetSolutionStepValue(PRESSURE, 0); } else if (dimension == 3) { double dNdZj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 2]; ////////////////// Laplacian term for LHS LHS_Contribution(i, j) += +tauStab * (dNdXi * dNdXj + 
dNdYi * dNdYj + dNdZi * dNdZj) * nodalVolume; ////////////////// Laplacian term L_ij*P_j for RHS RHS_Contribution[i] += -tauStab * (dNdXi * dNdXj + dNdYi * dNdYj + dNdZi * dNdZj) * nodalVolume * rModelPart.Nodes()[idNodeJ].FastGetSolutionStepValue(PRESSURE, 0); } /* std::cout << "dNdXi= " <<dNdXi<< "dNdYi= " <<dNdYi<< "dNdYj= " <<dNdYj<< "dNdXj= " <<dNdXj<< std::endl; */ firstRow += dimension; } firstCol += dimension; } } //assemble the elemental contribution #ifdef USE_LOCKS_IN_ASSEMBLY Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif /* AssembleLHS(A, LHS_Contribution, EquationId); */ /* AssembleRHS(b, RHS_Contribution, EquationId); */ } } } /* /\* std::cout<<".... Build Nodally Continuity Equation DONE!"<<std::endl; *\/ */ /* const double stop_build = OpenMPUtils::GetCurrentTime(); */ /* KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Build time: " << stop_build - start_build << std::endl; */ /* //for (int i = 0; i < A_size; i++) */ /* // omp_destroy_lock(&lock_array[i]); */ /* KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl; */ KRATOS_CATCH("") } /** * @brief Function to perform the building of the LHS * @details Depending on the implementation choosen the size of the matrix could * be equal to the total number of Dofs or to the number of unrestrained dofs * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix */ void BuildLHS( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A) override { KRATOS_TRY TSystemVectorType tmp(A.size1(), 0.0); this->Build(pScheme, rModelPart, A, tmp); KRATOS_CATCH("") } /** * @brief Build a rectangular 
matrix of size n*N where "n" is the number of unrestrained degrees of freedom
     * and "N" is the total number of degrees of freedom involved.
     * @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed
     * degrees of freedom (but keeping the columns!!)
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix
     */
    void BuildLHS_CompleteOnFreeRows(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart,
        TSystemMatrixType &A) override
    {
        KRATOS_TRY

        // NOTE(review): this is identical to BuildLHS — the fixed-dof rows are not
        // actually removed here; confirm whether that is intentional for this solver.
        TSystemVectorType tmp(A.size1(), 0.0);
        this->Build(pScheme, rModelPart, A, tmp);

        KRATOS_CATCH("")
    }

    /**
     * @brief This is a call to the linear system solver
     * @param A The LHS matrix
     * @param Dx The Unknowns vector
     * @param b The RHS vector
     */
    void SystemSolve(
        TSystemMatrixType &A,
        TSystemVectorType &Dx,
        TSystemVectorType &b) override
    {
        KRATOS_TRY

        double norm_b;
        if (TSparseSpace::Size(b) != 0)
            norm_b = TSparseSpace::TwoNorm(b);
        else
            norm_b = 0.00;

        // If the RHS is exactly zero the solution is zero: skip the solver call.
        if (norm_b != 0.00)
        {
            //do solve
            BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
        }
        else
            TSparseSpace::SetToZero(Dx);

        //prints informations about the current time
        KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
     * @param A The LHS matrix
     * @param Dx The Unknowns vector
     * @param b The RHS vector
     * @param rModelPart The model part of the problem to solve
     */
    void SystemSolveWithPhysics(
        TSystemMatrixType &A,
        TSystemVectorType &Dx,
        TSystemVectorType &b,
        ModelPart &rModelPart)
    {
        KRATOS_TRY

        double norm_b;
        if (TSparseSpace::Size(b) != 0)
            norm_b = TSparseSpace::TwoNorm(b);
        else
            norm_b = 0.00;

        if (norm_b != 0.00)
        {
            //provide physical data as needed
            if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
                BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);

            //do solve
            BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
        }
        else
        {
            TSparseSpace::SetToZero(Dx);
            KRATOS_WARNING("NodalResidualBasedBlockBuilderAndSolver") << "ATTENTION! setting the RHS to zero!" << std::endl;
        }

        //prints informations about the current time
        KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief Function to perform the building and solving phase at the same time.
     * @details It is ideally the fastest and safer function to use when it is possible to solve
     * just after building
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix
     * @param Dx The Unknowns vector
     * @param b The RHS vector
     */
    void BuildAndSolve(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart,
        TSystemMatrixType &A,
        TSystemVectorType &Dx,
        TSystemVectorType &b) override
    {
        KRATOS_TRY

        std::cout << "CONTINUITY EQ: buildAndSolve " << std::endl;

        Timer::Start("Build");

        /* Build(pScheme, rModelPart, A, b); */
        //boost::timer build_time;

        // Nodal (rather than elemental) assembly of the continuity-equation system.
        BuildNodally(pScheme, rModelPart, A, b);

        //std::cout << "CONTINUITY EQ: build_time : " << build_time.elapsed() << std::endl;

        Timer::Stop("Build");

        ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);

        KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "Before the solution of the system"
                                                                                              << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

        const double start_solve = OpenMPUtils::GetCurrentTime();
        Timer::Start("Solve");

        //boost::timer solve_time;

        SystemSolveWithPhysics(A, Dx, b, rModelPart);

        //std::cout << "CONTINUITY EQ: solve_time : " << solve_time.elapsed() << std::endl;

        Timer::Stop("Solve");
        const double stop_solve = OpenMPUtils::GetCurrentTime();
        KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;

        KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "After the solution of the system"
                                                                                              << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief Corresponds to the previous, but the System's matrix is considered already built and only the RHS is built again
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix
     * @param Dx The Unknowns vector
     * @param b The RHS vector
     */
    void BuildRHSAndSolve(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart,
        TSystemMatrixType &A,
        TSystemVectorType &Dx,
        TSystemVectorType &b) override
    {
        KRATOS_TRY

        BuildRHS(pScheme, rModelPart, b);
        SystemSolve(A, Dx, b);

        KRATOS_CATCH("")
    }

    /**
     * @brief Function to perform the build of the RHS.
     * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     */
    void BuildRHS(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart,
        TSystemVectorType &b) override
    {
        KRATOS_TRY

        BuildRHSNoDirichlet(pScheme, rModelPart, b);

        const int ndofs = static_cast<int>(BaseType::mDofSet.size());

        //NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
        // Zero the RHS entries of fixed dofs so the Dirichlet rows stay untouched.
#pragma omp parallel for firstprivate(ndofs)
        for (int k = 0; k < ndofs; k++)
        {
            typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;
            const std::size_t i = dof_iterator->EquationId();

            if (dof_iterator->IsFixed())
                b[i] = 0.0f;
        }

        KRATOS_CATCH("")
    }

    /**
     * @brief Builds the list of the DofSets involved in the problem by "asking" to each element
     * and condition its Dofs.
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the * way the matrix and RHS are built * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart) override { KRATOS_TRY; KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl; //Gets the array of elements from the modeler ElementsArrayType &pElements = rModelPart.Elements(); const int nelements = static_cast<int>(pElements.size()); Element::DofsVectorType ElementalDofList; ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); unsigned int nthreads = OpenMPUtils::GetNumThreads(); // typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type; // typedef std::unordered_set < NodeType::DofType::Pointer, // DofPointerHasher, // DofPointerComparor, // allocator_type > set_type; #ifdef USE_GOOGLE_HASH typedef google::dense_hash_set<NodeType::DofType::Pointer, DofPointerHasher> set_type; #else typedef std::unordered_set<NodeType::DofType::Pointer, DofPointerHasher> set_type; #endif // std::vector<set_type> dofs_aux_list(nthreads); // std::vector<allocator_type> allocators(nthreads); KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Number of threads" << nthreads << "\n" << std::endl; for (int i = 0; i < static_cast<int>(nthreads); i++) { #ifdef USE_GOOGLE_HASH dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer()); #else // dofs_aux_list[i] = set_type( allocators[i]); dofs_aux_list[i].reserve(nelements); #endif } KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Initializing element loop" << std::endl; #pragma omp parallel firstprivate(nelements, ElementalDofList) { #pragma omp for schedule(guided, 512) nowait for (int i 
= 0; i < nelements; i++) { typename ElementsArrayType::iterator it = pElements.begin() + i; const unsigned int this_thread_id = OpenMPUtils::ThisThread(); // gets list of Dof involved on every element pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo); dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end()); } KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Initializing condition loop" << std::endl; ConditionsArrayType &pConditions = rModelPart.Conditions(); const int nconditions = static_cast<int>(pConditions.size()); #pragma omp for schedule(guided, 512) for (int i = 0; i < nconditions; i++) { typename ConditionsArrayType::iterator it = pConditions.begin() + i; const unsigned int this_thread_id = OpenMPUtils::ThisThread(); // gets list of Dof involved on every element pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo); dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end()); } } KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Initializing tree reduction\n" << std::endl; // Here we do a reduction in a tree so to have everything on thread 0 unsigned int old_max = nthreads; unsigned int new_max = ceil(0.5 * static_cast<double>(old_max)); while (new_max >= 1 && new_max != old_max) { if (this->GetEchoLevel() > 2) { //just for debugging std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl; for (int i = 0; i < static_cast<int>(new_max); i++) { if (i + new_max < old_max) { std::cout << i << " - " << i + new_max << std::endl; } } std::cout << "********************" << std::endl; } #pragma omp parallel for for (int i = 0; i < static_cast<int>(new_max); i++) { if (i + new_max < old_max) { dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end()); dofs_aux_list[i + new_max].clear(); } } old_max = new_max; new_max = 
ceil(0.5 * static_cast<double>(old_max)); } KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl; DofsArrayType Doftemp; BaseType::mDofSet = DofsArrayType(); Doftemp.reserve(dofs_aux_list[0].size()); for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++) { Doftemp.push_back((*it)); } Doftemp.Sort(); BaseType::mDofSet = Doftemp; //Throws an exception if there are no Degrees Of Freedom involved in the analysis KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl; KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl; BaseType::mDofSetIsInitialized = true; KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl; KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Initializing lock array" << std::endl; #ifdef _OPENMP if (mlock_array.size() != 0) { for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) { omp_destroy_lock(&mlock_array[i]); } } mlock_array.resize(BaseType::mDofSet.size()); for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) { omp_init_lock(&mlock_array[i]); } #endif KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "End of setup dof set\n" << std::endl; // If reactions are to be calculated, we check if all the dofs have reactions defined // This is tobe done only in debug mode #ifdef KRATOS_DEBUG if (BaseType::GetCalculateReactionsFlag()) { for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) { KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl << "Node : " << 
dof_iterator->Id() << std::endl << "Dof : " << (*dof_iterator) << std::endl << "Not possible to calculate reactions." << std::endl; } } #endif KRATOS_CATCH(""); } /** * @brief Organises the dofset in order to speed up the building phase * @param rModelPart The model part of the problem to solve */ void SetUpSystem( ModelPart &rModelPart) override { //int free_id = 0; BaseType::mEquationSystemSize = BaseType::mDofSet.size(); int ndofs = static_cast<int>(BaseType::mDofSet.size()); #pragma omp parallel for firstprivate(ndofs) for (int i = 0; i < static_cast<int>(ndofs); i++) { typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + i; dof_iterator->SetEquationId(i); } //for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) // dof_iterator->SetEquationId(free_id++); } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixPointerType &pA, TSystemVectorPointerType &pDx, TSystemVectorPointerType &pb, ModelPart &rModelPart) override { KRATOS_TRY //boost::timer contruct_matrix; if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0)); pA.swap(pNewA); } if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0)); pDx.swap(pNewDx); } if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0)); pb.swap(pNewb); } TSystemMatrixType &A = *pA; TSystemVectorType &Dx = *pDx; TSystemVectorType &b = *pb; //resizing the system vectors and matrix if (A.size1() == 0 || 
BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false); ConstructMatrixStructure(pScheme, A, rModelPart); } else { if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) { KRATOS_WATCH(" it should not come here!!!!!!!! ... this is SLOW"); KRATOS_ERROR << "The equation system size has changed during the simulation. This is not permited." << std::endl; A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true); ConstructMatrixStructure(pScheme, A, rModelPart); } } if (Dx.size() != BaseType::mEquationSystemSize) Dx.resize(BaseType::mEquationSystemSize, false); if (b.size() != BaseType::mEquationSystemSize) b.resize(BaseType::mEquationSystemSize, false); //std::cout << "CONTINUITY EQ: contruct_matrix : " << contruct_matrix.elapsed() << std::endl; KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void InitializeSolutionStep( ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { KRATOS_TRY std::cout << "Initialize Solution Step in nodal res based " << std::endl; KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void FinalizeSolutionStep( ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { } //************************************************************************** //************************************************************************** void CalculateReactions( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { TSparseSpace::SetToZero(b); //refresh RHS to have the correct reactions 
BuildRHSNoDirichlet(pScheme, rModelPart, b); const int ndofs = static_cast<int>(BaseType::mDofSet.size()); //NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver #pragma omp parallel for firstprivate(ndofs) for (int k = 0; k < ndofs; k++) { typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k; const int i = (dof_iterator)->EquationId(); (dof_iterator)->GetSolutionStepReactionValue() = -b[i]; } //KRATOS_WATCH(__LINE__) } /** * @brief Applies the dirichlet conditions. This operation may be very heavy or completely * unexpensive depending on the implementation choosen and on how the System Matrix is built. * @details For explanation of how it works for a particular implementation the user * should refer to the particular Builder And Solver choosen * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void ApplyDirichletConditions( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { std::size_t system_size = A.size1(); std::vector<double> scaling_factors(system_size, 0.0f); const int ndofs = static_cast<int>(BaseType::mDofSet.size()); //NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver #pragma omp parallel for firstprivate(ndofs) for (int k = 0; k < ndofs; k++) { typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k; if (dof_iterator->IsFixed()) scaling_factors[k] = 0.0f; else scaling_factors[k] = 1.0f; } double *Avalues = A.value_data().begin(); std::size_t *Arow_indices = A.index1_data().begin(); std::size_t *Acol_indices = A.index2_data().begin(); //detect if there is a line of all zeros and set the diagonal to a 1 if this happens #pragma omp parallel for firstprivate(system_size) for (int k = 0; k < static_cast<int>(system_size); 
++k) { std::size_t col_begin = Arow_indices[k]; std::size_t col_end = Arow_indices[k + 1]; bool empty = true; for (std::size_t j = col_begin; j < col_end; ++j) { if (Avalues[j] != 0.0) { empty = false; break; } } if (empty == true) { A(k, k) = 1.0; b[k] = 0.0; } } #pragma omp parallel for for (int k = 0; k < static_cast<int>(system_size); ++k) { std::size_t col_begin = Arow_indices[k]; std::size_t col_end = Arow_indices[k + 1]; double k_factor = scaling_factors[k]; if (k_factor == 0) { // zero out the whole row, except the diagonal for (std::size_t j = col_begin; j < col_end; ++j) if (static_cast<int>(Acol_indices[j]) != k) Avalues[j] = 0.0; // zero out the RHS b[k] = 0.0; } else { // zero out the column which is associated with the zero'ed row for (std::size_t j = col_begin; j < col_end; ++j) if (scaling_factors[Acol_indices[j]] == 0) Avalues[j] = 0.0; } } } /** * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed */ void Clear() override { #ifdef _OPENMP for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) omp_destroy_lock(&mlock_array[i]); mlock_array.resize(0); #endif BaseType::Clear(); } /** * @brief This function is designed to be called once to perform all the checks needed * on the input provided. Checks can be "expensive" as the function is designed * to catch user's errors. 
* @param rModelPart The model part of the problem to solve
     * @return 0 all ok
     */
    int Check(ModelPart &rModelPart) override
    {
        KRATOS_TRY

        return 0;

        KRATOS_CATCH("");
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{
#ifdef _OPENMP
    // One lock per equation/row; guards concurrent insertion into the sparsity sets
    // and (optionally) assembly. Sized and initialized in SetUpDofSet.
    std::vector<omp_lock_t> mlock_array;
#endif
    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    // Builds the CSR sparsity pattern of A for the nodal continuity equation:
    // each node couples the PRESSURE dofs of its SFD neighbour set, plus the
    // couplings introduced by the conditions.
    virtual void ConstructMatrixStructure(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixType &A,
        ModelPart &rModelPart)
    {
        std::cout << " ConstructMatrixStructure for Continuity equation " << std::endl;

        //filling with zero the matrix (creating the structure)
        Timer::Start("MatrixStructure");

        ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();

        // Getting the array of the conditions
        const int nconditions = static_cast<int>(rModelPart.Conditions().size());
        ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();

        const std::size_t equation_size = BaseType::mEquationSystemSize;

#ifdef USE_GOOGLE_HASH
        std::vector<google::dense_hash_set<std::size_t>> indices(equation_size);
        const std::size_t empty_key = 2 * equation_size + 10;
#else
        // indices[row] = set of column ids that row couples with.
        std::vector<std::unordered_set<std::size_t>> indices(equation_size);
#endif

#pragma omp parallel for firstprivate(equation_size)
        for (int iii = 0; iii < static_cast<int>(equation_size); iii++)
        {
#ifdef USE_GOOGLE_HASH
            indices[iii].set_empty_key(empty_key);
#else
            indices[iii].reserve(40);
#endif
        }

        Element::EquationIdVectorType EquationId;

        /* #pragma omp parallel */
        /*     { */
        ModelPart::NodeIterator NodesBegin;
        ModelPart::NodeIterator NodesEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);

        for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
        {
            /* VectorType nodalSFDneighboursId=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); */
            /* const unsigned int neighSize = nodalSFDneighboursId.size(); */
            const unsigned int neighSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER).size();

            if (EquationId.size() != neighSize)
                EquationId.resize(neighSize, false);

            /* const unsigned int xpos = itNode->GetDofPosition(VELOCITY_X); */
            const unsigned int xpos = itNode->GetDofPosition(PRESSURE);

            // Collect the equation ids of the node's SFD neighbour pressure dofs.
            for (unsigned int i = 0; i < neighSize; i++)
            {
                /* unsigned int idNode=nodalSFDneighboursId[i]; */
                unsigned int idNode = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER)[i];
                EquationId[i] = rModelPart.Nodes()[idNode].GetDof(PRESSURE, xpos).EquationId();
            }

            // Fully couple the neighbour set: every id appears in every id's row.
            for (std::size_t i = 0; i < EquationId.size(); i++)
            {
                if (EquationId[i] < BaseType::mEquationSystemSize)
                {
#ifdef _OPENMP
                    omp_set_lock(&mlock_array[EquationId[i]]);
#endif
                    auto &row_indices = indices[EquationId[i]];
                    for (auto it = EquationId.begin(); it != EquationId.end(); it++)
                    {
                        if (*it < BaseType::mEquationSystemSize)
                            row_indices.insert(*it);
                    }
#ifdef _OPENMP
                    omp_unset_lock(&mlock_array[EquationId[i]]);
#endif
                }
            }

            /* for (std::size_t i = 0; i < EquationId.size(); i++) */
            /*   { */
            /* #ifdef _OPENMP */
            /*     omp_set_lock(&mlock_array[EquationId[i]]); */
            /* #endif */
            /*     auto& row_indices = indices[EquationId[i]]; */
            /*     row_indices.insert(EquationId.begin(), EquationId.end()); */
            /* #ifdef _OPENMP */
            /*     omp_unset_lock(&mlock_array[EquationId[i]]); */
            /* #endif */
            /*   } */
        }

        /*     } */

        Element::EquationIdVectorType ids(3, 0);

#pragma omp parallel for firstprivate(nconditions, ids)
        for (int iii = 0; iii < nconditions; iii++)
        {
            typename ConditionsArrayType::iterator i_condition = cond_begin + iii;
            pScheme->Condition_EquationId(*(i_condition.base()), ids, CurrentProcessInfo);
            for (std::size_t i = 0; i < ids.size(); i++)
            {
#ifdef _OPENMP
                omp_set_lock(&mlock_array[ids[i]]);
#endif
                auto &row_indices = indices[ids[i]];
                row_indices.insert(ids.begin(), ids.end());
#ifdef _OPENMP
                omp_unset_lock(&mlock_array[ids[i]]);
#endif
            }
        }

        //count the row sizes
        unsigned int nnz = 0;
        for (unsigned int i = 0; i < indices.size(); i++)
            nnz += indices[i].size();

        A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);

        double *Avalues = A.value_data().begin();
        std::size_t *Arow_indices = A.index1_data().begin();
        std::size_t *Acol_indices = A.index2_data().begin();

        //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
        Arow_indices[0] = 0;
        for (int i = 0; i < static_cast<int>(A.size1()); i++)
            Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();

#pragma omp parallel for
        for (int i = 0; i < static_cast<int>(A.size1()); i++)
        {
            const unsigned int row_begin = Arow_indices[i];
            const unsigned int row_end = Arow_indices[i + 1];
            unsigned int k = row_begin;
            for (auto it = indices[i].begin(); it != indices[i].end(); it++)
            {
                Acol_indices[k] = *it;
                Avalues[k] = 0.0;
                k++;
            }

            indices[i].clear(); //deallocating the memory

            std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
        }

        A.set_filled(indices.size() + 1, nnz);

        Timer::Stop("MatrixStructure");

        /* std::cout<<".....
ConstructMatrixStructure for Continuity equation DONE"<<std::endl; */ } /* virtual void ConstructMatrixStructure( */ /* typename TSchemeType::Pointer pScheme, */ /* TSystemMatrixType& A, */ /* ModelPart& rModelPart) */ /* { */ /* //filling with zero the matrix (creating the structure) */ /* Timer::Start("MatrixStructure"); */ /* // Getting the elements from the model */ /* const int nelements = static_cast<int>(rModelPart.Elements().size()); */ /* // Getting the array of the conditions */ /* const int nconditions = static_cast<int>(rModelPart.Conditions().size()); */ /* ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); */ /* ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin(); */ /* ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin(); */ /* const std::size_t equation_size = BaseType::mEquationSystemSize; */ /* #ifdef USE_GOOGLE_HASH */ /* std::vector<google::dense_hash_set<std::size_t> > indices(equation_size); */ /* const std::size_t empty_key = 2*equation_size + 10; */ /* #else */ /* std::vector<std::unordered_set<std::size_t> > indices(equation_size); */ /* #endif */ /* #pragma omp parallel for firstprivate(equation_size) */ /* for (int iii = 0; iii < static_cast<int>(equation_size); iii++) */ /* { */ /* #ifdef USE_GOOGLE_HASH */ /* indices[iii].set_empty_key(empty_key); */ /* #else */ /* indices[iii].reserve(40); */ /* #endif */ /* } */ /* Element::EquationIdVectorType ids(3, 0); */ /* #pragma omp parallel for firstprivate(nelements, ids) */ /* for(int iii=0; iii<nelements; iii++) */ /* { */ /* typename ElementsContainerType::iterator i_element = el_begin + iii; */ /* pScheme->EquationId( *(i_element.base()) , ids, CurrentProcessInfo); */ /* for (std::size_t i = 0; i < ids.size(); i++) */ /* { */ /* #ifdef _OPENMP */ /* omp_set_lock(&mlock_array[ids[i]]); */ /* #endif */ /* auto& row_indices = indices[ids[i]]; */ /* row_indices.insert(ids.begin(), ids.end()); */ /* #ifdef _OPENMP */ /* 
omp_unset_lock(&mlock_array[ids[i]]); */ /* #endif */ /* } */ /* } */ /* #pragma omp parallel for firstprivate(nconditions, ids) */ /* for (int iii = 0; iii<nconditions; iii++) */ /* { */ /* typename ConditionsArrayType::iterator i_condition = cond_begin + iii; */ /* pScheme->Condition_EquationId( *(i_condition.base()), ids, CurrentProcessInfo); */ /* for (std::size_t i = 0; i < ids.size(); i++) */ /* { */ /* #ifdef _OPENMP */ /* omp_set_lock(&mlock_array[ids[i]]); */ /* #endif */ /* auto& row_indices = indices[ids[i]]; */ /* row_indices.insert(ids.begin(), ids.end()); */ /* #ifdef _OPENMP */ /* omp_unset_lock(&mlock_array[ids[i]]); */ /* #endif */ /* } */ /* } */ /* //count the row sizes */ /* unsigned int nnz = 0; */ /* for (unsigned int i = 0; i < indices.size(); i++) */ /* nnz += indices[i].size(); */ /* A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz); */ /* double* Avalues = A.value_data().begin(); */ /* std::size_t* Arow_indices = A.index1_data().begin(); */ /* std::size_t* Acol_indices = A.index2_data().begin(); */ /* //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP! 
*/ /* Arow_indices[0] = 0; */ /* for (int i = 0; i < static_cast<int>(A.size1()); i++) */ /* Arow_indices[i+1] = Arow_indices[i] + indices[i].size(); */ /* #pragma omp parallel for */ /* for (int i = 0; i < static_cast<int>(A.size1()); i++) */ /* { */ /* const unsigned int row_begin = Arow_indices[i]; */ /* const unsigned int row_end = Arow_indices[i+1]; */ /* unsigned int k = row_begin; */ /* for (auto it = indices[i].begin(); it != indices[i].end(); it++) */ /* { */ /* Acol_indices[k] = *it; */ /* Avalues[k] = 0.0; */ /* k++; */ /* } */ /* indices[i].clear(); //deallocating the memory */ /* std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); */ /* } */ /* A.set_filled(indices.size()+1, nnz); */ /* Timer::Stop("MatrixStructure"); */ /* } */ //************************************************************************** void AssembleLHS( TSystemMatrixType &A, LocalSystemMatrixType &LHS_Contribution, Element::EquationIdVectorType &EquationId) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; for (unsigned int j_local = 0; j_local < local_size; j_local++) { unsigned int j_global = EquationId[j_local]; A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } } void Assemble( TSystemMatrixType &A, TSystemVectorType &b, const LocalSystemMatrixType &LHS_Contribution, const LocalSystemVectorType &RHS_Contribution, Element::EquationIdVectorType &EquationId #ifdef USE_LOCKS_IN_ASSEMBLY , std::vector<omp_lock_t> &lock_array #endif ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; #ifdef USE_LOCKS_IN_ASSEMBLY omp_set_lock(&lock_array[i_global]); b[i_global] += RHS_Contribution(i_local); #else double &r_a = b[i_global]; const double &v_a = RHS_Contribution(i_local); #pragma omp atomic r_a += v_a; #endif AssembleRowContribution(A, 
LHS_Contribution, i_global, i_local, EquationId); #ifdef USE_LOCKS_IN_ASSEMBLY omp_unset_lock(&lock_array[i_global]); #endif //note that computation of reactions is not performed here! } } //************************************************************************** void AssembleRHS( TSystemVectorType &b, LocalSystemVectorType &RHS_Contribution, Element::EquationIdVectorType &EquationId) { unsigned int local_size = RHS_Contribution.size(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; // ASSEMBLING THE SYSTEM VECTOR double &b_value = b[i_global]; const double &rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ inline void AddUnique(std::vector<std::size_t> &v, const std::size_t &candidate) { std::vector<std::size_t>::iterator i = v.begin(); std::vector<std::size_t>::iterator endit = v.end(); while (i != endit && (*i) != candidate) { i++; } if (i == endit) { v.push_back(candidate); } } void BuildRHSNoDirichlet( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemVectorType &b) { KRATOS_TRY //Getting the Elements ElementsArrayType &pElements = rModelPart.Elements(); //getting the array of the conditions ConditionsArrayType &ConditionsArray = rModelPart.Conditions(); ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // assemble all elements //for (typename 
ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it) const int nelements = static_cast<int>(pElements.size()); #pragma omp parallel firstprivate(nelements, RHS_Contribution, EquationId) { #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < nelements; i++) { typename ElementsArrayType::iterator it = pElements.begin() + i; //detect if the element is active or not. If the user did not make any choice the element //is active by default bool element_is_active = true; if ((it)->IsDefined(ACTIVE)) element_is_active = (it)->Is(ACTIVE); if (element_is_active) { //calculate elemental Right Hand Side Contribution pScheme->Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution AssembleRHS(b, RHS_Contribution, EquationId); } } LHS_Contribution.resize(0, 0, false); RHS_Contribution.resize(0, false); // assemble all conditions //for (typename ConditionsArrayType::ptr_iterator it = ConditionsArray.ptr_begin(); it != ConditionsArray.ptr_end(); ++it) const int nconditions = static_cast<int>(ConditionsArray.size()); //#pragma omp parallel for firstprivate(nconditions, RHS_Contribution, EquationId) schedule(dynamic, 1024) #pragma omp for schedule(guided, 512) for (int i = 0; i < nconditions; i++) { auto it = ConditionsArray.begin() + i; //detect if the element is active or not. 
If the user did not make any choice the element //is active by default bool condition_is_active = true; if ((it)->IsDefined(ACTIVE)) condition_is_active = (it)->Is(ACTIVE); if (condition_is_active) { //calculate elemental contribution pScheme->Condition_Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution AssembleRHS(b, RHS_Contribution, EquationId); } } } KRATOS_CATCH("") } //****************************************************************************************** //****************************************************************************************** inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int> &partitions) { partitions.resize(number_of_threads + 1); int partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for (unsigned int i = 1; i < number_of_threads; i++) partitions[i] = partitions[i - 1] + partition_size; } inline void AssembleRowContribution(TSystemMatrixType &A, const Matrix &Alocal, const unsigned int i, const unsigned int i_local, Element::EquationIdVectorType &EquationId) { double *values_vector = A.value_data().begin(); std::size_t *index1_vector = A.index1_data().begin(); std::size_t *index2_vector = A.index2_data().begin(); size_t left_limit = index1_vector[i]; // size_t right_limit = index1_vector[i+1]; //find the first entry size_t last_pos = ForwardFind(EquationId[0], left_limit, index2_vector); size_t last_found = EquationId[0]; #ifndef USE_LOCKS_IN_ASSEMBLY double &r_a = values_vector[last_pos]; const double &v_a = Alocal(i_local, 0); #pragma omp atomic r_a += v_a; #else values_vector[last_pos] += Alocal(i_local, 0); #endif //now find all of the other entries size_t pos = 0; for (unsigned int j = 1; j < EquationId.size(); j++) { unsigned int id_to_find = EquationId[j]; if (id_to_find > last_found) pos = ForwardFind(id_to_find, last_pos + 
1, index2_vector); else pos = BackwardFind(id_to_find, last_pos - 1, index2_vector); #ifndef USE_LOCKS_IN_ASSEMBLY double &r = values_vector[pos]; const double &v = Alocal(i_local, j); #pragma omp atomic r += v; #else values_vector[pos] += Alocal(i_local, j); #endif last_found = id_to_find; last_pos = pos; } } inline unsigned int ForwardFind(const unsigned int id_to_find, const unsigned int start, const size_t *index_vector) { unsigned int pos = start; while (id_to_find != index_vector[pos]) pos++; return pos; } inline unsigned int BackwardFind(const unsigned int id_to_find, const unsigned int start, const size_t *index_vector) { unsigned int pos = start; while (id_to_find != index_vector[pos]) pos--; return pos; } ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class NodalResidualBasedBlockBuilderAndSolver */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_NODAL_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
cpu_ctc.h
#pragma once

#include <tuple>
#include <cmath>
#include <limits>
#include <algorithm>
#include <numeric>

#include <dmlc/omp.h>

#include "ctc_helper.h"

namespace mxnet_warpctc {

// CPU implementation of Connectionist Temporal Classification (CTC).
// Computes the CTC loss (negative log-likelihood) and its gradient with the
// standard forward-backward recursions over the label sequence augmented with
// blanks. All scratch memory comes from a caller-supplied workspace buffer;
// this class performs no allocation of its own.
template<typename ProbT>
class CpuCTC {
public:
    // Noncopyable
    // alphabet_size: number of characters including the blank symbol.
    // minibatch:     number of utterances processed per call.
    // workspace:     caller-owned scratch buffer (caller sizes and frees it —
    //                presumably via a companion get_workspace_size; TODO confirm).
    // blank_label:   index of the blank symbol within the alphabet.
    CpuCTC(int alphabet_size, int minibatch, void* workspace, int blank_label) :
            alphabet_size_(alphabet_size), minibatch_(minibatch),
            workspace_(workspace), blank_label_(blank_label) {
    };

    CpuCTC(const CpuCTC&) = delete;
    CpuCTC& operator=(const CpuCTC&) = delete;

    // Computes per-utterance costs[mb] and writes gradients w.r.t. the
    // (unnormalized) activations into grads. Returns CTC_STATUS_INVALID_VALUE
    // on any null argument, CTC_STATUS_SUCCESS otherwise.
    ctcStatus_t cost_and_grad(const ProbT* const activations,
                              ProbT *grads,
                              ProbT* costs,
                              const int* const flat_labels,
                              const int* const label_lengths,
                              const int* const input_lengths);

    // Forward-only scoring: fills costs[mb] with the negative forward
    // log-likelihood, computing no gradients.
    ctcStatus_t score_forward(const ProbT* const activations,
                              ProbT* costs,
                              const int* const flat_labels,
                              const int* const label_lengths,
                              const int* const input_lengths);

private:
    // Per-utterance scratch views carved out of the shared workspace, plus the
    // blank-augmented label sequence and the start/end increment tables that
    // drive the pruned alpha/beta recursions.
    class CpuCTC_metadata {
    private:
        // Builds labels_w_blanks, s_inc, e_inc; returns the number of repeated
        // adjacent labels (needed to decide feasibility L + repeats <= T).
        int setup_labels(const int* const labels, int blank_label, int L, int S);

    public:
        CpuCTC_metadata(int L, int S, int T, int mb, int alphabet_size,
                        void* workspace, size_t bytes_used, int blank_label,
                        const int* const labels);

        ProbT* alphas;          // forward variables, S x T, log-space
        ProbT* betas;           // backward variables, one column of S, log-space
        int* labels_w_blanks;   // label sequence with blanks interleaved, length S
        int* e_inc;             // per-step increments for the 'end' pruning bound
        int* s_inc;             // per-step increments for the 'start' pruning bound
        ProbT* output;          // per-symbol log-sum accumulator (reduce-by-key)
        int repeats;            // count of adjacent repeated labels
    };

    int alphabet_size_; // Number of characters plus blank
    int minibatch_;
    void* workspace_;
    int blank_label_;

    // In-place, numerically stable softmax over the alphabet axis for every
    // valid (utterance, timestep) column; results written to probs.
    void softmax(const ProbT* const activations, ProbT* probs,
                 const int* const input_lengths);

    // Runs the full forward-backward pass for one utterance. Returns
    // (-log-likelihood, over_threshold) where over_threshold reports a large
    // forward/backward disagreement (numerical-health check).
    std::tuple<ProbT, bool>
    cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
                         const int* const labels,
                         int T, int L, int mb, size_t bytes_used);

    // Forward (alpha) recursion; returns the forward log-likelihood.
    ProbT compute_alphas(const ProbT* probs, int repeats, int S, int T,
                         const int* const e_inc,
                         const int* const s_inc,
                         const int* const labels,
                         ProbT* alphas);

    // Backward (beta) recursion fused with gradient accumulation; returns the
    // backward log-likelihood.
    ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
                                 ProbT log_partition, int repeats,
                                 int S, int T,
                                 const int* const e_inc,
                                 const int* const s_inc,
                                 const int* const labels,
                                 ProbT* alphas,
                                 ProbT* betas,
                                 ProbT* output);
};

// Carves this utterance's scratch arrays out of the workspace, starting at
// byte offset bytes_used, then precomputes the blank-augmented labels and
// pruning tables. Alphas and betas start at -inf (log-space zero).
template<typename ProbT>
CpuCTC<ProbT>::CpuCTC_metadata::CpuCTC_metadata(int L, int S, int T, int mb,
                                                int alphabet_size,
                                                void* workspace, size_t bytes_used,
                                                int blank_label,
                                                const int* const labels) {

    alphas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(ProbT) * S * T;
    std::fill(alphas, alphas + S * T, ctc_helper::neg_inf<ProbT>());
    betas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(ProbT) * S;
    std::fill(betas, betas + S, ctc_helper::neg_inf<ProbT>());
    labels_w_blanks = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(int) * S;
    e_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(int) * S;
    s_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(int) * S;
    output = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(ProbT) * alphabet_size;
    // NOTE(review): these offsets assume ProbT/int alignment is satisfied at
    // each cumulative offset — holds when ProbT is float/double and the sizes
    // above are multiples of the alignment; verify against the workspace sizing.

    repeats = setup_labels(labels, blank_label, L, S);
}

// Interleaves blanks into the label sequence (S = 2L + 1 entries) and fills
// the s_inc/e_inc tables: a step of 1 where adjacent labels repeat (the
// recursion may not skip the blank between them), 2 otherwise.
template<typename ProbT>
int CpuCTC<ProbT>::CpuCTC_metadata::setup_labels(const int* const labels,
                                                 int blank_label, int L, int S) {
    int e_counter = 0;
    int s_counter = 0;

    s_inc[s_counter++] = 1;

    int repeats = 0;

    for (int i = 1; i < L; ++i) {
        if (labels[i-1] == labels[i]) {
            s_inc[s_counter++] = 1;
            s_inc[s_counter++] = 1;
            e_inc[e_counter++] = 1;
            e_inc[e_counter++] = 1;
            ++repeats;
        }
        else {
            s_inc[s_counter++] = 2;
            e_inc[e_counter++] = 2;
        }
    }
    e_inc[e_counter++] = 1;

    // labels_w_blanks = [blank, l0, blank, l1, ..., blank, l(L-1), blank]
    for (int i = 0; i < L; ++i) {
        labels_w_blanks[2 * i] = blank_label;
        labels_w_blanks[2 * i + 1] = labels[i];
    }
    labels_w_blanks[S - 1] = blank_label;

    return repeats;
}

// Column layout (shown by col_offset below): activations/probs are stored as
// columns of alphabet_size_ values, with column index (mb + minibatch_ * c),
// i.e. timesteps are the slow axis and utterances are interleaved within each
// timestep. Max-subtraction keeps exp() from overflowing.
template<typename ProbT>
void CpuCTC<ProbT>::softmax(const ProbT* const activations, ProbT* probs,
                            const int* const input_lengths) {
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        for(int c = 0; c < input_lengths[mb]; ++c) {
            int col_offset = (mb + minibatch_ * c) * alphabet_size_;
            ProbT max_activation = -std::numeric_limits<ProbT>::infinity();
            for(int r = 0; r < alphabet_size_; ++r)
                max_activation = std::max(max_activation, activations[r + col_offset]);

            ProbT denom = ProbT(0.);
            for(int r = 0; r < alphabet_size_; ++r) {
                probs[r + col_offset] = std::exp(activations[r + col_offset] - max_activation);
                denom += probs[r + col_offset];
            }

            for(int r = 0; r < alphabet_size_; ++r) {
                probs[r + col_offset] /= denom;
            }
        }
    }
}

// Forward-backward pass for a single utterance: returns (-llForward,
// over_threshold). Infeasible label sequences (L + repeats > T) return a
// zero cost (acknowledged-wrong placeholder, see TODO).
template<typename ProbT>
std::tuple<ProbT, bool>
CpuCTC<ProbT>::cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
                                    const int* const labels,
                                    int T, int L, int mb, size_t bytes_used) {

    const int S = 2*L + 1; // Number of labels with blanks

    CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_, bytes_used, blank_label_, labels);

    bool over_threshold = false;

    if (L + ctcm.repeats > T) {
        return std::make_tuple(ProbT(0), over_threshold); // TODO, not right to return 0
    }

    ProbT llForward = compute_alphas(probs, ctcm.repeats, S, T, ctcm.e_inc,
                                     ctcm.s_inc, ctcm.labels_w_blanks,
                                     ctcm.alphas);

    ProbT llBackward = compute_betas_and_grad(grad, probs, llForward, ctcm.repeats,
                                              S, T, ctcm.e_inc, ctcm.s_inc,
                                              ctcm.labels_w_blanks,
                                              ctcm.alphas,
                                              ctcm.betas,
                                              ctcm.output);

    // Forward and backward likelihoods should agree up to rounding; a large
    // gap signals numerical trouble and is reported to the caller.
    ProbT diff = std::abs(llForward - llBackward);
    if (diff > ctc_helper::threshold) {
        over_threshold = true;
    }

    return std::make_tuple(-llForward, over_threshold);
}

// Computes forward probabilities
// Pruned alpha recursion in log-space: [start, end) tracks the band of label
// positions reachable at time t AND from which the end is still reachable.
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_alphas(const ProbT* probs, int repeats, int S, int T,
                                    const int* const e_inc,
                                    const int* const s_inc,
                                    const int* const labels,
                                    ProbT* alphas) {
    int start = (((S /2) + repeats - T) < 0) ? 0 : 1,
        end = S > 1 ? 2 : 1;

    // t = 0: paths may begin at the leading blank or the first real label.
    for (int i = start; i < end; ++i) {
        alphas[i] = std::log(probs[labels[i]]);
    }

    for(int t = 1; t < T; ++t) {
        int remain = (S / 2) + repeats - (T - t);
        if(remain >= 0) start += s_inc[remain];
        if(t <= (S / 2) + repeats) end += e_inc[t - 1];
        int startloop = start;
        int idx1 = t * S, idx2 = (t - 1) * S, idx3 = t * (alphabet_size_ * minibatch_);

        // Position 0 (leading blank) has only the stay transition.
        if (start == 0) {
            alphas[idx1] = alphas[idx2] + std::log(probs[blank_label_ + idx3]);
            startloop += 1;
        }

        for(int i = startloop; i < end; ++i) {
            ProbT prev_sum = ctc_helper::log_plus<ProbT>()(alphas[i + idx2], alphas[(i-1) + idx2]);

            // Skip two if not on blank and not on repeat.
            if (labels[i] != blank_label_ && i != 1 && labels[i] != labels[i-2])
                prev_sum = ctc_helper::log_plus<ProbT>()(prev_sum, alphas[(i-2) + idx2]);

            alphas[i + idx1] = prev_sum + std::log(probs[labels[i] + idx3]);
        }
    }

    // Likelihood = sum over the admissible final positions (last blank and/or
    // last label) of the last alpha column.
    ProbT loglike = ctc_helper::neg_inf<ProbT>();
    for(int i = start; i < end; ++i) {
        loglike = ctc_helper::log_plus<ProbT>()(loglike, alphas[i + (T - 1) * S]);
    }

    return loglike;
}

// Starting from T, we sweep backward over the alpha array computing one column
// of betas as we go.  At each position we can update product alpha * beta and then
// sum into the gradient associated with each label.
// NOTE computes gradient w.r.t UNNORMALIZED final layer activations.
// Assumed passed in grads are already zeroed!
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
                                            ProbT log_partition, int repeats,
                                            int S, int T,
                                            const int* const e_inc,
                                            const int* const s_inc,
                                            const int* const labels,
                                            ProbT* alphas,
                                            ProbT* betas,
                                            ProbT* output) {
    int start = S > 1 ? (S - 2) : 0,
        end = (T > (S / 2) + repeats) ? S : S-1;

    std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());

    //set the starting values in the beta column at the very right edge
    for (int i = start; i < end; ++i) {
        betas[i] = std::log(probs[labels[i] + (T - 1) * (alphabet_size_ * minibatch_)]);

        //compute alpha * beta in log space at this position in (S, T) space
        alphas[i + (T - 1) * S] += betas[i];

        //update the gradient associated with this label
        //essentially performing a reduce-by-key in a sequential manner
        output[labels[i]] =
                ctc_helper::log_plus<ProbT>()(alphas[i + (T - 1) * S], output[labels[i]]);
    }

    //update the gradient wrt to each unique label
    for (int i = 0; i < alphabet_size_; ++i) {
        int idx3 = (T - 1) * alphabet_size_ * minibatch_ + i;

        // Degenerate cases (no mass on this symbol, or zero probability) fall
        // back to grad = prob, i.e. the softmax gradient with zero target.
        if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
            probs[idx3] == 0.0) {
            grad[idx3] = probs[idx3];
        } else {
            grad[idx3] = probs[idx3] - std::exp(output[i] -
                                                std::log(probs[idx3]) - log_partition);
        }
    }

    //loop from the second to last column all the way to the left
    for(int t = T - 2; t >= 0; --t) {
        int remain = (S / 2) + repeats - (T - t);
        if(remain >= -1) start -= s_inc[remain + 1];
        if(t < (S / 2) + repeats) end -= e_inc[t];

        int endloop = end == S ? end - 1 : end;
        int idx1 = t * S, idx3 = t * (alphabet_size_ * minibatch_);

        std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());

        for(int i = start; i < endloop; ++i) {
            ProbT next_sum = ctc_helper::log_plus<ProbT>()(betas[i], betas[(i+1)]);
            // Skip two if not on blank and not on repeat.
            if (labels[i] != blank_label_ && i != (S-2) && labels[i] != labels[i+2]){
                next_sum = ctc_helper::log_plus<ProbT>()(next_sum, betas[(i+2)]);
            }
            betas[i] = next_sum + std::log(probs[labels[i] + idx3]);

            //compute alpha * beta in log space
            alphas[i + idx1] += betas[i];

            //update the gradient associated with this label
            output[labels[i]] =
                    ctc_helper::log_plus<ProbT>()(alphas[i + idx1], output[labels[i]]);
        }

        // The trailing blank (position S-1) was excluded from the loop above
        // because it has no i+1 neighbor; handle its stay transition here.
        if (end == S) {
            betas[(S-1)] = betas[(S-1)] + std::log(probs[blank_label_ + idx3]);
            alphas[(S-1) + idx1] += betas[(S-1)];

            output[labels[S-1]] =
                    ctc_helper::log_plus<ProbT>()(alphas[S-1 + idx1], output[labels[S-1]]);
        }

        //go over the unique labels and compute the final grad
        // wrt to each one at this time step
        for (int i = 0; i < alphabet_size_; ++i) {

            if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
                probs[idx3] == 0.0) {
                grad[idx3] = probs[idx3];
            } else {
                grad[idx3] = probs[idx3] - std::exp(output[i] -
                                                    std::log(probs[idx3]) - log_partition);
            }
            ++idx3;
        }
    }

    // Backward likelihood, summed over the admissible initial positions.
    ProbT loglike = ctc_helper::neg_inf<ProbT>();
    for(int i = start; i < end; ++i) {
        loglike = ctc_helper::log_plus<ProbT>()(loglike, betas[i]);
    }

    return loglike;
}

// Top-level entry: validates arguments, softmaxes the activations into the
// head of the workspace, then runs one forward-backward kernel per utterance
// in parallel, each on its own disjoint workspace slice.
template<typename ProbT>
ctcStatus_t CpuCTC<ProbT>::cost_and_grad(const ProbT* const activations,
                                         ProbT *grads,
                                         ProbT *costs,
                                         const int* const flat_labels,
                                         const int* const label_lengths,
                                         const int* const input_lengths) {
    if (activations == nullptr ||
        grads == nullptr ||
        costs == nullptr ||
        flat_labels == nullptr ||
        label_lengths == nullptr ||
        input_lengths == nullptr
        )
        return CTC_STATUS_INVALID_VALUE;

    ProbT* probs = static_cast<ProbT *>(workspace_);

    int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);

    // Bytes consumed by the softmax output at the head of the workspace.
    size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;

    //per minibatch memory
    size_t per_minibatch_bytes = 0;

    int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);;
    // NOTE(review): stray second ';' above (empty statement) — harmless, but
    // could be removed.
    int maxS = 2 * maxL + 1;

    // NOTE(review): the sizing below uses sizeof(float) even though the
    // buffers are ProbT — if ProbT can be double this undersizes the stride;
    // verify against the external workspace-size calculation, which presumably
    // uses the same formula.

    //output
    per_minibatch_bytes += sizeof(float) * alphabet_size_;

    //alphas
    per_minibatch_bytes += sizeof(float) * maxS * maxT;

    //betas
    per_minibatch_bytes += sizeof(float) * maxS;

    //labels w/blanks, e_inc, s_inc
    per_minibatch_bytes += 3 * sizeof(int) * maxS;

    softmax(activations, probs, input_lengths);

#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int L = label_lengths[mb]; // Number of labels in transcription

        bool mb_status;

        // flat_labels is a concatenation of all label sequences; the offset of
        // utterance mb is the sum of the preceding label lengths.
        std::tie(costs[mb], mb_status) =
                cost_and_grad_kernel(grads + mb * alphabet_size_,
                                     probs + mb * alphabet_size_,
                                     flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0),
                                     T, L, mb, bytes_used + mb * per_minibatch_bytes);
    }

    return CTC_STATUS_SUCCESS;
}

// Forward-only variant of cost_and_grad: same workspace partitioning, but
// only the alpha recursion runs and no gradients are produced.
template<typename ProbT>
ctcStatus_t CpuCTC<ProbT>::score_forward(const ProbT* const activations,
                                         ProbT* costs,
                                         const int* const flat_labels,
                                         const int* const label_lengths,
                                         const int* const input_lengths) {
    if (activations == nullptr ||
        costs == nullptr ||
        flat_labels == nullptr ||
        label_lengths == nullptr ||
        input_lengths == nullptr
        )
        return CTC_STATUS_INVALID_VALUE;

    ProbT* probs = static_cast<ProbT *>(workspace_);

    int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);

    size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;

    //per minibatch memory
    size_t per_minibatch_bytes = 0;

    int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
    int maxS = 2 * maxL + 1;

    // NOTE(review): same sizeof(float)-vs-ProbT sizing caveat as in
    // cost_and_grad applies here.

    //output
    per_minibatch_bytes += sizeof(float) * alphabet_size_;

    //alphas
    per_minibatch_bytes += sizeof(float) * maxS * maxT;

    //betas
    per_minibatch_bytes += sizeof(float) * maxS;

    //labels w/blanks, e_inc, s_inc
    per_minibatch_bytes += 3 * sizeof(int) * maxS;

    softmax(activations, probs, input_lengths);

#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int L = label_lengths[mb]; // Number of labels in transcription
        const int S = 2*L + 1; // Number of labels with blanks

        CpuCTC_metadata ctcm(L, S, T, mb,
                             alphabet_size_, workspace_,
                             bytes_used + mb * per_minibatch_bytes,
                             blank_label_,
                             flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0));

        // Infeasible label sequence for this utterance length: report 0 cost
        // (mirrors the placeholder behavior in cost_and_grad_kernel).
        if (L + ctcm.repeats > T)
            costs[mb] = ProbT(0);
        else {
            costs[mb] = -compute_alphas(probs + mb * alphabet_size_, ctcm.repeats, S, T,
                                        ctcm.e_inc, ctcm.s_inc,
                                        ctcm.labels_w_blanks,
                                        ctcm.alphas);
        }
    }

    return CTC_STATUS_SUCCESS;
}

} // mxnet_warpctc
mpncbo.c
/* $Header$ */

/* mpncbo -- netCDF binary operator */

/* Purpose: Compute sum, difference, product, or ratio of specified hyperslabs
   of specified variables from two input netCDF files and output them to a
   single file. */

/* Copyright (C) 1995--present Charlie Zender
   This file is part of NCO, the netCDF Operators. NCO is free software.
   You may redistribute and/or modify NCO under the terms of the
   3-Clause BSD License.

   You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits
   libraries and to distribute the resulting executables under the terms
   of the BSD, but in addition obeying the extra stipulations of the
   HDF, netCDF, OPeNDAP, and UDUnits licenses.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the 3-Clause BSD License for more details.

   The original author of this software, Charlie Zender, seeks to improve
   it with your suggestions, contributions, bug-reports, and patches.
Please contact the NCO project at http://nco.sf.net or write to Charlie Zender Department of Earth System Science University of California, Irvine Irvine, CA 92697-3100 */ /* Usage: mpncbo -O -p ~/nco/data in.nc in.nc ~/foo.nc mpncbo -O -v mss_val in.nc in.nc ~/foo.nc mpncbo -p /data/zender/tmp h0001.nc ~/foo.nc mpncbo -p /data/zender/tmp -l /data/zender/tmp/rmt h0001.nc h0002.nc ~/foo.nc mpncbo -p /ZENDER/tmp -l /data/zender/tmp/rmt h0001.nc h0002.nc ~/foo.nc mpncbo -p /ZENDER/tmp -l /usr/tmp/zender h0001.nc h0002.nc ~/foo.nc Test type conversion: ncks -O -C -v float_var in.nc foo1.nc ncrename -v float_var,double_var foo1.nc ncks -O -C -v double_var in.nc foo2.nc mpncbo -O -C -v double_var foo1.nc foo2.nc foo3.nc mpncbo -O -C -v double_var foo2.nc foo1.nc foo4.nc ncks -H -m foo1.nc ncks -H -m foo2.nc ncks -H -m foo3.nc ncks -H -m foo4.nc Test nco_var_cnf_dmn: ncks -O -v scalar_var in.nc ~/foo.nc ; ncrename -v scalar_var,four_dmn_rec_var foo.nc ; mpncbo -O -v four_dmn_rec_var in.nc ~/foo.nc foo2.nc */ #ifdef HAVE_CONFIG_H # include <config.h> /* Autotools tokens */ #endif /* !HAVE_CONFIG_H */ /* Standard C headers */ #include <math.h> /* sin cos cos sin 3.14159 */ #include <stdio.h> /* stderr, FILE, NULL, etc. 
*/ #include <stdlib.h> /* atof, atoi, malloc, getopt */ #include <string.h> /* strcmp() */ #include <sys/stat.h> /* stat() */ #include <time.h> /* machine time */ #include <unistd.h> /* POSIX stuff */ #ifndef HAVE_GETOPT_LONG # include "nco_getopt.h" #else /* HAVE_GETOPT_LONG */ # ifdef HAVE_GETOPT_H # include <getopt.h> # endif /* !HAVE_GETOPT_H */ #endif /* HAVE_GETOPT_LONG */ /* 3rd party vendors */ #include <netcdf.h> /* netCDF definitions and C library */ #ifdef ENABLE_MPI # include <mpi.h> /* MPI definitions */ # include "nco_mpi.h" /* MPI utilities */ #endif /* !ENABLE_MPI */ /* Personal headers */ /* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */ #define MAIN_PROGRAM_FILE #include "libnco.h" /* netCDF Operator (NCO) library */ int main(int argc,char **argv) { char **fl_lst_abb=NULL; /* Option a */ char **fl_lst_in; char **gaa_arg=NULL; /* [sng] Global attribute arguments */ char **var_lst_in=NULL_CEWI; char *aux_arg[NC_MAX_DIMS]; char *cmd_ln; char *cnk_arg[NC_MAX_DIMS]; char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */ char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */ char *fl_in_1=NULL; /* fl_in_1 is nco_realloc'd when not NULL */ char *fl_in_2=NULL; /* fl_in_2 is nco_realloc'd when not NULL */ char *fl_out=NULL; /* Option o */ char *fl_out_tmp=NULL; /* MPI CEWI */ char *fl_pth=NULL; /* Option p */ char *fl_pth_lcl=NULL; /* Option l */ char *lmt_arg[NC_MAX_DIMS]; char *nco_op_typ_sng=NULL; /* [sng] Operation type */ char *opt_crr=NULL; /* [sng] String representation of current long-option name */ char *optarg_lcl=NULL; /* [sng] Local copy of system optarg */ char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */ const char * const CVS_Id="$Id$"; const char * const CVS_Revision="$Revision$"; const char * const opt_sht_lst="34567ACcD:d:FhL:l:Oo:p:rRSt:v:X:xy:-:"; cnk_dmn_sct **cnk_dmn=NULL_CEWI; #if defined(__cplusplus) || defined(PGI_CC) ddra_info_sct ddra_info; ddra_info.flg_ddra=False; #else /* !__cplusplus */ 
ddra_info_sct ddra_info={.MRV_flg=False,.flg_ddra=False,.lmn_nbr=0LL,.lmn_nbr_avg=0LL,.lmn_nbr_wgt=0LL,.nco_op_typ=nco_op_nil,.rnk_avg=0,.rnk_var=0,.rnk_wgt=0,.tmr_flg=nco_tmr_srt,.var_idx=0,.wgt_brd_flg=False,.wrd_sz=0}; #endif /* !__cplusplus */ dmn_sct **dim_1; dmn_sct **dim_2; dmn_sct **dmn_out; extern char *optarg; extern int optind; /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped shared in parallel clause */ FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ int *in_id_1_arr; int *in_id_2_arr; int abb_arg_nbr=0; int aux_nbr=0; /* [nbr] Number of auxiliary coordinate hyperslabs specified */ int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */ int cnk_nbr=0; /* [nbr] Number of chunk sizes */ int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_idx; int fl_nbr=0; int fl_in_fmt_1; /* [enm] Input file format */ int fl_in_fmt_2; /* [enm] Input file format */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int gaa_nbr=0; /* [nbr] Number of global attributes to add */ int idx; int jdx; int dmn_idx; int dmn_jdx; int in_id_1; int in_id_2; int lmt_nbr=0; /* Option d. 
NB: lmt_nbr gets incremented */ int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */ int md_open; /* [enm] Mode flag for nc_open() call */ int nbr_dmn_fl_1; int nbr_dmn_fl_2; int nbr_dmn_xtr_1; int nbr_dmn_xtr_2; int nbr_var_fix_1; /* nbr_var_fix_1 gets incremented */ int nbr_var_fix_2; /* nbr_var_fix_2 gets incremented */ int nbr_var_fl_1; int nbr_var_fl_2; int nbr_var_prc_1; /* nbr_var_prc_1 gets incremented */ int nbr_var_prc_2; /* nbr_var_prc_2 gets incremented */ int xtr_nbr_1=0; /* xtr_nbr_1 won't otherwise be set for -c with no -v */ int xtr_nbr_2=0; /* xtr_nbr_2 won't otherwise be set for -c with no -v */ int nco_op_typ=nco_op_nil; /* [enm] Operation type */ int opt; int out_id; int rcd=NC_NOERR; /* [rcd] Return code */ int thr_idx; /* [idx] Index of current thread */ int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */ int var_lst_in_nbr=0; lmt_sct **aux=NULL_CEWI; /* Auxiliary coordinate limits */ lmt_sct **lmt=NULL_CEWI; lmt_all_sct **lmt_all_lst=NULL_CEWI; /* List of *lmt_all structures */ cnv_sct *cnv; /* [sct] Convention structure */ nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */ nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */ nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */ nco_bool FILE_1_RETRIEVED_FROM_REMOTE_LOCATION; nco_bool FILE_2_RETRIEVED_FROM_REMOTE_LOCATION; nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */ nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=False; /* Option O */ nco_bool FORTRAN_IDX_CNV=False; /* Option F */ nco_bool HISTORY_APPEND=True; /* Option h */ nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool MSA_USR_RDR=False; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order*/ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=False; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ 
nco_bool SHARE_OPEN=False; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */ nco_bool flg_mmr_cln=False; /* [flg] Clean memory prior to exit */ nco_bool flg_ddra=False; /* [flg] DDRA diagnostics */ nm_id_sct *dmn_lst_1; nm_id_sct *dmn_lst_2; nm_id_sct *xtr_lst_1=NULL; /* xtr_lst_1 may be alloc()'d from NULL with -c option */ nm_id_sct *xtr_lst_2=NULL; /* xtr_lst_2 may be alloc()'d from NULL with -c option */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */ size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */ size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */ size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */ size_t hdr_pad=0UL; /* [B] Pad at end of header section */ var_sct **var_1; var_sct **var_2; var_sct **var_fix_1; var_sct **var_fix_2; var_sct **var_fix_out; var_sct **var_out; var_sct **var_prc_1; var_sct **var_prc_2; var_sct **var_prc_out; #ifdef ENABLE_MPI /* Declare all MPI-specific variables here */ MPI_Status mpi_stt; /* [enm] Status check to decode msg_tag_typ */ nco_bool TKN_WRT_FREE=True; /* [flg] Write-access to output file is available */ int fl_nm_lng; /* [nbr] Output file name length CEWI */ int msg_bfr[msg_bfr_lng]; /* [bfr] Buffer containing var, idx, tkn_wrt_rsp */ int msg_tag_typ; /* [enm] MPI message tag type */ int prc_rnk; /* [idx] Process rank */ int prc_nbr=0; /* [nbr] Number of MPI processes */ int tkn_wrt_rsp; /* [enm] Response to request for write token */ int var_wrt_nbr=0; /* [nbr] Variables written to output file until now */ int rnk_wrk; /* [idx] Worker rank */ int wrk_id_bfr[wrk_id_bfr_lng]; /* [bfr] Buffer for rnk_wrk */ #endif /* !ENABLE_MPI */ static struct option opt_lng[]={ /* Structure ordered by short option key if possible */ /* Long options with no argument, no short option 
counterpart */ {"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"ddra",no_argument,0,0}, /* [flg] DDRA diagnostics */ {"mdl_cmp",no_argument,0,0}, /* [flg] DDRA diagnostics */ {"msa_usr_rdr",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ {"msa_user_order",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ {"ram_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */ {"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */ {"diskless_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"share_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"create_share",no_argument,0,0}, /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ {"open_share",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ {"unbuffered_io",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"uio",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */ {"version",no_argument,0,0}, {"vrs",no_argument,0,0}, /* Long options with argument, no short option counterpart */ {"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */ {"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */ {"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes 
*/ {"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */ {"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */ {"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */ {"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */ {"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */ {"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"fl_fmt",required_argument,0,0}, {"file_format",required_argument,0,0}, {"gaa",required_argument,0,0}, /* [sng] Global attribute add */ {"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */ {"hdr_pad",required_argument,0,0}, {"header_pad",required_argument,0,0}, {"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ {"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ /* Long options with short counterparts */ {"3",no_argument,0,'3'}, {"4",no_argument,0,'4'}, {"netcdf4",no_argument,0,'4'}, {"5",no_argument,0,'5'}, {"64bit_data",no_argument,0,'5'}, {"cdf5",no_argument,0,'5'}, {"pnetcdf",no_argument,0,'5'}, {"64bit_offset",no_argument,0,'6'}, {"7",no_argument,0,'7'}, {"append",no_argument,0,'A'}, {"coords",no_argument,0,'c'}, {"crd",no_argument,0,'c'}, {"xtr_ass_var",no_argument,0,'c'}, {"xcl_ass_var",no_argument,0,'C'}, {"no_coords",no_argument,0,'C'}, {"no_crd",no_argument,0,'C'}, {"dbg_lvl",required_argument,0,'D'}, {"debug",required_argument,0,'D'}, {"nco_dbg_lvl",required_argument,0,'D'}, {"dimension",required_argument,0,'d'}, {"dmn",required_argument,0,'d'}, {"fortran",no_argument,0,'F'}, {"ftn",no_argument,0,'F'}, 
{"history",no_argument,0,'h'}, {"hst",no_argument,0,'h'}, {"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */ {"deflate",required_argument,0,'L'}, /* [enm] Deflate level */ {"local",required_argument,0,'l'}, {"lcl",required_argument,0,'l'}, {"overwrite",no_argument,0,'O'}, {"ovr",no_argument,0,'O'}, {"path",required_argument,0,'p'}, {"retain",no_argument,0,'R'}, {"rtn",no_argument,0,'R'}, {"revision",no_argument,0,'r'}, {"suspend", no_argument,0,'S'}, {"thr_nbr",required_argument,0,'t'}, {"threads",required_argument,0,'t'}, {"omp_num_threads",required_argument,0,'t'}, {"variable",required_argument,0,'v'}, {"auxiliary",required_argument,0,'X'}, {"exclude",no_argument,0,'x'}, {"xcl",no_argument,0,'x'}, {"operation",required_argument,0,'y'}, {"op_typ",required_argument,0,'y'}, {"help",no_argument,0,'?'}, {"hlp",no_argument,0,'?'}, {0,0,0,0} }; /* end opt_lng */ int opt_idx=0; /* Index of current long option into opt_lng array */ #ifdef ENABLE_MPI /* MPI Initialization */ MPI_Init(&argc,&argv); MPI_Comm_size(MPI_COMM_WORLD,&prc_nbr); MPI_Comm_rank(MPI_COMM_WORLD,&prc_rnk); #endif /* !ENABLE_MPI */ /* Start timer and save command line */ ddra_info.tmr_flg=nco_tmr_srt; rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_mtd; cmd_ln=nco_cmd_ln_sng(argc,argv); /* Get program name and set program enum (e.g., nco_prg_id=ncra) */ nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id); /* Parse command line arguments */ while(1){ /* getopt_long_only() allows one dash to prefix long options */ opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx); /* NB: access to opt_crr is only valid when long_opt is detected */ if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */ opt_crr=(char *)strdup(opt_lng[opt_idx].name); /* Process long options without short option counterparts */ if(opt == 0){ if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){ bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); 
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){ cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_byt */ if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){ cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_min */ if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){ /* Copy limit argument for later processing */ cnk_arg[cnk_nbr]=(char *)strdup(optarg); cnk_nbr++; } /* endif cnk */ if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){ cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){ /* Chunking map */ cnk_map_sng=(char *)strdup(optarg); cnk_map=nco_cnk_map_get(cnk_map_sng); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){ /* Chunking policy */ cnk_plc_sng=(char *)strdup(optarg); cnk_plc=nco_cnk_plc_get(cnk_plc_sng); } /* endif cnk */ if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"ddra") || !strcmp(opt_crr,"mdl_cmp")) ddra_info.flg_ddra=flg_ddra=True; /* [flg] DDRA diagnostics */ if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt); if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){ gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *)); gaa_arg[gaa_nbr++]=(char *)strdup(optarg); } /* endif gaa */ if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){ 
hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif "hdr_pad" */ if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){ log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); nc_set_log_level(log_lvl); } /* !log_lvl */ if(!strcmp(opt_crr,"msa_usr_rdr") || !strcmp(opt_crr,"msa_user_order")) MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Create (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Open (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"create_share")) SHARE_CREATE=True; /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"open_share")) SHARE_OPEN=True; /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){ (void)nco_vrs_prn(CVS_Id,CVS_Revision); nco_exit(EXIT_SUCCESS); } /* endif "vrs" */ if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True; if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False; } /* opt != 0 */ /* Process short options */ switch(opt){ case 0: /* Long options have already been processed, return */ break; case '3': /* Request netCDF3 output storage format */ fl_out_fmt=NC_FORMAT_CLASSIC; break; case '4': /* Request netCDF4 output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4; break; case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */ fl_out_fmt=NC_FORMAT_CDF5; break; case '6': /* Request netCDF3 
64-bit offset output storage format */ fl_out_fmt=NC_FORMAT_64BIT_OFFSET; break; case '7': /* Request netCDF4-classic output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC; break; case 'A': /* Toggle FORCE_APPEND */ FORCE_APPEND=!FORCE_APPEND; break; case 'C': /* Extract all coordinates associated with extracted variables? */ EXTRACT_ASSOCIATED_COORDINATES=False; break; case 'c': EXTRACT_ALL_COORDINATES=True; break; case 'D': /* The debugging level. Default is 0. */ nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); break; case 'd': /* Copy limit argument for later processing */ lmt_arg[lmt_nbr]=(char *)strdup(optarg); lmt_nbr++; break; case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */ FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV; break; case 'h': /* Toggle appending to history global attribute */ HISTORY_APPEND=!HISTORY_APPEND; break; case 'L': /* [enm] Deflate level. Default is 0. */ dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'l': /* Local path prefix for files retrieved from remote file system */ fl_pth_lcl=(char *)strdup(optarg); break; case 'O': /* Toggle FORCE_OVERWRITE */ FORCE_OVERWRITE=!FORCE_OVERWRITE; break; case 'o': /* Name of output file */ fl_out=(char *)strdup(optarg); break; case 'p': /* Common file path */ fl_pth=(char *)strdup(optarg); break; case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. 
*/ RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC; break; case 'r': /* Print CVS program information and copyright notice */ (void)nco_vrs_prn(CVS_Id,CVS_Revision); (void)nco_lbr_vrs_prn(); (void)nco_cpy_prn(); (void)nco_cnf_prn(); nco_exit(EXIT_SUCCESS); break; #ifdef ENABLE_MPI case 'S': /* Suspend with signal handler to facilitate debugging */ if(signal(SIGUSR1,nco_cnt_run) == SIG_ERR) (void)fprintf(fp_stdout,"%s: ERROR Could not install suspend handler.\n",nco_prg_nm); while(!nco_spn_lck_brk) usleep(nco_spn_lck_us); /* Spinlock. fxm: should probably insert a sched_yield */ break; #endif /* !ENABLE_MPI */ case 't': /* Thread number */ thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'v': /* Variables to extract/exclude */ /* Replace commas with hashes when within braces (convert back later) */ optarg_lcl=(char *)strdup(optarg); (void)nco_rx_comma2hash(optarg_lcl); var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr); optarg_lcl=(char *)nco_free(optarg_lcl); xtr_nbr_1=xtr_nbr_2=var_lst_in_nbr; break; case 'X': /* Copy auxiliary coordinate argument for later processing */ aux_arg[aux_nbr]=(char *)strdup(optarg); aux_nbr++; MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ break; case 'x': /* Exclude rather than extract variables specified with -v */ EXCLUDE_INPUT_LIST=True; break; case 'y': /* User-specified operation type overrides invocation default */ nco_op_typ_sng=(char *)strdup(optarg); nco_op_typ=nco_op_typ_get(nco_op_typ_sng); break; case '?': /* Print proper usage */ (void)nco_usg_prn(); nco_exit(EXIT_SUCCESS); break; case '-': /* Long options are not allowed */ (void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); break; default: /* Print proper usage */ (void)fprintf(stdout,"%s ERROR in command-line syntax/options. 
Please reformulate command accordingly.\n",nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); break; } /* end switch */ if(opt_crr) opt_crr=(char *)nco_free(opt_crr); } /* end while loop */ /* Process positional arguments and fill-in filenames */ fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE); /* Make uniform list of user-specified chunksizes */ if(cnk_nbr > 0) cnk_dmn=nco_cnk_prs(cnk_nbr,cnk_arg); /* Make uniform list of user-specified dimension limits */ lmt=nco_lmt_prs(lmt_nbr,lmt_arg); /* Initialize thread information */ thr_nbr=nco_openmp_ini(thr_nbr); in_id_1_arr=(int *)nco_malloc(thr_nbr*sizeof(int)); in_id_2_arr=(int *)nco_malloc(thr_nbr*sizeof(int)); /* Parse filenames */ fl_idx=0; /* Input file _1 */ fl_in_1=nco_fl_nm_prs(fl_in_1,fl_idx,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"%s: INFO Input file %d is %s",nco_prg_nm_get(),fl_idx,fl_in_1); /* Make sure file is on local system and is readable or die trying */ fl_in_1=nco_fl_mk_lcl(fl_in_1,fl_pth_lcl,&FILE_1_RETRIEVED_FROM_REMOTE_LOCATION); if(nco_dbg_lvl >= nco_dbg_fl && FILE_1_RETRIEVED_FROM_REMOTE_LOCATION) (void)fprintf(stderr,", local file is %s",fl_in_1); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n"); /* Open file once per thread to improve caching */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in_1,md_open,&bfr_sz_hnt,in_id_1_arr+thr_idx); in_id_1=in_id_1_arr[0]; fl_idx=1; /* Input file _2 */ fl_in_2=nco_fl_nm_prs(fl_in_2,fl_idx,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"%s: INFO Input file %d is %s",nco_prg_nm_get(),fl_idx,fl_in_2); /* Make sure file is on local system and is readable or die trying */ fl_in_2=nco_fl_mk_lcl(fl_in_2,fl_pth_lcl,&FILE_2_RETRIEVED_FROM_REMOTE_LOCATION); 
if(nco_dbg_lvl >= nco_dbg_fl && FILE_2_RETRIEVED_FROM_REMOTE_LOCATION) (void)fprintf(stderr,", local file is %s",fl_in_2); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n"); /* Open file once per thread to improve caching */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in_2,md_open,&bfr_sz_hnt,in_id_2_arr+thr_idx); in_id_2=in_id_2_arr[0]; /* Parse auxiliary coordinates */ if(aux_nbr > 0){ int aux_idx_nbr; aux=nco_aux_evl(in_id_1,aux_nbr,aux_arg,&aux_idx_nbr); if(aux_idx_nbr > 0){ lmt=(lmt_sct **)nco_realloc(lmt,(lmt_nbr+aux_idx_nbr)*sizeof(lmt_sct *)); int lmt_nbr_new=lmt_nbr+aux_idx_nbr; int aux_idx=0; for(int lmt_idx=lmt_nbr;lmt_idx<lmt_nbr_new;lmt_idx++) lmt[lmt_idx]=aux[aux_idx++]; lmt_nbr=lmt_nbr_new; } /* endif aux */ } /* endif aux_nbr */ /* Get number of variables and dimensions in file */ (void)nco_inq(in_id_1,&nbr_dmn_fl_1,&nbr_var_fl_1,(int *)NULL,(int *)NULL); (void)nco_inq(in_id_2,&nbr_dmn_fl_2,&nbr_var_fl_2,(int *)NULL,(int *)NULL); (void)nco_inq_format(in_id_1,&fl_in_fmt_1); (void)nco_inq_format(in_id_2,&fl_in_fmt_2); /* Form initial extraction list which may include extended regular expressions */ xtr_lst_1=nco_var_lst_mk(in_id_1,nbr_var_fl_1,var_lst_in,EXCLUDE_INPUT_LIST,EXTRACT_ALL_COORDINATES,&xtr_nbr_1); xtr_lst_2=nco_var_lst_mk(in_id_2,nbr_var_fl_2,var_lst_in,EXCLUDE_INPUT_LIST,EXTRACT_ALL_COORDINATES,&xtr_nbr_2); /* Change included variables to excluded variables */ if(EXCLUDE_INPUT_LIST) xtr_lst_1=nco_var_lst_xcl(in_id_1,nbr_var_fl_1,xtr_lst_1,&xtr_nbr_1); if(EXCLUDE_INPUT_LIST) xtr_lst_2=nco_var_lst_xcl(in_id_2,nbr_var_fl_2,xtr_lst_2,&xtr_nbr_2); /* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */ cnv=nco_cnv_ini(in_id_1); /* Add all coordinate variables to extraction list */ if(EXTRACT_ALL_COORDINATES) xtr_lst_1=nco_var_lst_crd_add(in_id_1,nbr_dmn_fl_1,nbr_var_fl_1,xtr_lst_1,&xtr_nbr_1,cnv); 
if(EXTRACT_ALL_COORDINATES) xtr_lst_2=nco_var_lst_crd_add(in_id_2,nbr_dmn_fl_2,nbr_var_fl_2,xtr_lst_2,&xtr_nbr_2,cnv); /* Extract coordinates associated with extracted variables */ if(EXTRACT_ASSOCIATED_COORDINATES) xtr_lst_1=nco_var_lst_crd_ass_add(in_id_1,xtr_lst_1,&xtr_nbr_1,cnv); if(EXTRACT_ASSOCIATED_COORDINATES) xtr_lst_2=nco_var_lst_crd_ass_add(in_id_2,xtr_lst_2,&xtr_nbr_2,cnv); /* With fully symmetric 1<->2 ordering, may occasionally find xtr_nbr_2 > xtr_nbr_1 This occurs, e.g., when fl_in_1 contains reduced variables and full coordinates are only in fl_in_2 and so will not appear xtr_lst_1 */ /* Sort extraction list by variable ID for fastest I/O */ if(xtr_nbr_1 > 1) xtr_lst_1=nco_lst_srt_nm_id(xtr_lst_1,xtr_nbr_1,False); if(xtr_nbr_2 > 1) xtr_lst_2=nco_lst_srt_nm_id(xtr_lst_2,xtr_nbr_2,False); /* We now have final list of variables to extract. Phew. */ /* Find coordinate/dimension values associated with user-specified limits NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */ for(idx=0;idx<lmt_nbr;idx++) (void)nco_lmt_evl(in_id_1,lmt[idx],0L,FORTRAN_IDX_CNV); /* Place all dimensions in lmt_all_lst */ lmt_all_lst=(lmt_all_sct **)nco_malloc(nbr_dmn_fl_1*sizeof(lmt_all_sct *)); /* Initialize lmt_all_sct's */ (void)nco_msa_lmt_all_ntl(in_id_1,MSA_USR_RDR,lmt_all_lst,nbr_dmn_fl_1,lmt,lmt_nbr); /* Find dimensions associated with variables to be extracted */ dmn_lst_1=nco_dmn_lst_ass_var(in_id_1,xtr_lst_1,xtr_nbr_1,&nbr_dmn_xtr_1); dmn_lst_2=nco_dmn_lst_ass_var(in_id_2,xtr_lst_2,xtr_nbr_2,&nbr_dmn_xtr_2); /* Fill-in dimension structure for all extracted dimensions */ dim_1=(dmn_sct **)nco_malloc(nbr_dmn_xtr_1*sizeof(dmn_sct *)); dim_2=(dmn_sct **)nco_malloc(nbr_dmn_xtr_2*sizeof(dmn_sct *)); for(idx=0;idx<nbr_dmn_xtr_1;idx++) dim_1[idx]=nco_dmn_fll(in_id_1,dmn_lst_1[idx].id,dmn_lst_1[idx].nm); for(idx=0;idx<nbr_dmn_xtr_2;idx++) dim_2[idx]=nco_dmn_fll(in_id_2,dmn_lst_2[idx].id,dmn_lst_2[idx].nm); /* Dimension lists no longer needed */ 
dmn_lst_1=nco_nm_id_lst_free(dmn_lst_1,nbr_dmn_xtr_1); dmn_lst_2=nco_nm_id_lst_free(dmn_lst_2,nbr_dmn_xtr_2); /* Check that dims in list 2 are a subset of list 1 and that they are the same size */ (void)nco_dmn_sct_cmp(dim_1,nbr_dmn_xtr_1,dim_2,nbr_dmn_xtr_2,fl_in_1,fl_in_2); /* Duplicate input dimension structures for output dimension structures */ dmn_out=(dmn_sct **)nco_malloc(nbr_dmn_xtr_1*sizeof(dmn_sct *)); for(idx=0;idx<nbr_dmn_xtr_1;idx++){ dmn_out[idx]=nco_dmn_dpl(dim_1[idx]); (void)nco_dmn_xrf(dim_1[idx],dmn_out[idx]); } /* Merge hyperslab limit information into dimension structures */ if(nbr_dmn_fl_1 > 0) (void)nco_dmn_lmt_all_mrg(dmn_out,nbr_dmn_xtr_1,lmt_all_lst,nbr_dmn_fl_1); if(nco_dbg_lvl >= nco_dbg_sbr){ for(idx=0;idx<xtr_nbr_1;idx++) (void)fprintf(stderr,"xtr_lst_1[%d].nm = %s, .id= %d\n",idx,xtr_lst_1[idx].nm,xtr_lst_1[idx].id); } /* end if */ /* Fill-in variable structure list for all extracted variables */ var_1=(var_sct **)nco_malloc(xtr_nbr_1*sizeof(var_sct *)); var_2=(var_sct **)nco_malloc(xtr_nbr_2*sizeof(var_sct *)); var_out=(var_sct **)nco_malloc(xtr_nbr_1*sizeof(var_sct *)); for(idx=0;idx<xtr_nbr_1;idx++){ var_1[idx]=nco_var_fll(in_id_1,xtr_lst_1[idx].id,xtr_lst_1[idx].nm,dim_1,nbr_dmn_xtr_1); var_out[idx]=nco_var_dpl(var_1[idx]); (void)nco_xrf_var(var_1[idx],var_out[idx]); (void)nco_xrf_dmn(var_out[idx]); } /* end loop over idx */ for(idx=0;idx<xtr_nbr_2;idx++) var_2[idx]=nco_var_fll(in_id_2,xtr_lst_2[idx].id,xtr_lst_2[idx].nm,dim_2,nbr_dmn_xtr_2); /* Extraction lists no longer needed */ xtr_lst_1=nco_nm_id_lst_free(xtr_lst_1,xtr_nbr_1); xtr_lst_2=nco_nm_id_lst_free(xtr_lst_2,xtr_nbr_2); /* Die gracefully on unsupported features... */ if(xtr_nbr_1 < xtr_nbr_2){ (void)fprintf(fp_stdout,"%s: WARNING First file has fewer extracted variables than second file (%d < %d). 
This desired feature is TODO nco581.\n",nco_prg_nm,xtr_nbr_1,xtr_nbr_2); nco_exit(EXIT_FAILURE); } /* endif */ /* Refresh var_out with dim_out data */ (void)nco_var_dmn_refresh(var_out,xtr_nbr_1); /* Change dimensions in dim_2 to dim_out */ for(idx=0;idx<nbr_dmn_xtr_2;idx++){ for(jdx=0;jdx<nbr_dmn_xtr_1;jdx++) if(!strcmp(dim_2[idx]->nm,dmn_out[jdx]->nm)){ /* NB: Copy new dim data but do NOT free original as dimension element is aliased in var_2 array */ (void)nco_dmn_cpy(dim_2[idx],dmn_out[jdx]); break; } /* endif */ /* Dimension not found so die gracefully */ if(jdx==nbr_dmn_xtr_1){ (void)fprintf(fp_stdout,"%s: ERROR dimension \"%s\" in second file %s is not present in first file %s\n",nco_prg_nm,dim_2[idx]->nm,fl_in_2,fl_in_1); nco_exit(EXIT_FAILURE); } /* endif dimension not found */ } /* end loop over dimensions */ /* Refresh var_2 with the new dim_2 data */ (void)nco_var_dmn_refresh(var_2,xtr_nbr_2); /* Divide variable lists into lists of fixed variables and variables to be processed Create lists from file_1 last so those values remain in *_out arrays */ (void)nco_var_lst_dvd(var_2,var_out,xtr_nbr_2,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,(dmn_sct **)NULL,0,&var_fix_2,&var_fix_out,&nbr_var_fix_2,&var_prc_2,&var_prc_out,&nbr_var_prc_2); /* Avoid double-free() condition */ var_fix_out=(var_sct **)nco_free(var_fix_out); var_prc_out=(var_sct **)nco_free(var_prc_out); (void)nco_var_lst_dvd(var_1,var_out,xtr_nbr_1,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,(dmn_sct **)NULL,0,&var_fix_1,&var_fix_out,&nbr_var_fix_1,&var_prc_1,&var_prc_out,&nbr_var_prc_1); /* Die gracefully on unsupported features... */ if(nbr_var_fix_1 < nbr_var_fix_2){ (void)fprintf(fp_stdout,"%s: ERROR First file has fewer fixed variables than second file (%d < %d). 
This feature is TODO nco581.\n",nco_prg_nm,nbr_var_fix_1,nbr_var_fix_2); nco_exit(EXIT_FAILURE); } /* endif */ /* Merge two variable lists into same order */ rcd=nco_var_lst_mrg(&var_prc_1,&var_prc_2,&nbr_var_prc_1,&nbr_var_prc_2); /* Make output and input files consanguinous */ if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt_1; /* Verify output file format supports requested actions */ (void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl); /* Open output file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* 20101019 fxm got to here merging ncbo 4.0.5 into mpncbo */ /* Assign zero to start and unity to stride vectors in output variables */ (void)nco_var_srd_srt_set(var_out,xtr_nbr_1); #ifdef ENABLE_MPI if(prc_rnk == rnk_mgr){ /* MPI manager code */ #endif /* !ENABLE_MPI */ /* Make output and input files consanguinous */ if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt_1; /* Open output file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Copy global attributes */ (void)nco_att_cpy(in_id_1,out_id,NC_GLOBAL,NC_GLOBAL,(nco_bool)True); /* Catenate time-stamped command line to "history" global attribute */ if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln); if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in_1,in_id_1,out_id); if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr); if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id); if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr); #ifdef ENABLE_MPI /* Initialize MPI task information */ if(prc_nbr > 0 && HISTORY_APPEND) (void)nco_mpi_att_cat(out_id,prc_nbr); #endif /* !ENABLE_MPI */ /* Define dimensions in output file */ (void)nco_dmn_dfn(fl_out,out_id,dmn_out,nbr_dmn_xtr_1); /* fxm: TODO 550 put max_dim_sz/list(var_1,var_2) into 
var_def(var_out) */ /* Define variables in output file, copy their attributes */ (void)nco_var_dfn(in_id_1,fl_out,out_id,var_out,xtr_nbr_1,(dmn_sct **)NULL,(int)0,nco_pck_plc_nil,nco_pck_map_nil,dfl_lvl); /* Set chunksize parameters */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl_1,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr); /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Take output file out of define mode */ if(hdr_pad == 0UL){ (void)nco_enddef(out_id); }else{ (void)nco__enddef(out_id,hdr_pad); if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad); } /* hdr_pad */ #ifdef ENABLE_MPI } /* prc_rnk != rnk_mgr */ /* Manager obtains output filename and broadcasts to workers */ if(prc_rnk == rnk_mgr) fl_nm_lng=(int)strlen(fl_out_tmp); MPI_Bcast(&fl_nm_lng,1,MPI_INT,rnk_mgr,MPI_COMM_WORLD); if(prc_rnk != rnk_mgr) fl_out_tmp=(char *)nco_malloc((fl_nm_lng+1)*sizeof(char)); MPI_Bcast(fl_out_tmp,fl_nm_lng+1,MPI_CHAR,rnk_mgr,MPI_COMM_WORLD); if(prc_rnk == rnk_mgr){ /* MPI manager code */ TKN_WRT_FREE=False; #endif /* !ENABLE_MPI */ /* Copy variable data for non-processed variables */ (void)nco_msa_var_val_cpy(in_id_1,out_id,var_fix_1,nbr_var_fix_1,lmt_all_lst,nbr_dmn_fl_1); #ifdef ENABLE_MPI /* Close output file so workers can open it */ nco_close(out_id); TKN_WRT_FREE=True; } /* prc_rnk != rnk_mgr */ #endif /* !ENABLE_MPI */ /* ncbo() code has been similar to nces() (and ncra()) wherever possible Major differences occur where performance would otherwise suffer From now on, however, binary-file and binary-operation nature of ncbo() is too different from nces() paradigm to justify following nces() style. 
Instead, we adopt symmetric nomenclature (e.g., file_1, file_2), and perform differences variable-by-variable so peak memory usage goes as Order(2*maximum variable size) rather than Order(3*maximum record size) or Order(3*file size) */ /* Perform various error-checks on input file */ if(False) (void)nco_fl_cmp_err_chk(); /* Default operation depends on invocation name */ if(nco_op_typ_sng == NULL) nco_op_typ=nco_op_typ_get(nco_op_typ_sng); /* Timestamp end of metadata setup and disk layout */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_rgl; #ifdef ENABLE_MPI if(prc_rnk == rnk_mgr){ /* MPI manager code */ /* Compensate for incrementing on each worker's first message */ var_wrt_nbr=-prc_nbr+1; idx=0; /* While variables remain to be processed or written... */ while(var_wrt_nbr < nbr_var_prc_1){ /* Receive message from any worker */ MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&mpi_stt); /* Obtain MPI message tag type */ msg_tag_typ=mpi_stt.MPI_TAG; /* Get sender's prc_rnk */ rnk_wrk=wrk_id_bfr[0]; /* Allocate next variable, if any, to worker */ if(msg_tag_typ == msg_tag_wrk_rqs){ var_wrt_nbr++; /* [nbr] Number of variables written */ /* Worker closed output file before sending msg_tag_wrk_rqs */ TKN_WRT_FREE=True; if(idx < nbr_var_prc_1){ /* Tell requesting worker to allocate space for next variable */ msg_bfr[0]=idx; /* [idx] Variable to be processed */ msg_bfr[1]=out_id; /* Output file ID */ msg_bfr[2]=var_prc_out[idx]->id; /* [id] Variable ID in output file */ /* Point to next variable on list */ idx++; }else{ msg_bfr[0]=idx_all_wrk_ass; /* [enm] All variables already assigned */ msg_bfr[1]=out_id; /* Output file ID */ } /* endif idx */ MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_wrk_rsp,MPI_COMM_WORLD); /* msg_tag_typ != msg_tag_wrk_rqs */ }else if(msg_tag_typ == msg_tag_tkn_wrt_rqs){ /* Allocate token if free, else ask worker to try later */ if(TKN_WRT_FREE){ TKN_WRT_FREE=False; 
msg_bfr[0]=tkn_wrt_rqs_xcp; /* Accept request for write token */ }else{ msg_bfr[0]=tkn_wrt_rqs_dny; /* Deny request for write token */ } /* !TKN_WRT_FREE */ MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD); } /* msg_tag_typ != msg_tag_tkn_wrt_rqs */ } /* end while var_wrt_nbr < nbr_var_prc_1 */ }else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */ wrk_id_bfr[0]=prc_rnk; while(1){ /* While work remains... */ /* Send msg_tag_wrk_rqs */ wrk_id_bfr[0]=prc_rnk; MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rqs,MPI_COMM_WORLD); /* Receive msg_tag_wrk_rsp */ MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,0,msg_tag_wrk_rsp,MPI_COMM_WORLD,&mpi_stt); idx=msg_bfr[0]; out_id=msg_bfr[1]; if(idx == idx_all_wrk_ass) break; else{ var_prc_out[idx]->id=msg_bfr[2]; /* Process this variable same as UP code */ #else /* !ENABLE_MPI */ #ifdef _OPENMP /* OpenMP notes: shared(): msk and wgt are not altered within loop private(): wgt_avg does not need initialization */ #pragma omp parallel for default(none) firstprivate(ddra_info) private(idx,in_id_1,in_id_2,dmn_idx,dmn_jdx) shared(nco_dbg_lvl,dim_1,fl_in_1,fl_in_2,fl_out,flg_ddra,in_id_1_arr,in_id_2_arr,nbr_dmn_xtr_1,nbr_var_prc_1,nbr_var_prc_2,nco_op_typ,out_id,nco_prg_nm,rcd,var_prc_1,var_prc_2,var_prc_out,lmt_all_lst,nbr_dmn_fl_1) #endif /* !_OPENMP */ /* UP and SMP codes main loop over variables */ for(idx=0;idx<nbr_var_prc_1;idx++){ #endif /* ENABLE_MPI */ /* Common code for UP, SMP, and MPI */ int has_mss_val=False; ptr_unn mss_val; if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(fp_stderr,"%s, ",var_prc_1[idx]->nm); if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr); in_id_1=in_id_1_arr[omp_get_thread_num()]; in_id_2=in_id_2_arr[omp_get_thread_num()]; (void)nco_var_mtd_refresh(in_id_1,var_prc_1[idx]); has_mss_val=var_prc_1[idx]->has_mss_val; (void)nco_msa_var_get(in_id_1,var_prc_1[idx],lmt_all_lst,nbr_dmn_fl_1); /* Find and set variable dmn_nbr, ID, mss_val, type in second file 
*/ (void)nco_var_mtd_refresh(in_id_2,var_prc_2[idx]); /* Read hyperslab from second file */ (void)nco_msa_var_get(in_id_2,var_prc_2[idx],lmt_all_lst,nbr_dmn_fl_1); /* Check that all dims in var_prc_2 are in var_prc_1 */ for(dmn_idx=0;dmn_idx<var_prc_2[idx]->nbr_dim;dmn_idx++){ for(dmn_jdx=0;dmn_jdx<var_prc_1[idx]->nbr_dim;dmn_jdx++) if(!strcmp(var_prc_2[idx]->dim[dmn_idx]->nm,var_prc_1[idx]->dim[dmn_jdx]->nm)) break; if(dmn_jdx==var_prc_1[idx]->nbr_dim){ (void)fprintf(fp_stdout,"%s: ERROR Variables do not conform:\nFile %s variable %s has dimension %s not present in file %s variable %s\n",nco_prg_nm,fl_in_2,var_prc_2[idx]->nm, var_prc_2[idx]->dim[dmn_idx]->nm,fl_in_1,var_prc_1[idx]->nm); nco_exit(EXIT_FAILURE); } /* endif error */ } /* end loop over idx */ /* Die gracefully on unsupported features... */ if(var_prc_1[idx]->nbr_dim < var_prc_2[idx]->nbr_dim){ (void)fprintf(fp_stdout,"%s: ERROR Variable %s has lesser rank in first file than in second file (%d < %d). This feature is NCO TODO 552.\n",nco_prg_nm,var_prc_1[idx]->nm,var_prc_1[idx]->nbr_dim,var_prc_2[idx]->nbr_dim); nco_exit(EXIT_FAILURE); } /* endif */ if(var_prc_1[idx]->nbr_dim > var_prc_2[idx]->nbr_dim) (void)ncap_var_cnf_dmn(&var_prc_out[idx],&var_prc_2[idx]); /* var2 now conforms in size to var1, and is in memory */ /* fxm: TODO 268 allow var1 or var2 to typecast */ /* Make sure var2 conforms to type of var1 */ if(var_prc_1[idx]->type != var_prc_2[idx]->type){ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(fp_stderr,"%s: INFO Input variables do not conform in type:\nFile 1 = %s variable %s has type %s\nFile 2 = %s variable %s has type %s\nFile 3 = %s variable %s will have type %s\n",nco_prg_nm,fl_in_1,var_prc_1[idx]->nm,nco_typ_sng(var_prc_1[idx]->type),fl_in_2,var_prc_2[idx]->nm,nco_typ_sng(var_prc_2[idx]->type),fl_out,var_prc_1[idx]->nm,nco_typ_sng(var_prc_1[idx]->type)); } /* endif different type */ var_prc_2[idx]=nco_var_cnf_typ(var_prc_1[idx]->type,var_prc_2[idx]); /* Change missing_value of 
var_prc_2, if any, to missing_value of var_prc_1, if any */ has_mss_val=nco_mss_val_cnf(var_prc_1[idx],var_prc_2[idx]); /* mss_val in fl_1, if any, overrides mss_val in fl_2 */ if(has_mss_val) mss_val=var_prc_1[idx]->mss_val; /* Perform specified binary operation */ switch(nco_op_typ){ case nco_op_add: /* [enm] Add file_1 to file_2 */ (void)nco_var_add(var_prc_1[idx]->type,var_prc_1[idx]->sz,has_mss_val,mss_val,var_prc_2[idx]->val,var_prc_1[idx]->val); break; case nco_op_mlt: /* [enm] Multiply file_1 by file_2 */ (void)nco_var_mlt(var_prc_1[idx]->type,var_prc_1[idx]->sz,has_mss_val,mss_val,var_prc_2[idx]->val,var_prc_1[idx]->val); break; case nco_op_dvd: /* [enm] Divide file_1 by file_2 */ (void)nco_var_dvd(var_prc_1[idx]->type,var_prc_1[idx]->sz,has_mss_val,mss_val,var_prc_2[idx]->val,var_prc_1[idx]->val); break; case nco_op_sbt: /* [enm] Subtract file_2 from file_1 */ (void)nco_var_sbt(var_prc_1[idx]->type,var_prc_1[idx]->sz,has_mss_val,mss_val,var_prc_2[idx]->val,var_prc_1[idx]->val); break; default: /* Other defined nco_op_typ values are valid for ncra(), ncrcat(), ncwa(), not ncbo() */ (void)fprintf(fp_stdout,"%s: ERROR Illegal nco_op_typ in binary operation\n",nco_prg_nm); nco_exit(EXIT_FAILURE); break; } /* end case */ var_prc_2[idx]->val.vp=nco_free(var_prc_2[idx]->val.vp); #ifdef ENABLE_MPI /* Obtain token and prepare to write */ while(1){ /* Send msg_tag_tkn_wrt_rqs repeatedly until token obtained */ wrk_id_bfr[0]=prc_rnk; MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rqs,MPI_COMM_WORLD); MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt); tkn_wrt_rsp=msg_bfr[0]; /* Wait then re-send request */ if(tkn_wrt_rsp == tkn_wrt_rqs_dny) sleep(tkn_wrt_rqs_ntv); else break; } /* end while loop waiting for write token */ /* Worker has token---prepare to write */ if(tkn_wrt_rsp == tkn_wrt_rqs_xcp){ if(RAM_OPEN) md_open=NC_WRITE|NC_SHARE|NC_DISKLESS; else md_open=NC_WRITE|NC_SHARE; 
rcd=nco_fl_open(fl_out_tmp,md_open,&bfr_sz_hnt,&out_id); /* Set chunksize parameters */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl_1,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr); /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); #else /* !ENABLE_MPI */ #ifdef _OPENMP #pragma omp critical #endif /* !_OPENMP */ #endif /* !ENABLE_MPI */ /* Common code for UP, SMP, and MPI */ { /* begin OpenMP critical */ /* Copy result to output file and free workspace buffer */ if(var_prc_1[idx]->nbr_dim == 0){ (void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_1[idx]->val.vp,var_prc_1[idx]->type); }else{ /* end if variable is scalar */ (void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_1[idx]->val.vp,var_prc_1[idx]->type); } /* end else */ } /* end OpenMP critical */ var_prc_1[idx]->val.vp=nco_free(var_prc_1[idx]->val.vp); if(flg_ddra){ /* DDRA diagnostics Usage: ncbo -O -C --mdl -p ~/nco/data in.nc in.nc ~/foo.nc ncbo -O -C --mdl -p ${DATA}/nco_bm stl_5km.nc stl_5km.nc ~/foo.nc ncbo -O -C --mdl -p ${DATA}/nco_bm gcm_T85.nc gcm_T85.nc ~/foo.nc */ /* Assign remaining input for DDRA diagnostics */ ddra_info.lmn_nbr=var_prc_1[idx]->sz; /* [nbr] Variable size */ ddra_info.nco_op_typ=nco_op_typ; /* [enm] Operation type */ ddra_info.rnk_var=var_prc_1[idx]->nbr_dim; /* I [nbr] Variable rank (in input file) */ ddra_info.var_idx=idx; /* [enm] Index */ ddra_info.wrd_sz=nco_typ_lng(var_prc_1[idx]->type); /* [B] Bytes per element */ /* DDRA diagnostics */ rcd+=nco_ddra /* [fnc] Count operations */ (var_prc_1[idx]->nm, /* I [sng] Variable name */ (char *)NULL, /* I [sng] Weight name */ &ddra_info); /* I [sct] DDRA information */ } /* !flg_ddra */ #ifdef ENABLE_MPI /* Close output file and increment written counter */ nco_close(out_id); var_wrt_nbr++; } /* endif tkn_wrt_rqs_xcp */ } 
/* end else !idx_all_wrk_ass */ } /* end while loop requesting work/token */ } /* endif Worker */ #else /* !ENABLE_MPI */ } /* end (OpenMP parallel for) loop over idx */ #endif /* !ENABLE_MPI */ if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n"); /* Close input netCDF files */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_1_arr[thr_idx]); for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_2_arr[thr_idx]); #ifdef ENABLE_MPI /* Manager moves output file (closed by workers) from temporary to permanent location */ if(prc_rnk == rnk_mgr) (void)nco_fl_mv(fl_out_tmp,fl_out); #else /* !ENABLE_MPI */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); #endif /* end !ENABLE_MPI */ /* Remove local copy of file */ if(FILE_1_RETRIEVED_FROM_REMOTE_LOCATION && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in_1); if(FILE_2_RETRIEVED_FROM_REMOTE_LOCATION && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in_2); /* Clean memory unless dirty memory allowed */ if(flg_mmr_cln){ /* ncbo-specific memory */ if(fl_in_1) fl_in_1=(char *)nco_free(fl_in_1); if(fl_in_2) fl_in_2=(char *)nco_free(fl_in_2); /* NCO-generic clean-up */ /* Free individual strings/arrays */ for(idx=0;idx<nbr_dmn_fl_1;idx++) for(jdx=0;jdx<lmt_all_lst[idx]->lmt_dmn_nbr;jdx++) lmt_all_lst[idx]->lmt_dmn[jdx]=nco_lmt_free(lmt_all_lst[idx]->lmt_dmn[jdx]); if(nbr_dmn_fl_1 > 0) lmt_all_lst=nco_lmt_all_lst_free(lmt_all_lst,nbr_dmn_fl_1); lmt=(lmt_sct**)nco_free(lmt); if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln); if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng); if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng); if(fl_out) fl_out=(char *)nco_free(fl_out); if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp); if(fl_pth) fl_pth=(char *)nco_free(fl_pth); if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl); if(in_id_1_arr) in_id_1_arr=(int *)nco_free(in_id_1_arr); if(in_id_2_arr) in_id_2_arr=(int *)nco_free(in_id_2_arr); /* Free lists of 
strings */ if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr); if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1); if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr); if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr); if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr); /* Free limits */ for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]); for(idx=0;idx<aux_nbr;idx++) aux_arg[idx]=(char *)nco_free(aux_arg[idx]); if(aux_nbr > 0) aux=(lmt_sct **)nco_free(aux); /* Free chunking information */ for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]); if(cnk_nbr > 0) cnk_dmn=nco_cnk_lst_free(cnk_dmn,cnk_nbr); /* Free dimension lists */ if(nbr_dmn_xtr_1 > 0) dim_1=nco_dmn_lst_free(dim_1,nbr_dmn_xtr_1); if(nbr_dmn_xtr_2 > 0) dim_2=nco_dmn_lst_free(dim_2,nbr_dmn_xtr_2); if(nbr_dmn_xtr_1 > 0) dmn_out=nco_dmn_lst_free(dmn_out,nbr_dmn_xtr_1); /* Free variable lists Using nco_var_lst_free() to free main var_1 and var_2 lists would fail if ncap_var_prc_dmn() had to broadcast any variables because pointer var_1 and var_2 still contain dangling pointer to old variable. Hence, use nco_var_lst_free() to free prc and fix lists and use nco_free() to free main var_1 and var_2 lists. 
Dangling pointers in var_1 and var_2 are unsafe: fxm TODO 578 */ if(nbr_var_prc_1 > 0) var_prc_1=nco_var_lst_free(var_prc_1,nbr_var_prc_1); if(nbr_var_fix_1 > 0) var_fix_1=nco_var_lst_free(var_fix_1,nbr_var_fix_1); if(nbr_var_prc_2 > 0) var_prc_2=nco_var_lst_free(var_prc_2,nbr_var_prc_2); if(nbr_var_fix_2 > 0) var_fix_2=nco_var_lst_free(var_fix_2,nbr_var_fix_2); var_1=(var_sct **)nco_free(var_1); var_2=(var_sct **)nco_free(var_2); if(xtr_nbr_1 > 0) var_out=nco_var_lst_free(var_out,xtr_nbr_1); var_prc_out=(var_sct **)nco_free(var_prc_out); var_fix_out=(var_sct **)nco_free(var_fix_out); } /* !flg_mmr_cln */ #ifdef ENABLE_MPI MPI_Finalize(); #endif /* !ENABLE_MPI */ /* End timer */ ddra_info.tmr_flg=nco_tmr_end; /* [enm] Timer flag */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); if(rcd != NC_NOERR) nco_err_exit(rcd,"main"); nco_exit_gracefully(); return EXIT_SUCCESS; } /* end main() */
grid_ao.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include "config.h"
#include "cint.h"
#include "vhf/fblas.h"
#include "gto/grid_ao_drv.h"

#define MIN(X,Y)        ((X)<(Y)?(X):(Y))
#define MAX(X,Y)        ((X)>(Y)?(X):(Y))
// Sentinel stored in non0table meaning "every lattice image contributes".
// Fits in the unsigned char non0table entries (255).
#define ALL_IMAGES      255
// Number of lattice images accumulated per dgemm_ call (blocking factor
// for the image loop in the *_iter functions below).
#define IMGBLK          40
// A complex number occupies two doubles (real, imag) in the flat buffers.
#define OF_CMPLX        2

double CINTcommon_fac_sp(int l);
void GTOshell_eval_grid_cart(double *gto, double *ri, double *exps,
                             double *coord, double *alpha, double *coeff,
                             double *env, int l, int np, int nc,
                             size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv1(double *gto, double *ri, double *exps,
                                    double *coord, double *alpha, double *coeff,
                                    double *env, int l, int np, int nc,
                                    size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv2(double *cgto, double *ri, double *exps,
                                    double *coord, double *alpha, double *coeff,
                                    double *env, int l, int np, int nc,
                                    size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv3(double *cgto, double *ri, double *exps,
                                    double *coord, double *alpha, double *coeff,
                                    double *env, int l, int np, int nc,
                                    size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv4(double *cgto, double *ri, double *exps,
                                    double *coord, double *alpha, double *coeff,
                                    double *env, int l, int np, int nc,
                                    size_t nao, size_t ngrids, size_t bgrids);
// NOTE(review): duplicate declaration of GTOshell_eval_grid_cart (already
// declared above); harmless but redundant.
void GTOshell_eval_grid_cart(double *gto, double *ri, double *exps,
                             double *coord, double *alpha, double *coeff,
                             double *env, int l, int np, int nc,
                             size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_ip_cart(double *gto, double *ri, double *exps,
                                double *coord, double *alpha, double *coeff,
                                double *env, int l, int np, int nc,
                                size_t nao, size_t ngrids, size_t bgrids);

/*
 * Extend the meaning of non0table: given shell ID and block ID,
 * non0table is the number of images in Ls that does not vanish.
 * Ls should be sorted based on the distance to center cell.
 *
 * For each grid block ib and shell bas_id, non0table[ib*nbas+bas_id] is set
 * to the count of contributing images (clamped to ALL_IMAGES), or 0 when no
 * primitive survives the exponential cutoff for any image.
 */
void PBCnr_ao_screen(unsigned char *non0table, double *coords, int ngrids,
                     double *Ls, int nimgs,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;

#pragma omp parallel default(none) \
        shared(Ls, nimgs, coords, ngrids, non0table, atm, natm, bas, nbas, env)
{
        int i, j, m;
        int np, nc, atm_id;
        size_t bas_id, ib;
        double rr, arr, maxc;
        double logcoeff[NPRIMAX];
        double dr[3];
        double rL[3];
        double *p_exp, *pcoeff, *ratm;
#pragma omp for nowait schedule(dynamic)
        for (bas_id = 0; bas_id < nbas; bas_id++) {
                np = bas[NPRIM_OF+bas_id*BAS_SLOTS];
                nc = bas[NCTR_OF +bas_id*BAS_SLOTS];
                p_exp = env + bas[PTR_EXP+bas_id*BAS_SLOTS];
                pcoeff = env + bas[PTR_COEFF+bas_id*BAS_SLOTS];
                atm_id = bas[ATOM_OF+bas_id*BAS_SLOTS];
                ratm = env + atm[atm_id*ATM_SLOTS+PTR_COORD];

                // log of the largest contraction coefficient per primitive;
                // used to loosen the cutoff test: exp(-arr)*maxc > exp(-EXPCUTOFF)
                // is tested as arr - log(maxc) < EXPCUTOFF.
                for (j = 0; j < np; j++) {
                        maxc = 0;
                        for (i = 0; i < nc; i++) {
                                maxc = MAX(maxc, fabs(pcoeff[i*np+j]));
                        }
                        logcoeff[j] = log(maxc);
                }

                for (ib = 0; ib < nblk; ib++) {
                        // Scan images from farthest to nearest (Ls is sorted by
                        // distance); the first image that survives the cutoff
                        // fixes the image count m+1 for this (block, shell).
                        for (m = nimgs-1; m >= 0; m--) {
                                rL[0] = ratm[0] + Ls[m*3+0];
                                rL[1] = ratm[1] + Ls[m*3+1];
                                rL[2] = ratm[2] + Ls[m*3+2];
                                for (i = ib*BLKSIZE; i < MIN(ngrids, (ib+1)*BLKSIZE); i++) {
                                        dr[0] = coords[0*ngrids+i] - rL[0];
                                        dr[1] = coords[1*ngrids+i] - rL[1];
                                        dr[2] = coords[2*ngrids+i] - rL[2];
                                        rr = dr[0]*dr[0] + dr[1]*dr[1] + dr[2]*dr[2];
                                        for (j = 0; j < np; j++) {
                                                arr = p_exp[j] * rr;
                                                if (arr-logcoeff[j] < EXPCUTOFF) {
                                                        non0table[ib*nbas+bas_id] = MIN(ALL_IMAGES, m+1);
                                                        goto next_blk;
                                                }
                                        }
                                }
                        }
                        // No image contributed for this block: mark shell empty.
                        non0table[ib*nbas+bas_id] = 0;
next_blk:;
                }
        }
}
}

/*
 * Scatter the real/imaginary double buffers produced by dgemm_ into the
 * interleaved complex output array.
 *
 * ao_k layout: [nkpts, 2(re/im), ncomp, ncol, bgrids] (flat doubles).
 * out  layout: [nkpts, ncomp, nao, ngrids] (complex), written at an offset
 *              chosen by the caller; only a bgrids-long slice per column is
 *              filled here.
 */
static void _copy(double complex *out, double *ao_k,
                  size_t ngrids, size_t bgrids, int nkpts, int ncomp,
                  int nao, int ncol)
{
        int i, j, k, ic;
        double complex *pout;
        double *ao_r, *ao_i;
        int blksize = ncomp * ncol * bgrids;
        for (k = 0; k < nkpts; k++) {
                // dgemm_ treated the k-point dimension as 2*nkpts real
                // columns; real part at 2k, imaginary part at 2k+1.
                ao_r = ao_k + k*2 * blksize;
                ao_i = ao_k +(k*2+1) * blksize;
                for (ic = 0; ic < ncomp; ic++) {
                        pout = out + (k * ncomp + ic) * nao * ngrids;
                        for (j = 0; j < ncol; j++) {
                                for (i = 0; i < bgrids; i++) {
                                        pout[j*ngrids+i] = (ao_r[j*bgrids+i]
                                                + ao_i[j*bgrids+i]*_Complex_I);
                                }
                        }
                        ao_r += ncol * bgrids;
                        ao_i += ncol * bgrids;
                }
        }
}

/*
 * Fill grid2atm[nimgs,xyz,grid_id] with grid-to-(translated atom) vectors
 * and record, per image, the minimum grid-atom distance in min_grid2atm.
 *
 * Only images below atm_imag_max (or all of them when atm_imag_max ==
 * ALL_IMAGES) are filled; slots for skipped images are left untouched.
 * Callers must therefore guard reads of min_grid2atm[m] with the same
 * image-count condition (the *_iter functions do, via non0table).
 */
static void _fill_grid2atm(double *grid2atm, double *min_grid2atm,
                           double *coord, double *Ls, double *r_atm,
                           int atm_imag_max, size_t bgrids, size_t ngrids,
                           int nimgs)
{
        int ig, m;
        double rL[3];
        double dist;
        double dist_min;
        for (m = 0; m < nimgs; m++) {
                if ((m < atm_imag_max || atm_imag_max == ALL_IMAGES)) {
                        rL[0] = r_atm[0] + Ls[m*3+0];
                        rL[1] = r_atm[1] + Ls[m*3+1];
                        rL[2] = r_atm[2] + Ls[m*3+2];
                        dist_min = 1e9;
                        for (ig = 0; ig < bgrids; ig++) {
                                grid2atm[0*BLKSIZE+ig] = coord[0*ngrids+ig] - rL[0];
                                grid2atm[1*BLKSIZE+ig] = coord[1*ngrids+ig] - rL[1];
                                grid2atm[2*BLKSIZE+ig] = coord[2*ngrids+ig] - rL[2];
                                dist = (grid2atm[0*BLKSIZE+ig]*grid2atm[0*BLKSIZE+ig]
                                      + grid2atm[1*BLKSIZE+ig]*grid2atm[1*BLKSIZE+ig]
                                      + grid2atm[2*BLKSIZE+ig]*grid2atm[2*BLKSIZE+ig]);
                                dist_min = MIN(dist, dist_min);
                        }
                        min_grid2atm[m] = sqrt(dist_min);
                }
                grid2atm += 3*BLKSIZE;
        }
}

/*
 * Evaluate Cartesian PBC AO values for the shells in shls_slice on one grid
 * block (bgrids points starting at offset offao within the full ngrids),
 * accumulating the Bloch phase sum over lattice images via dgemm_:
 *
 *     ao[k] = sum_L expLk[L,k] * ao_raw[L]
 *
 * buf is caller-provided scratch, partitioned below exactly as sized by
 * PBCeval_loop's bufsize formula; the two layouts must stay in sync.
 */
void PBCeval_cart_iter(FPtr_eval feval, FPtr_exp fexp,
                       size_t nao, size_t ngrids, size_t bgrids, size_t offao,
                       int param[], int *shls_slice, int *ao_loc, double *buf,
                       double *Ls, double complex *expLk, int nimgs, int nkpts,
                       int di_max, double complex *ao, double *coord,
                       double *rcut, unsigned char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ncomp = param[TENSOR];
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        // Complex k-point phases handled as 2*nkpts real dgemm_ columns.
        const int nkpts2 = nkpts * OF_CMPLX;
        int i, j, k, l, np, nc, atm_id, bas_id, deg, ao_id;
        int iL, iL0, iLcount, dimc;
        int grid2atm_atm_id, count;
        double fac;
        double *p_exp, *pcoeff, *pcoord, *pao, *ri;
        // Scratch partitioning (must match bufsize in PBCeval_loop):
        double *grid2atm = buf; // shape [nimgs,3,bgrids]
        double *eprim = grid2atm + nimgs*3*BLKSIZE;
        double *aobuf = eprim + NPRIMAX*BLKSIZE*2;
        double *aobufk = aobuf + IMGBLK*ncomp*di_max*bgrids;
        double *Lk_buf = aobufk + nkpts*ncomp*di_max*bgrids * OF_CMPLX;
        double complex *zLk_buf = (double complex *)Lk_buf;
        double *min_grid2atm = Lk_buf + IMGBLK * nkpts * OF_CMPLX;
        double *pexpLk;
        // VLAs; nimgs/natm are assumed small enough for the stack —
        // NOTE(review): confirm against typical lattice-sum sizes.
        int img_idx[nimgs];
        int atm_imag_max[natm];

        // Per-atom maximum image count over its shells, so grid2atm can be
        // filled once per atom and reused by all of that atom's shells.
        for (i = 0; i < natm; i++) {
                atm_imag_max[i] = 0;
        }
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                atm_imag_max[atm_id] = MAX(atm_imag_max[atm_id], non0table[bas_id]);
        }

        grid2atm_atm_id = -1;
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
                nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
                l  = bas[bas_id*BAS_SLOTS+ANG_OF  ];
                deg = (l+1)*(l+2)/2;   // number of Cartesian components for l
                dimc = nc*deg * ncomp * bgrids;
                fac = CINTcommon_fac_sp(l);
                p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
                pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
                ao_id = ao_loc[bas_id] - ao_loc[sh0];

                // Shells are grouped per atom (see GTOshloc_by_atom), so the
                // grid-to-atom table is recomputed only when the atom changes.
                if (grid2atm_atm_id != atm_id) {
                        _fill_grid2atm(grid2atm, min_grid2atm, coord, Ls, ri,
                                       atm_imag_max[atm_id], bgrids, ngrids, nimgs);
                        grid2atm_atm_id = atm_id;
                }

                for (i = 0; i < nkpts2*dimc; i++) {
                        aobufk[i] = 0;
                }

                // Process images in blocks of IMGBLK, accumulating the Bloch
                // sum into aobufk via one dgemm_ per block.
                for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) {
                        iLcount = MIN(IMGBLK, nimgs - iL0);
                        count = 0;
                        for (iL = iL0; iL < iL0+iLcount; iL++) {
                                pcoord = grid2atm + iL * 3*BLKSIZE;
                                // Triple screen: image count from non0table,
                                // geometric rcut distance, then the primitive
                                // exponential screen inside fexp (which also
                                // fills eprim). Short-circuit order guarantees
                                // min_grid2atm[iL] is only read when filled.
                                if ((iL < non0table[bas_id] || non0table[bas_id] == ALL_IMAGES) &&
                                    (min_grid2atm[iL] < rcut[bas_id]) &&
                                    (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac)) {
                                        pao = aobuf + count * dimc;
                                        (*feval)(pao, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                 l, np, nc, nc*deg, bgrids, bgrids);
                                        img_idx[count] = iL;
                                        count += 1;
                                }
                        }
                        if (count > 0) {
                                if (img_idx[count-1] != iL0 + count-1) {
                                        // some images are skipped: gather the
                                        // matching phase rows into zLk_buf so
                                        // they are contiguous for dgemm_.
                                        for (i = 0; i < count; i++) {
                                                j = img_idx[i];
                                                for (k = 0; k < nkpts; k++) {
                                                        zLk_buf[i*nkpts+k] = expLk[j*nkpts+k];
                                                }
                                        }
                                        pexpLk = Lk_buf;
                                } else {
                                        // Contiguous image range: use expLk
                                        // in place (viewed as real pairs).
                                        pexpLk = (double *)(expLk + nkpts * iL0);
                                }
                                dgemm_(&TRANS_N, &TRANS_T, &dimc, &nkpts2, &count,
                                       &D1, aobuf, &dimc, pexpLk, &nkpts2,
                                       &D1, aobufk, &dimc);
                        }
                }
                _copy(ao+ao_id*ngrids+offao, aobufk, ngrids, bgrids,
                      nkpts, ncomp, nao, nc*deg);
        }
}

/*
 * Spherical-harmonic counterpart of PBCeval_cart_iter: identical screening
 * and Bloch accumulation, but for l > 1 the Cartesian values produced by
 * feval are transformed to the (2l+1)-dimensional spherical basis with
 * CINTc2s_ket_sph1 before the dgemm_ accumulation.
 */
void PBCeval_sph_iter(FPtr_eval feval, FPtr_exp fexp,
                      size_t nao, size_t ngrids, size_t bgrids, size_t offao,
                      int param[], int *shls_slice, int *ao_loc, double *buf,
                      double *Ls, double complex *expLk, int nimgs, int nkpts,
                      int di_max, double complex *ao, double *coord,
                      double *rcut, unsigned char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ncomp = param[TENSOR];
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nkpts2 = nkpts * OF_CMPLX;
        int i, j, k, l, np, nc, atm_id, bas_id, deg, dcart, ao_id;
        int iL, iL0, iLcount, dimc;
        int grid2atm_atm_id, count;
        double fac;
        double *p_exp, *pcoeff, *pcoord, *pcart, *pao, *ri;
        // Scratch partitioning (must match bufsize in PBCeval_loop); this
        // variant adds a cart_gto staging area for the cart->sph transform.
        double *grid2atm = buf; // shape [nimgs,3,bgrids]
        double *eprim = grid2atm + nimgs*3*BLKSIZE;
        double *aobuf = eprim + NPRIMAX*BLKSIZE*2;
        double *aobufk = aobuf + IMGBLK*ncomp*di_max*bgrids;
        double *Lk_buf = aobufk + nkpts*ncomp*di_max*bgrids * OF_CMPLX;
        double complex *zLk_buf = (double complex *)Lk_buf;
        double *cart_gto = Lk_buf + IMGBLK * nkpts * OF_CMPLX;
        double *min_grid2atm = cart_gto + ncomp*NCTR_CART*bgrids;
        double *pexpLk;
        int img_idx[nimgs];
        int atm_imag_max[natm];

        // Per-atom maximum image count (see PBCeval_cart_iter).
        for (i = 0; i < natm; i++) {
                atm_imag_max[i] = 0;
        }
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                atm_imag_max[atm_id] = MAX(atm_imag_max[atm_id], non0table[bas_id]);
        }

        grid2atm_atm_id = -1;
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
                nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
                l  = bas[bas_id*BAS_SLOTS+ANG_OF  ];
                deg = l * 2 + 1;        // spherical components
                dcart = (l+1)*(l+2)/2;  // Cartesian components
                dimc = nc*deg * ncomp * bgrids;
                fac = CINTcommon_fac_sp(l);
                p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
                pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
                ao_id = ao_loc[bas_id] - ao_loc[sh0];

                if (grid2atm_atm_id != atm_id) {
                        _fill_grid2atm(grid2atm, min_grid2atm, coord, Ls, ri,
                                       atm_imag_max[atm_id], bgrids, ngrids, nimgs);
                        grid2atm_atm_id = atm_id;
                }

                for (i = 0; i < nkpts2*dimc; i++) {
                        aobufk[i] = 0;
                }

                for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) {
                        iLcount = MIN(IMGBLK, nimgs - iL0);
                        count = 0;
                        for (iL = iL0; iL < iL0+iLcount; iL++) {
                                pcoord = grid2atm + iL * 3*BLKSIZE;
                                if ((iL < non0table[bas_id] || non0table[bas_id] == ALL_IMAGES) &&
                                    (min_grid2atm[iL] < rcut[bas_id]) &&
                                    (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac)) {
                                        pao = aobuf + count * dimc;
                                        if (l <= 1) {
                                                // s, p functions: Cartesian and
                                                // spherical bases coincide.
                                                (*feval)(pao, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                         l, np, nc, nc*dcart, bgrids, bgrids);
                                        } else {
                                                // Evaluate in Cartesian, then
                                                // transform each component to
                                                // spherical harmonics.
                                                (*feval)(cart_gto, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                         l, np, nc, nc*dcart, bgrids, bgrids);
                                                pcart = cart_gto;
                                                for (i = 0; i < ncomp * nc; i++) {
                                                        CINTc2s_ket_sph1(pao, pcart, bgrids, bgrids, l);
                                                        pao += deg * bgrids;
                                                        pcart += dcart * bgrids;
                                                }
                                        }
                                        img_idx[count] = iL;
                                        count++;
                                }
                        }
                        if (count > 0) {
                                if (img_idx[count-1] != iL0 + count-1) {
                                        // some images are skipped
                                        for (i = 0; i < count; i++) {
                                                j = img_idx[i];
                                                for (k = 0; k < nkpts; k++) {
                                                        zLk_buf[i*nkpts+k] = expLk[j*nkpts+k];
                                                }
                                        }
                                        pexpLk = Lk_buf;
                                } else {
                                        pexpLk = (double *)(expLk + nkpts * iL0);
                                }
                                dgemm_(&TRANS_N, &TRANS_T, &dimc, &nkpts2, &count,
                                       &D1, aobuf, &dimc, pexpLk, &nkpts2,
                                       &D1, aobufk, &dimc);
                        }
                }
                _copy(ao+ao_id*ngrids+offao, aobufk, ngrids, bgrids,
                      nkpts, ncomp, nao, nc*deg);
        }
}

int GTOshloc_by_atom(int *shloc, int *shls_slice, int *ao_loc,
                     int *atm, int *bas);

/*
 * blksize <= 1024 to avoid stack overflow
 *
 * non0table[ngrids/blksize,natm] is the T/F table for ao values to
 * screen the ao evaluation for each shell
 *
 * Driver loop: splits the grid into BLKSIZE blocks and the shells into
 * per-atom groups (GTOshloc_by_atom), then dispatches (block, group) work
 * items to fiter (PBCeval_cart_iter or PBCeval_sph_iter) across OpenMP
 * threads, each with its own heap scratch buffer.
 */
void PBCeval_loop(void (*fiter)(), FPtr_eval feval, FPtr_exp fexp,
                  int ngrids, int param[], int *shls_slice, int *ao_loc,
                  double *Ls, int nimgs, double complex *expLk, int nkpts,
                  double complex *ao, double *coord, double *rcut,
                  unsigned char *non0table,
                  int *atm, int natm, int *bas, int nbas, double *env)
{
        int shloc[shls_slice[1]-shls_slice[0]+1];
        const int nshblk = GTOshloc_by_atom(shloc, shls_slice, ao_loc, atm, bas);
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
        const size_t Ngrids = ngrids;
        int i;
        int di_max = 0;
        // Largest shell dimension; sizes the per-shell scratch areas below.
        for (i = shls_slice[0]; i < shls_slice[1]; i++) {
                di_max = MAX(di_max, ao_loc[i+1] - ao_loc[i]);
        }

        // nblk/nshblk/Ngrids are const-qualified, hence predetermined shared
        // under default(none) without being listed.
#pragma omp parallel default(none) \
        shared(fiter, feval, fexp, param, ngrids, \
               Ls, nimgs, di_max, expLk, nkpts, shls_slice, ao_loc, \
               ao, coord, rcut, non0table, atm, natm, bas, nbas, env, shloc)
{
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const size_t nao = ao_loc[sh1] - ao_loc[sh0];
        int ip, ib, k, iloc, ish;
        size_t aoff, bgrids;
        // Scratch size: grid2atm + eprim + aobuf + aobufk + cart_gto blocks
        // (all BLKSIZE-wide) plus the Lk gather buffer and min_grid2atm.
        // Must stay in sync with the pointer partitioning in the *_iter
        // functions. NOTE(review): malloc result is not checked — OOM here
        // would crash inside fiter; consider an explicit check.
        size_t bufsize =((nimgs*3 + NPRIMAX*2 +
                          nkpts *param[POS_E1]*param[TENSOR]*di_max * OF_CMPLX +
                          IMGBLK*param[POS_E1]*param[TENSOR]*di_max +
                          param[POS_E1]*param[TENSOR]*NCTR_CART) * BLKSIZE
                         + nkpts * IMGBLK * OF_CMPLX
                         + nimgs);
        double *buf = malloc(sizeof(double) * bufsize);
#pragma omp for nowait schedule(dynamic, 1)
        for (k = 0; k < nblk*nshblk; k++) {
                // Decode the flat work index into (shell-group, grid-block).
                iloc = k / nblk;
                ish = shloc[iloc];
                ib = k - iloc * nblk;
                ip = ib * BLKSIZE;
                aoff = (ao_loc[ish] - ao_loc[sh0]) * Ngrids + ip;
                bgrids = MIN(ngrids-ip, BLKSIZE);
                (*fiter)(feval, fexp, nao, Ngrids, bgrids, aoff,
                         param, shloc+iloc, ao_loc, buf, Ls, expLk,
                         nimgs, nkpts, di_max, ao, coord+ip, rcut,
                         non0table+ib*nbas, atm, natm, bas, nbas, env);
        }
        free(buf);
}
}

/* Cartesian-basis driver: binds PBCeval_cart_iter into PBCeval_loop. */
void PBCeval_cart_drv(FPtr_eval feval, FPtr_exp fexp,
                      int ngrids, int param[], int *shls_slice, int *ao_loc,
                      double *Ls, int nimgs, double complex *expLk, int nkpts,
                      double complex *ao, double *coord, double *rcut,
                      unsigned char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        PBCeval_loop(PBCeval_cart_iter, feval, fexp,
                     ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                     ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Spherical-basis driver: binds PBCeval_sph_iter into PBCeval_loop. */
void PBCeval_sph_drv(FPtr_eval feval, FPtr_exp fexp,
                     int ngrids, int param[], int *shls_slice, int *ao_loc,
                     double *Ls, int nimgs, double complex *expLk, int nkpts,
                     double complex *ao, double *coord, double *rcut,
                     unsigned char *non0table,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        PBCeval_loop(PBCeval_sph_iter, feval, fexp,
                     ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                     ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* AO values, Cartesian basis. param = {1 component-in, 1 component-out}. */
void PBCGTOval_cart_deriv0(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord, double *rcut,
                           unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 1};
        PBCeval_cart_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* AO values, spherical basis (Cartesian evaluator + c2s transform). */
void PBCGTOval_sph_deriv0(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord, double *rcut,
                          unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 1};
        PBCeval_sph_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* AO values + first derivatives (4 components), Cartesian basis. */
void PBCGTOval_cart_deriv1(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord, double *rcut,
                           unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 4};
        PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv1, GTOcontract_exp1,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* AO values + first derivatives (4 components), spherical basis. */
void PBCGTOval_sph_deriv1(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord, double *rcut,
                          unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 4};
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv1, GTOcontract_exp1,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Up to second derivatives (10 components), Cartesian basis. */
void PBCGTOval_cart_deriv2(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord, double *rcut,
                           unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 10};
        PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv2, GTOprim_exp,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Up to second derivatives (10 components), spherical basis. */
void PBCGTOval_sph_deriv2(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord, double *rcut,
                          unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 10};
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv2, GTOprim_exp,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Up to third derivatives (20 components), Cartesian basis. */
void PBCGTOval_cart_deriv3(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord, double *rcut,
                           unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 20};
        PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv3, GTOprim_exp,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Up to third derivatives (20 components), spherical basis. */
void PBCGTOval_sph_deriv3(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord, double *rcut,
                          unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 20};
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv3, GTOprim_exp,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Up to fourth derivatives (35 components), Cartesian basis. */
void PBCGTOval_cart_deriv4(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord, double *rcut,
                           unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 35};
        PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv4, GTOprim_exp,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Up to fourth derivatives (35 components), spherical basis. */
void PBCGTOval_sph_deriv4(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord, double *rcut,
                          unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 35};
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv4, GTOprim_exp,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Alias for PBCGTOval_cart_deriv0 (plain AO values, Cartesian). */
void PBCGTOval_cart(int ngrids, int *shls_slice, int *ao_loc,
                    double *Ls, int nimgs, double complex *expLk, int nkpts,
                    double complex *ao, double *coord, double *rcut,
                    unsigned char *non0table,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
//        int param[] = {1, 1};
//        PBCeval_cart_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
//                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
//                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
        PBCGTOval_cart_deriv0(ngrids, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                              ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Alias for PBCGTOval_sph_deriv0 (plain AO values, spherical). */
void PBCGTOval_sph(int ngrids, int *shls_slice, int *ao_loc,
                   double *Ls, int nimgs, double complex *expLk, int nkpts,
                   double complex *ao, double *coord, double *rcut,
                   unsigned char *non0table,
                   int *atm, int natm, int *bas, int nbas, double *env)
{
//        int param[] = {1, 1};
//        PBCeval_sph_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
//                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
//                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
        PBCGTOval_sph_deriv0(ngrids, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                             ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Gradient operator (ip = nabla, 3 components), Cartesian basis. */
void PBCGTOval_ip_cart(int ngrids, int *shls_slice, int *ao_loc,
                       double *Ls, int nimgs, double complex *expLk, int nkpts,
                       double complex *ao, double *coord, double *rcut,
                       unsigned char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 3};
        PBCeval_cart_drv(GTOshell_eval_grid_ip_cart, GTOcontract_exp1,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Gradient operator (ip = nabla, 3 components), spherical basis. */
void PBCGTOval_ip_sph(int ngrids, int *shls_slice, int *ao_loc,
                      double *Ls, int nimgs, double complex *expLk, int nkpts,
                      double complex *ao, double *coord, double *rcut,
                      unsigned char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 3};
        PBCeval_sph_drv(GTOshell_eval_grid_ip_cart, GTOcontract_exp1,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
v_p_strategy.h
// // Project Name: KratosPFEMFluidDynamicsApplication $ // Last modified by: $Author: AFranci $ // Date: $Date: January 2016 $ // Revision: $Revision: 0.0 $ // // #ifndef KRATOS_V_P_STRATEGY_H #define KRATOS_V_P_STRATEGY_H #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/solving_strategy.h" #include "custom_utilities/mesher_utilities.hpp" #include "custom_utilities/boundary_normals_calculation_utilities.hpp" #include "custom_utilities/solver_settings.h" #include "pfem_fluid_dynamics_application_variables.h" #include <stdio.h> #include <math.h> namespace Kratos { ///@addtogroup PFEMFluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template <class TSparseSpace, class TDenseSpace, class TLinearSolver> class VPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(VPStrategy); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType; ///@} ///@name Life Cycle ///@{ VPStrategy(ModelPart &rModelPart, SolverSettingsType &rSolverConfig) : BaseType(rModelPart) { std::cout << "VPStrategy INITIALIZE STRATEGY" << std::endl; InitializeStrategy(rSolverConfig); } VPStrategy(ModelPart &rModelPart, typename TLinearSolver::Pointer pVelocityLinearSolver, typename TLinearSolver::Pointer pPressureLinearSolver, bool ReformDofSet = true, unsigned int DomainSize = 2) : BaseType(rModelPart) { KRATOS_TRY; KRATOS_CATCH(""); } /// Destructor. 
virtual ~VPStrategy() {} virtual int Check() override { return false; } virtual bool SolveSolutionStep() override { return false; } virtual void FinalizeSolutionStep() override {} virtual void InitializeSolutionStep() override {} void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel) { KRATOS_TRY; this->CalculateDisplacementsAndPorosity(); BaseType::MoveMesh(); KRATOS_CATCH(""); } void SetBlockedAndIsolatedFlags() { KRATOS_TRY; ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd); for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { unsigned int numNodes = itElem->GetGeometry().size(); std::vector<array_1d<double, 3>> nodesCoordinates; nodesCoordinates.resize(numNodes); (itElem)->Set(BLOCKED, false); (itElem)->Set(ISOLATED, false); unsigned int freeSurfaceNodes = 0; unsigned int freeSurfaceRigidNodes = 0; unsigned int rigidNodes = 0; unsigned int isolatedNodes = 0; for (unsigned int i = 0; i < numNodes; i++) { if (itElem->GetGeometry()[i].Is(FREE_SURFACE)) { freeSurfaceNodes++; if (itElem->GetGeometry()[i].Is(RIGID)) { freeSurfaceRigidNodes++; } } else if (itElem->GetGeometry()[i].Is(RIGID)) { rigidNodes++; } nodesCoordinates[i] = itElem->GetGeometry()[i].Coordinates(); ElementWeakPtrVectorType &neighb_elems = itElem->GetGeometry()[i].GetValue(NEIGHBOUR_ELEMENTS); if (neighb_elems.size() == 1) { isolatedNodes++; } } if (dimension == 3) { double a1 = 0; //slope x for plane on the first triangular face of the tetrahedra (nodes A,B,C) double b1 = 0; //slope y for plane on the first triangular face of the tetrahedra (nodes A,B,C) double c1 = 0; //slope z for plane on the first triangular face of the tetrahedra (nodes A,B,C) a1 = (nodesCoordinates[1][1] - 
nodesCoordinates[0][1]) * (nodesCoordinates[2][2] - nodesCoordinates[0][2]) - (nodesCoordinates[2][1] - nodesCoordinates[0][1]) * (nodesCoordinates[1][2] - nodesCoordinates[0][2]); b1 = (nodesCoordinates[1][2] - nodesCoordinates[0][2]) * (nodesCoordinates[2][0] - nodesCoordinates[0][0]) - (nodesCoordinates[2][2] - nodesCoordinates[0][2]) * (nodesCoordinates[1][0] - nodesCoordinates[0][0]); c1 = (nodesCoordinates[1][0] - nodesCoordinates[0][0]) * (nodesCoordinates[2][1] - nodesCoordinates[0][1]) - (nodesCoordinates[2][0] - nodesCoordinates[0][0]) * (nodesCoordinates[1][1] - nodesCoordinates[0][1]); double a2 = 0; //slope x for plane on the second triangular face of the tetrahedra (nodes A,B,D) double b2 = 0; //slope y for plane on the second triangular face of the tetrahedra (nodes A,B,D) double c2 = 0; //slope z for plane on the second triangular face of the tetrahedra (nodes A,B,D) a2 = (nodesCoordinates[1][1] - nodesCoordinates[0][1]) * (nodesCoordinates[3][2] - nodesCoordinates[0][2]) - (nodesCoordinates[3][1] - nodesCoordinates[0][1]) * (nodesCoordinates[1][2] - nodesCoordinates[0][2]); b2 = (nodesCoordinates[1][2] - nodesCoordinates[0][2]) * (nodesCoordinates[3][0] - nodesCoordinates[0][0]) - (nodesCoordinates[3][2] - nodesCoordinates[0][2]) * (nodesCoordinates[1][0] - nodesCoordinates[0][0]); c2 = (nodesCoordinates[1][0] - nodesCoordinates[0][0]) * (nodesCoordinates[3][1] - nodesCoordinates[0][1]) - (nodesCoordinates[3][0] - nodesCoordinates[0][0]) * (nodesCoordinates[1][1] - nodesCoordinates[0][1]); double a3 = 0; //slope x for plane on the third triangular face of the tetrahedra (nodes B,C,D) double b3 = 0; //slope y for plane on the third triangular face of the tetrahedra (nodes B,C,D) double c3 = 0; //slope z for plane on the third triangular face of the tetrahedra (nodes B,C,D) a3 = (nodesCoordinates[1][1] - nodesCoordinates[2][1]) * (nodesCoordinates[3][2] - nodesCoordinates[2][2]) - (nodesCoordinates[3][1] - nodesCoordinates[2][1]) * 
(nodesCoordinates[1][2] - nodesCoordinates[2][2]); b3 = (nodesCoordinates[1][2] - nodesCoordinates[2][2]) * (nodesCoordinates[3][0] - nodesCoordinates[2][0]) - (nodesCoordinates[3][2] - nodesCoordinates[2][2]) * (nodesCoordinates[1][0] - nodesCoordinates[2][0]); c3 = (nodesCoordinates[1][0] - nodesCoordinates[2][0]) * (nodesCoordinates[3][1] - nodesCoordinates[2][1]) - (nodesCoordinates[3][0] - nodesCoordinates[2][0]) * (nodesCoordinates[1][1] - nodesCoordinates[2][1]); double a4 = 0; //slope x for plane on the fourth triangular face of the tetrahedra (nodes A,C,D) double b4 = 0; //slope y for plane on the fourth triangular face of the tetrahedra (nodes A,C,D) double c4 = 0; //slope z for plane on the fourth triangular face of the tetrahedra (nodes A,C,D) a4 = (nodesCoordinates[0][1] - nodesCoordinates[2][1]) * (nodesCoordinates[3][2] - nodesCoordinates[2][2]) - (nodesCoordinates[3][1] - nodesCoordinates[2][1]) * (nodesCoordinates[0][2] - nodesCoordinates[2][2]); b4 = (nodesCoordinates[0][2] - nodesCoordinates[2][2]) * (nodesCoordinates[3][0] - nodesCoordinates[2][0]) - (nodesCoordinates[3][2] - nodesCoordinates[2][2]) * (nodesCoordinates[0][0] - nodesCoordinates[2][0]); c4 = (nodesCoordinates[0][0] - nodesCoordinates[2][0]) * (nodesCoordinates[3][1] - nodesCoordinates[2][1]) - (nodesCoordinates[3][0] - nodesCoordinates[2][0]) * (nodesCoordinates[0][1] - nodesCoordinates[2][1]); double cosAngle12 = (a1 * a2 + b1 * b2 + c1 * c2) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2))); double cosAngle13 = (a1 * a3 + b1 * b3 + c1 * c3) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2))); double cosAngle14 = (a1 * a4 + b1 * b4 + c1 * c4) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2))); double cosAngle23 = (a3 * a2 + b3 * b2 + c3 * c2) / (sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2))); double cosAngle24 = (a4 * 
a2 + b4 * b2 + c4 * c2) / (sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2))); double cosAngle34 = (a4 * a3 + b4 * b3 + c4 * c3) / (sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)) * sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2))); if ((fabs(cosAngle12) > 0.99 || fabs(cosAngle13) > 0.99 || fabs(cosAngle14) > 0.99 || fabs(cosAngle23) > 0.99 || fabs(cosAngle24) > 0.99 || fabs(cosAngle34) > 0.99) && (freeSurfaceNodes == numNodes) && isolatedNodes > 1) { (itElem)->Set(BLOCKED, true); // std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl; } else if ((fabs(cosAngle12) > 0.995 || fabs(cosAngle13) > 0.995 || fabs(cosAngle14) > 0.995 || fabs(cosAngle23) > 0.995 || fabs(cosAngle24) > 0.995 || fabs(cosAngle34) > 0.995) && (freeSurfaceNodes == numNodes) && isolatedNodes == 1) { (itElem)->Set(BLOCKED, true); // std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl; } else if ((fabs(cosAngle12) > 0.999 || fabs(cosAngle13) > 0.999 || fabs(cosAngle14) > 0.999 || fabs(cosAngle23) > 0.999 || fabs(cosAngle24) > 0.999 || fabs(cosAngle34) > 0.999) && (freeSurfaceNodes == numNodes)) { (itElem)->Set(BLOCKED, true); // std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl; } } if (freeSurfaceNodes == numNodes && rigidNodes == 0 && isolatedNodes >= (numNodes - 1)) { (itElem)->Set(ISOLATED, true); (itElem)->Set(BLOCKED, false); } } } KRATOS_CATCH(""); } void CalculatePressureVelocity() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; unsigned int timeStep = rCurrentProcessInfo[STEP]; for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { if (timeStep == 1) { (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0; } else { double &CurrentPressure = 
(i)->FastGetSolutionStepValue(PRESSURE, 0); double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1); double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0); CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval; } } } void CalculatePressureAcceleration() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; unsigned int timeStep = rCurrentProcessInfo[STEP]; for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { if (timeStep == 1) { (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0; } else { double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0); double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1); double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0); CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval; } } } virtual void CalculateTemporalVariables() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0); array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0); array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1); /* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */ if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID))) { UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity); } else 
if ((i)->Is(RIGID)) { array_1d<double, 3> Zeros(3, 0.0); (i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros; (i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros; } else { (i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0; if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION)) { array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION); (i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration; (i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME]; } } const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; unsigned int timeStep = rCurrentProcessInfo[STEP]; if (timeStep == 1) { (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0; } else { double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0); double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1); double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0); double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0); CurrentPressureAcceleration = CurrentPressureVelocity / timeInterval; CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval; CurrentPressureAcceleration += -CurrentPressureVelocity / timeInterval; } } } void CalculateAccelerations() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != 
rModelPart.NodesEnd(); ++i) { array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0); array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0); array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1); /* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */ if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID))) { UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity); } else if ((i)->Is(RIGID)) { array_1d<double, 3> Zeros(3, 0.0); (i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros; (i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros; } else { (i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0; if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION)) { array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION); (i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration; (i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME]; } } } } inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration, const array_1d<double, 3> &CurrentVelocity, array_1d<double, 3> &PreviousAcceleration, const array_1d<double, 3> &PreviousVelocity) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double Dt = rCurrentProcessInfo[DELTA_TIME]; noalias(CurrentAcceleration) = 2.0 * (CurrentVelocity - PreviousVelocity) / Dt - PreviousAcceleration; } virtual void CalculateDisplacementsAndPorosity() { ModelPart &rModelPart = 
BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double TimeStep = rCurrentProcessInfo[DELTA_TIME]; for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0); array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0); array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1); /* if( i->IsFixed(DISPLACEMENT_X) == false ) */ CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0]; /* if( i->IsFixed(DISPLACEMENT_Y) == false ) */ CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1]; /* if( i->IsFixed(DISPLACEMENT_Z) == false ) */ CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2]; // currentFluidFractionRate = (currentFluidFraction - previousFluidFraction)/TimeStep; } } virtual void UpdateStressStrain() {} virtual void Clear() override {} ///@} ///@name Access ///@{ virtual void SetEchoLevel(int Level) override { BaseType::SetEchoLevel(Level); } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { std::stringstream buffer; buffer << "VPStrategy"; return buffer.str(); } /// Print information about this object. void PrintInfo(std::ostream &rOStream) const override { rOStream << "VPStrategy"; } /// Print object's data. 
void PrintData(std::ostream &rOStream) const override { } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected Life Cycle ///@{ ///@} ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /// Calculate the coefficients for time iteration. /** * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME variables. */ virtual bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep, double &velocityNorm) { return false; } virtual bool SolveContinuityIteration(unsigned int it, unsigned int maxIt, double &NormP) { return false; } void ComputeErrorL2Norm(double tensilStressSign) //tensilStressSign = 1.0 for FIC, tensilStressSign = -1.0 for FS { ModelPart &rModelPart = BaseType::GetModelPart(); const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double currentTime = rCurrentProcessInfo[TIME]; const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); long double sumErrorL2Velocity = 0; long double sumErrorL2VelocityX = 0; long double sumErrorL2VelocityY = 0; long double sumErrorL2Pressure = 0; long double sumErrorL2TauXX = 0; long double sumErrorL2TauYY = 0; long double sumErrorL2TauXY = 0; #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd); for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { Element::GeometryType &geometry = itElem->GetGeometry(); long double nodalArea = 0; if (dimension == 2) { nodalArea = geometry.Area() / 3.0; } else if (dimension == 3) { nodalArea = geometry.Volume() * 0.25; } long double bariPosX = 0; long double bariPosY = 0; long double eleErrorL2Velocity = 0; long double eleErrorL2VelocityX = 0; long double eleErrorL2VelocityY = 0; 
long double eleErrorL2Pressure = 0; //ShapeFunctionDerivativesArrayType DN_DX; Matrix NContainer; NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1); const Vector &N = row(NContainer, 0); const unsigned int NumNodes = geometry.size(); double elementalPressure = N[0] * geometry(0)->FastGetSolutionStepValue(PRESSURE); double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X); double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y); ; for (unsigned int i = 1; i < NumNodes; i++) { elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE); elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X); elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y); } for (unsigned int i = 0; i < geometry.size(); i++) { const long double nodalPosX = geometry(i)->X(); const long double nodalPosY = geometry(i)->Y(); bariPosX += nodalPosX / 3.0; bariPosY += nodalPosY / 3.0; } const long double posX = bariPosX; const long double posY = bariPosY; long double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3)); long double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3)); long double expectedPressure = -tensilStressSign * posX * (1.0 - posX); eleErrorL2VelocityX = elementalVelocityX - expectedVelocityX; eleErrorL2VelocityY = elementalVelocityY - expectedVelocityY; eleErrorL2Pressure = elementalPressure - expectedPressure; sumErrorL2VelocityX += pow(eleErrorL2VelocityX, 2) * geometry.Area(); sumErrorL2VelocityY += pow(eleErrorL2VelocityY, 2) * geometry.Area(); sumErrorL2Pressure += pow(eleErrorL2Pressure, 2) * geometry.Area(); const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX); const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY); const long double tauXY = 0; // 
itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY); long double expectedTauXX = 2.0 * (-4.0 * (1.0 - bariPosX) * bariPosX * (-1.0 + 2.0 * bariPosX) * bariPosY * (1.0 - 3.0 * bariPosY + 2.0 * pow(bariPosY, 2))); long double expectedTauYY = 2.0 * (4.0 * bariPosX * (1.0 - 3.0 * bariPosX + 2.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * bariPosY * (-1.0 + 2.0 * bariPosY)); long double expectedTauXY = (2.0 * (1.0 - 6.0 * bariPosY + 6.0 * pow(bariPosY, 2)) * (1.0 - bariPosX) * (1.0 - bariPosX) * pow(bariPosX, 2) - 2.0 * (1.0 - 6.0 * bariPosX + 6.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * (1 - bariPosY) * pow(bariPosY, 2)); long double nodalErrorTauXX = tauXX - expectedTauXX; long double nodalErrorTauYY = tauYY - expectedTauYY; long double nodalErrorTauXY = tauXY - expectedTauXY; sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * geometry.Area(); sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * geometry.Area(); sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * geometry.Area(); } } long double errorL2Velocity = sqrt(sumErrorL2Velocity); long double errorL2VelocityX = sqrt(sumErrorL2VelocityX); long double errorL2VelocityY = sqrt(sumErrorL2VelocityY); long double errorL2Pressure = sqrt(sumErrorL2Pressure); long double errorL2TauXX = sqrt(sumErrorL2TauXX); long double errorL2TauYY = sqrt(sumErrorL2TauYY); long double errorL2TauXY = sqrt(sumErrorL2TauXY); std::ofstream myfileVelocity; myfileVelocity.open("errorL2VelocityFile.txt", std::ios::app); myfileVelocity << currentTime << "\t" << errorL2Velocity << "\n"; myfileVelocity.close(); std::ofstream myfileVelocityX; myfileVelocityX.open("errorL2VelocityXFile.txt", std::ios::app); myfileVelocityX << currentTime << "\t" << errorL2VelocityX << "\n"; myfileVelocityX.close(); std::ofstream myfileVelocityY; myfileVelocityY.open("errorL2VelocityYFile.txt", std::ios::app); myfileVelocityY << currentTime << "\t" << errorL2VelocityY << "\n"; myfileVelocityY.close(); std::ofstream myfilePressure; myfilePressure.open("errorL2PressureFile.txt", 
std::ios::app); myfilePressure << currentTime << "\t" << errorL2Pressure << "\n"; myfilePressure.close(); std::ofstream myfileTauXX; myfileTauXX.open("errorL2TauXXFile.txt", std::ios::app); myfileTauXX << currentTime << "\t" << errorL2TauXX << "\n"; myfileTauXX.close(); std::ofstream myfileTauYY; myfileTauYY.open("errorL2TauYYFile.txt", std::ios::app); myfileTauYY << currentTime << "\t" << errorL2TauYY << "\n"; myfileTauYY.close(); std::ofstream myfileTauXY; myfileTauXY.open("errorL2TauXYFile.txt", std::ios::app); myfileTauXY << currentTime << "\t" << errorL2TauXY << "\n"; myfileTauXY.close(); } void ComputeErrorL2NormCasePoiseuille() { ModelPart &rModelPart = BaseType::GetModelPart(); const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double currentTime = rCurrentProcessInfo[TIME]; const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); double sumErrorL2VelocityTheta = 0; double sumErrorL2TauTheta = 0; double r_in = 0.2; double R_out = 0.5; double kappa = r_in / R_out; double omega = 0.5; double viscosity = 100.0; #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd); for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { Element::GeometryType &geometry = itElem->GetGeometry(); long double nodalArea = 0; if (dimension == 2) { nodalArea = geometry.Area() / 3.0; } else if (dimension == 3) { nodalArea = geometry.Volume() * 0.25; } long double bariPosX = 0; long double bariPosY = 0; long double eleErrorL2Velocity = 0; long double eleErrorL2VelocityX = 0; long double eleErrorL2VelocityY = 0; long double eleErrorL2Pressure = 0; //ShapeFunctionDerivativesArrayType DN_DX; Matrix NContainer; NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1); //this->CalculateGeometryData(DN_DX,NContainer,GaussWeights); const Vector &N = row(NContainer, 0); // 
itElem->EvaluateInPoint(elementalPressure,PRESSURE,N); const unsigned int NumNodes = geometry.size(); double elementalPressure = N[0] * geometry(0)->FastGetSolutionStepValue(PRESSURE); double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X); double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y); ; for (unsigned int i = 1; i < NumNodes; i++) { elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE); elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X); elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y); } for (unsigned int i = 0; i < geometry.size(); i++) { // index = i*dimension; const long double nodalPosX = geometry(i)->X(); const long double nodalPosY = geometry(i)->Y(); bariPosX += nodalPosX / 3.0; bariPosY += nodalPosY / 3.0; } const long double posX = bariPosX; const long double posY = bariPosY; const double rPos = sqrt(pow(posX, 2) + pow(posY, 2)); const double cosalfa = posX / rPos; const double sinalfa = posY / rPos; const double sin2alfa = 2.0 * cosalfa * sinalfa; const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2); double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out); double computedVelocityTheta = sqrt(pow(elementalVelocityX, 2) + pow(elementalVelocityY, 2)); double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta; const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX); const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY); const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY); double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2); double computedTauTheta = (tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa; double nodalErrorTauTheta = computedTauTheta - expectedTauTheta; 
sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * geometry.Area(); sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * geometry.Area(); } } double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta); double errorL2TauTheta = sqrt(sumErrorL2TauTheta); std::ofstream myfileVelocity; myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app); myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n"; myfileVelocity.close(); } double ComputeVelocityNorm() { ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); double NormV = 0.00; #pragma omp parallel for reduction(+ \ : NormV) for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = rModelPart.NodesBegin() + i_node; const auto &r_vel = it_node->FastGetSolutionStepValue(VELOCITY); for (unsigned int d = 0; d < 3; ++d) { NormV += r_vel[d] * r_vel[d]; } } NormV = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV); NormV = sqrt(NormV); const double zero_tol = 1.0e-12; if (NormV < zero_tol) NormV = 1.00; return NormV; } double ComputePressureNorm() { ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); double NormP = 0.00; #pragma omp parallel for reduction(+ \ : NormP) for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = rModelPart.NodesBegin() + i_node; const double Pr = it_node->FastGetSolutionStepValue(PRESSURE); NormP += Pr * Pr; } NormP = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP); NormP = sqrt(NormP); const double zero_tol = 1.0e-12; if (NormP < zero_tol) NormP = 1.00; return NormP; } virtual bool CheckVelocityConvergence(const double NormDv, double &errorNormDv) { return false; } virtual bool CheckPressureConvergence(const double NormDp, double &errorNormDp, double &NormP) { return false; } virtual bool FixTimeStepMomentum(const double DvErrorNorm, bool &fixedTimeStep) { return 
false; } virtual bool CheckMomentumConvergence(const double DvErrorNorm, bool &fixedTimeStep) { return false; } virtual bool FixTimeStepContinuity(const double DvErrorNorm, bool &fixedTimeStep) { return false; } virtual bool CheckContinuityConvergence(const double DvErrorNorm, bool &fixedTimeStep) { return false; } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ // Fractional step index. /* 1 : Momentum step (calculate fractional step velocity) * 2-3 : Unused (reserved for componentwise calculation of frac step velocity) * 4 : Pressure step * 5 : Computation of projections * 6 : End of step velocity */ // unsigned int mStepId; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ virtual void InitializeStrategy(SolverSettingsType &rSolverConfig) { KRATOS_TRY; KRATOS_CATCH(""); } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. VPStrategy &operator=(VPStrategy const &rOther) {} /// Copy constructor. VPStrategy(VPStrategy const &rOther) {} ///@} }; /// Class VPStrategy ///@} ///@name Type Definitions ///@{ ///@} ///@} // addtogroup } // namespace Kratos. #endif // KRATOS_V_P_STRATEGY_H
1539.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp target teams distribute parallel for schedule(static, 4) num_threads(4) dist_schedule(static, 8) private(j) for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp parallel for simd schedule(dynamic, 16) for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
sgemm_spec.c
#include "config.h"

/* The original relied on transitive includes for printf/memcpy/fabsf;
 * include the owning headers explicitly. */
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <stdlib.h>
#ifdef HAVE_PNG
#include <png.h>
#endif
#include <sys/time.h>
#include <omp.h>
#include <CUnit/Basic.h>
#include <CUnit/Console.h>
#include "mkl.h"

/* Wall-clock time in seconds (microsecond resolution). */
static double get_time() {
  struct timeval t;
  gettimeofday(&t, NULL);
  return (double)t.tv_sec + t.tv_usec * 1e-6;
}

#ifdef HAVE_PNG
/* Write an h x w float matrix (values expected in 0..255) to `file` as an
 * 8-bit grayscale PNG.  Used only to visualize mismatch maps when a test
 * fails. */
static void visualize(const char* file, const int h, const int w, float* p) {
  png_structp png = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
  png_infop info = png_create_info_struct(png);
  /* "wb", not "w": PNG is binary data, and text mode corrupts it on Windows. */
  FILE* fp = fopen(file, "wb");
  if (fp == NULL) { /* original dereferenced a NULL stream on fopen failure */
    png_destroy_write_struct(&png, &info);
    return;
  }
  png_init_io(png, fp);
  png_set_IHDR(png, info, w, h, 8,
               PNG_COLOR_TYPE_GRAY, PNG_INTERLACE_NONE,
               PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
  png_bytepp rows = png_malloc(png, sizeof(png_bytep) * h);
  png_set_rows(png, info, rows);
  int i, j;
  for (i = 0; i < h; ++i)
    rows[i] = png_malloc(png, w);
  for (i = 0; i < h; ++i) {
    for (j = 0; j < w; ++j) {
      rows[i][j] = (png_byte)p[i*w+j]; // p[*] : 0 ~ 255
    }
  }
  png_write_png(png, info, PNG_TRANSFORM_IDENTITY, NULL);
  for (i = 0; i < h; ++i)
    png_free(png, rows[i]);
  png_free(png, rows);
  png_destroy_write_struct(&png, &info);
  fclose(fp); /* original leaked the FILE handle */
}
#endif

static void suite_sgemm_RNN();
static void suite_sgemm_RNT();
static void suite_sgemm_RTN();
static void suite_sgemm_RTT();
static void suite_sgemm_with_mempool();

int main() {
  CU_initialize_registry();
  suite_sgemm_RNN();
  suite_sgemm_RNT();
  suite_sgemm_RTN();
  suite_sgemm_RTT();
  suite_sgemm_with_mempool();
  /* Interactive console when attached to a terminal, batch run otherwise. */
  isatty(fileno(stdout)) ? CU_console_run_tests() : CU_basic_run_tests();
  const unsigned int result = CU_get_number_of_failures();
  CU_cleanup_registry();
  return (result ? 1 : 0);
}

/* Declare the small/medium/large size variants of a test function. */
#define DECL_TEST_FOR_EACH_SIZE(TEST_FUNCTION) \
  static void TEST_FUNCTION##_S(); \
  static void TEST_FUNCTION##_M(); \
  static void TEST_FUNCTION##_L();

DECL_TEST_FOR_EACH_SIZE(test_sgemm_RNN_ones);
DECL_TEST_FOR_EACH_SIZE(test_sgemm_RNN_randoms);
static void test_sgemm_RNN_benchmark();

/* Fixed seed so the random test matrices are reproducible across runs. */
int setup_suite_sgemm_RNN() { srand(0xDEADBEEF); return 0; }
int teardown_suite_sgemm_RNN() { return 0; }

void suite_sgemm_RNN() {
  CU_pSuite suite = CU_add_suite("sgemm RNN", setup_suite_sgemm_RNN, teardown_suite_sgemm_RNN);
  CU_add_test(suite, "ones (small)", test_sgemm_RNN_ones_S);
  CU_add_test(suite, "ones (medium)", test_sgemm_RNN_ones_M);
  CU_add_test(suite, "ones (large)", test_sgemm_RNN_ones_L);
  CU_add_test(suite, "randoms (small)", test_sgemm_RNN_randoms_S);
  CU_add_test(suite, "randoms (medium)", test_sgemm_RNN_randoms_M);
  CU_add_test(suite, "randoms (large)", test_sgemm_RNN_randoms_L);
  CU_add_test(suite, "benchmark", test_sgemm_RNN_benchmark);
}

/* Allocate a page-aligned m x n float matrix filled with 1.0. */
static float* mkl_malloc_ones(const int m, const int n) {
  float* p = mkl_malloc(m * n * sizeof(float), 4096);
  int i = 0;
  for (i = 0; i < m * n; ++i)
    p[i] = 1.0;
  return p;
}

/* A, B, C all ones => C = A*B + C has every element equal to K+1. */
static void test_sgemm_RNN_ones(const int M, const int N, const int K) {
  float* A = mkl_malloc_ones(M, K);
  float* B = mkl_malloc_ones(K, N);
  float* C = mkl_malloc_ones(M, N);

  cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
              M, N, K, 1, A, K, B, N, 1, C, N);

  int diff = 0;
  {
    int i, j;
    for (i = 0; i < M; ++i)
      for (j = 0; j < N; ++j)
        diff |= K+1 - (int)C[i*N+j];
    CU_ASSERT_EQUAL(diff, 0);
  }
#ifdef HAVE_PNG
  if (diff) {
    /* Render correct cells white, wrong cells black, and dump as PNG. */
    {
      int i, j;
#pragma omp parallel for private(i, j)
      for (i = 0; i < M; ++i)
        for (j = 0; j < N; ++j)
          C[i*N+j] = (K+1 == (int)C[i*N+j]) ? 255 : 0;
    }
    char file[256] = {0};
    sprintf(file, "ones_%dx%d_%dx%d.png", M, K, K, N);
    visualize(file, M, N, C);
  }
#endif
  mkl_free(C);
  mkl_free(B);
  mkl_free(A);
}

static int rand_int_in_range(int from, int to) {
  return ((float)rand() / RAND_MAX) * (to - from) + from;
}

/* Run a test function over 32 small / 16 medium / 8 large random shapes. */
#define IMPL_TEST_FOR_EACH_SIZE(TEST_FUNCTION) \
  void TEST_FUNCTION##_S() { \
    int i = 0; \
    puts(""); \
    for (i = 0; i < 32; ++i) { \
      int M = rand_int_in_range(1, 256); \
      int N = rand_int_in_range(1, 256); \
      int K = rand_int_in_range(2, 128); \
      printf("M = %d, N = %d K = %d\n", M, N, K); \
      TEST_FUNCTION(M, N, K); \
    } \
  } \
  void TEST_FUNCTION##_M() { \
    int i = 0; \
    puts(""); \
    for (i = 0; i < 16; ++i) { \
      int M = rand_int_in_range(1, 64 * 12); \
      int N = rand_int_in_range(1, 64 * 12); \
      int K = rand_int_in_range(2, 128); \
      printf("M = %d, N = %d K = %d\n", M, N, K); \
      TEST_FUNCTION(M, N, K); \
    } \
  } \
  void TEST_FUNCTION##_L() { \
    int i = 0; \
    puts(""); \
    for (i = 0; i < 8; ++i) { \
      int M = rand_int_in_range(64 * 12, 1024); \
      int N = rand_int_in_range(4096, 8192); \
      int K = rand_int_in_range(2, 128); \
      printf("M = %d, N = %d K = %d\n", M, N, K); \
      TEST_FUNCTION(M, N, K); \
    } \
  }

IMPL_TEST_FOR_EACH_SIZE(test_sgemm_RNN_ones);

static float rand_float_in_range(float from, float to) {
  return ((float)rand() / RAND_MAX) * (to - from) + from;
}

/* Allocate a page-aligned m x n float matrix of uniform randoms in [-1, 1).
 * Filled sequentially on purpose: rand() keeps hidden global state, so the
 * original `#pragma omp parallel for` here was a data race and made the data
 * non-deterministic despite the fixed srand() seed. */
static float* mkl_malloc_randoms(const int m, const int n) {
  float* p = mkl_malloc(m * n * sizeof(float), 4096);
  int i = 0;
  for (i = 0; i < m * n; ++i)
    p[i] = rand_float_in_range(-1.0, 1.0);
  return p;
}

/* Compare MKL sgemm (no-trans, no-trans) against a naive triple-loop
 * reference; max absolute element error must stay below 1e-3. */
static void test_sgemm_RNN_randoms(const int M, const int N, const int K) {
  float* A = mkl_malloc_randoms(M, K);
  float* B = mkl_malloc_randoms(K, N);
  float* C = mkl_malloc_randoms(M, N);
  float* A_ref = malloc(K*M*sizeof(float));
  float* B_ref = malloc(N*K*sizeof(float));
  float* C_ref = malloc(M*N*sizeof(float));
  memcpy(A_ref, A, M*K*sizeof(float));
  memcpy(B_ref, B, K*N*sizeof(float));
  memcpy(C_ref, C, M*N*sizeof(float));
  const float alpha = rand_float_in_range(-1.0, 1.0);
  const float beta = rand_float_in_range(-1.0, 1.0);

  cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
              M, N, K, alpha, A, K, B, N, beta, C, N);

  {
    int i, j, k;
#pragma omp parallel for private(i, j, k)
    for (i = 0; i < M; ++i) {
      for (j = 0; j < N; ++j) {
        float acc = 0;
        for (k = 0; k < K; ++k)
          acc += A_ref[i*K+k] * B_ref[k*N+j];
        C_ref[i*N+j] = alpha * acc + beta * C_ref[i*N+j];
      }
    }
  }
  {
    float maximum_abs_error = 0;
    int i, j;
#pragma omp parallel for private(i, j) reduction(max: maximum_abs_error)
    for (i = 0; i < M; ++i) {
      for (j = 0; j < N; ++j) {
        if (maximum_abs_error < fabsf(C_ref[i*N+j] - C[i*N+j]))
          maximum_abs_error = fabsf(C_ref[i*N+j] - C[i*N+j]);
      }
    }
    CU_ASSERT_DOUBLE_EQUAL(maximum_abs_error, 0, 0.001);
  }
  free(C_ref);
  free(B_ref);
  free(A_ref);
  mkl_free(C);
  mkl_free(B);
  mkl_free(A);
}

IMPL_TEST_FOR_EACH_SIZE(test_sgemm_RNN_randoms);

/* Time MKL sgemm vs the naive CPU reference for a fixed RNN shape. */
void test_sgemm_RNN_benchmark() {
  const int M = 96;
  const int N = 3072;
  const int K = 363;
  float* A = mkl_malloc_randoms(M, K);
  float* B = mkl_malloc_randoms(K, N);
  float* C = mkl_malloc_randoms(M, N);
  float* A_ref = malloc(K*M*sizeof(float));
  float* B_ref = malloc(N*K*sizeof(float));
  float* C_ref = malloc(M*N*sizeof(float));
  memcpy(A_ref, A, M*K*sizeof(float));
  memcpy(B_ref, B, K*N*sizeof(float));
  memcpy(C_ref, C, M*N*sizeof(float));
  const float alpha = rand_float_in_range(-1.0, 1.0);
  const float beta = rand_float_in_range(-1.0, 1.0);

  printf("\nRNN: %dx%d * %dx%d\n", M, K, K, N);
  {
    double start = get_time();
    cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                M, N, K, alpha, A, K, B, N, beta, C, N);
    double elapsed_time = get_time() - start;
    printf("GPU: %9.6lf [sec], %9.6lf [Gflop/s]\n",
           elapsed_time, (2 * M * N * K + 3 * M * N) / elapsed_time * 1e-9);
  }
  {
    double start = get_time();
    int i, j, k;
#pragma omp parallel for private(i, j, k)
    for (i = 0; i < M; ++i) {
      for (j = 0; j < N; ++j) {
        float acc = 0;
        for (k = 0; k < K; ++k)
          acc += A_ref[i*K+k] * B_ref[k*N+j];
        C_ref[i*N+j] = alpha * acc + beta * C_ref[i*N+j];
      }
    }
    double elapsed_time = get_time() - start;
    printf("CPU (%d threads): %9.6lf [sec], %9.6lf [Gflop/s]\n",
           omp_get_max_threads(), elapsed_time,
           (2 * M * N * K + 3 * M * N) / elapsed_time * 1e-9);
  }
  free(C_ref);
  free(B_ref);
  free(A_ref);
  mkl_free(C);
  mkl_free(B);
  mkl_free(A);
}

DECL_TEST_FOR_EACH_SIZE(test_sgemm_RNT_ones);
DECL_TEST_FOR_EACH_SIZE(test_sgemm_RNT_randoms);
static void test_sgemm_RNT_benchmark();

int setup_suite_sgemm_RNT() { srand(0xDEADBEEF); return 0; }
int teardown_suite_sgemm_RNT() { return 0; }

void suite_sgemm_RNT() {
  CU_pSuite suite = CU_add_suite("sgemm RNT", setup_suite_sgemm_RNT, teardown_suite_sgemm_RNT);
  CU_add_test(suite, "ones (small)", test_sgemm_RNT_ones_S);
  CU_add_test(suite, "ones (medium)", test_sgemm_RNT_ones_M);
  CU_add_test(suite, "ones (large)", test_sgemm_RNT_ones_L);
  CU_add_test(suite, "randoms (small)", test_sgemm_RNT_randoms_S);
  CU_add_test(suite, "randoms (medium)", test_sgemm_RNT_randoms_M);
  CU_add_test(suite, "randoms (large)", test_sgemm_RNT_randoms_L);
  CU_add_test(suite, "benchmark", test_sgemm_RNT_benchmark);
}

/* Ones test with B transposed (B is N x K, ldb = K). */
static void test_sgemm_RNT_ones(const int M, const int N, const int K) {
  float* A = mkl_malloc_ones(M, K);
  float* B = mkl_malloc_ones(N, K);
  float* C = mkl_malloc_ones(M, N);

  cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
              M, N, K, 1, A, K, B, K, 1, C, N);

  int diff = 0;
  {
    int i, j;
    for (i = 0; i < M; ++i)
      for (j = 0; j < N; ++j)
        diff |= K+1 - (int)C[i*N+j];
    CU_ASSERT_EQUAL(diff, 0);
  }
#ifdef HAVE_PNG
  if (diff) {
    {
      int i, j;
#pragma omp parallel for private(i, j)
      for (i = 0; i < M; ++i)
        for (j = 0; j < N; ++j)
          C[i*N+j] = (K+1 == (int)C[i*N+j]) ? 255 : 0;
    }
    char file[256] = {0};
    sprintf(file, "ones_%dx%d_%dx%d.png", M, K, K, N);
    visualize(file, M, N, C);
  }
#endif
  mkl_free(C);
  mkl_free(B);
  mkl_free(A);
}

IMPL_TEST_FOR_EACH_SIZE(test_sgemm_RNT_ones);

/* Randoms test with B transposed; reference indexes B as B[j*K+k]. */
static void test_sgemm_RNT_randoms(const int M, const int N, const int K) {
  float* A = mkl_malloc_randoms(M, K);
  float* B = mkl_malloc_randoms(N, K);
  float* C = mkl_malloc_randoms(M, N);
  float* A_ref = malloc(M*K*sizeof(float));
  float* B_ref = malloc(N*K*sizeof(float));
  float* C_ref = malloc(M*N*sizeof(float));
  memcpy(A_ref, A, M*K*sizeof(float));
  memcpy(B_ref, B, N*K*sizeof(float));
  memcpy(C_ref, C, M*N*sizeof(float));
  const float alpha = rand_float_in_range(-1.0, 1.0);
  const float beta = rand_float_in_range(-1.0, 1.0);

  cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
              M, N, K, alpha, A, K, B, K, beta, C, N);

  {
    int i, j, k;
#pragma omp parallel for private(i, j, k)
    for (i = 0; i < M; ++i) {
      for (j = 0; j < N; ++j) {
        float acc = 0;
        for (k = 0; k < K; ++k)
          acc += A_ref[i*K+k] * B_ref[j*K+k];
        C_ref[i*N+j] = alpha * acc + beta * C_ref[i*N+j];
      }
    }
  }
  {
    float maximum_abs_error = 0;
    int i, j;
#pragma omp parallel for private(i, j) reduction(max: maximum_abs_error)
    for (i = 0; i < M; ++i) {
      for (j = 0; j < N; ++j) {
        if (maximum_abs_error < fabsf(C_ref[i*N+j] - C[i*N+j]))
          maximum_abs_error = fabsf(C_ref[i*N+j] - C[i*N+j]);
      }
    }
    CU_ASSERT_DOUBLE_EQUAL(maximum_abs_error, 0, 0.001);
  }
  free(C_ref);
  free(B_ref);
  free(A_ref);
  mkl_free(C);
  mkl_free(B);
  mkl_free(A);
}

IMPL_TEST_FOR_EACH_SIZE(test_sgemm_RNT_randoms);

/* Time MKL sgemm vs the naive CPU reference for a fixed RNT shape. */
void test_sgemm_RNT_benchmark() {
  const int M = 96;
  const int N = 3072;
  const int K = 363;
  float* A = mkl_malloc_randoms(M, K);
  float* B = mkl_malloc_randoms(N, K);
  float* C = mkl_malloc_randoms(M, N);
  float* A_ref = malloc(M*K*sizeof(float));
  float* B_ref = malloc(N*K*sizeof(float));
  float* C_ref = malloc(M*N*sizeof(float));
  memcpy(A_ref, A, M*K*sizeof(float));
  memcpy(B_ref, B, N*K*sizeof(float));
  memcpy(C_ref, C, M*N*sizeof(float));
  const float alpha = rand_float_in_range(-1.0, 1.0);
  const float beta = rand_float_in_range(-1.0, 1.0);

  printf("\nRNT: %dx%d * %dx%d\n", M, K, K, N);
  {
    double start = get_time();
    cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                M, N, K, alpha, A, K, B, K, beta, C, N);
    double elapsed_time = get_time() - start;
    printf("GPU: %9.6lf [sec], %9.6lf [Gflop/s]\n",
           elapsed_time, (2 * M * N * K + 3 * M * N) / elapsed_time * 1e-9);
  }
  {
    double start = get_time();
    int i, j, k;
#pragma omp parallel for private(i, j, k)
    for (i = 0; i < M; ++i) {
      for (j = 0; j < N; ++j) {
        float acc = 0;
        for (k = 0; k < K; ++k)
          acc += A_ref[i*K+k] * B_ref[j*K+k];
        C_ref[i*N+j] = alpha * acc + beta * C_ref[i*N+j];
      }
    }
    double elapsed_time = get_time() - start;
    printf("CPU (%d threads): %9.6lf [sec], %9.6lf [Gflop/s]\n",
           omp_get_max_threads(), elapsed_time,
           (2 * M * N * K + 3 * M * N) / elapsed_time * 1e-9);
  }
  free(C_ref);
  free(B_ref);
  free(A_ref);
  mkl_free(C);
  mkl_free(B);
  mkl_free(A);
}

DECL_TEST_FOR_EACH_SIZE(test_sgemm_RTN_ones);
DECL_TEST_FOR_EACH_SIZE(test_sgemm_RTN_randoms);
static void test_sgemm_RTN_benchmark();

int setup_suite_sgemm_RTN() { srand(0xDEADBEEF); return 0; }
int teardown_suite_sgemm_RTN() { return 0; }

void suite_sgemm_RTN() {
  CU_pSuite suite = CU_add_suite("sgemm RTN", setup_suite_sgemm_RTN, teardown_suite_sgemm_RTN);
  CU_add_test(suite, "ones (small)", test_sgemm_RTN_ones_S);
  CU_add_test(suite, "ones (medium)", test_sgemm_RTN_ones_M);
  CU_add_test(suite, "ones (large)", test_sgemm_RTN_ones_L);
  CU_add_test(suite, "randoms (small)", test_sgemm_RTN_randoms_S);
  CU_add_test(suite, "randoms (medium)", test_sgemm_RTN_randoms_M);
  CU_add_test(suite, "randoms (large)", test_sgemm_RTN_randoms_L);
  CU_add_test(suite, "benchmark", test_sgemm_RTN_benchmark);
}

/* Ones test with A transposed (A is K x M, lda = M). */
static void test_sgemm_RTN_ones(const int M, const int N, const int K) {
  float* A = mkl_malloc_ones(K, M);
  float* B = mkl_malloc_ones(K, N);
  float* C = mkl_malloc_ones(M, N);

  cblas_sgemm(CblasRowMajor, CblasTrans, CblasNoTrans,
              M, N, K, 1, A, M, B, N, 1, C, N);

  int diff = 0;
  {
    int i, j;
    for (i = 0; i < M; ++i)
      for (j = 0; j < N; ++j)
        diff |= K+1 - (int)C[i*N+j];
    CU_ASSERT_EQUAL(diff, 0);
  }
#ifdef HAVE_PNG
  if (diff) {
    {
      int i, j;
#pragma omp parallel for private(i, j)
      for (i = 0; i < M; ++i)
        for (j = 0; j < N; ++j)
          C[i*N+j] = (K+1 == (int)C[i*N+j]) ? 255 : 0;
    }
    char file[256] = {0};
    sprintf(file, "ones_%dx%d_%dx%d.png", M, K, K, N);
    visualize(file, M, N, C);
  }
#endif
  mkl_free(C);
  mkl_free(B);
  mkl_free(A);
}

IMPL_TEST_FOR_EACH_SIZE(test_sgemm_RTN_ones);

/* Randoms test with A transposed; reference indexes A as A[k*M+i]. */
static void test_sgemm_RTN_randoms(const int M, const int N, const int K) {
  float* A = mkl_malloc_randoms(K, M);
  float* B = mkl_malloc_randoms(K, N);
  float* C = mkl_malloc_randoms(M, N);
  float* A_ref = malloc(K*M*sizeof(float));
  float* B_ref = malloc(K*N*sizeof(float));
  float* C_ref = malloc(M*N*sizeof(float));
  memcpy(A_ref, A, K*M*sizeof(float));
  memcpy(B_ref, B, K*N*sizeof(float));
  memcpy(C_ref, C, M*N*sizeof(float));
  const float alpha = rand_float_in_range(-1.0, 1.0);
  const float beta = rand_float_in_range(-1.0, 1.0);

  cblas_sgemm(CblasRowMajor, CblasTrans, CblasNoTrans,
              M, N, K, alpha, A, M, B, N, beta, C, N);

  {
    int i, j, k;
#pragma omp parallel for private(i, j, k)
    for (i = 0; i < M; ++i) {
      for (j = 0; j < N; ++j) {
        float acc = 0;
        for (k = 0; k < K; ++k)
          acc += A_ref[k*M+i] * B_ref[k*N+j];
        C_ref[i*N+j] = alpha * acc + beta * C_ref[i*N+j];
      }
    }
  }
  {
    float maximum_abs_error = 0;
    int i, j;
#pragma omp parallel for private(i, j) reduction(max: maximum_abs_error)
    for (i = 0; i < M; ++i) {
      for (j = 0; j < N; ++j) {
        if (maximum_abs_error < fabsf(C_ref[i*N+j] - C[i*N+j]))
          maximum_abs_error = fabsf(C_ref[i*N+j] - C[i*N+j]);
      }
    }
    CU_ASSERT_DOUBLE_EQUAL(maximum_abs_error, 0, 0.001);
  }
  free(C_ref);
  free(B_ref);
  free(A_ref);
  mkl_free(C);
  mkl_free(B);
  mkl_free(A);
}

IMPL_TEST_FOR_EACH_SIZE(test_sgemm_RTN_randoms);

/* Time MKL sgemm vs the naive CPU reference for a fixed RTN shape. */
void test_sgemm_RTN_benchmark() {
  const int M = 96;
  const int N = 3072;
  const int K = 363;
  float* A = mkl_malloc_randoms(K, M);
  float* B = mkl_malloc_randoms(K, N);
  float* C = mkl_malloc_randoms(M, N);
  float* A_ref = malloc(K*M*sizeof(float));
  float* B_ref = malloc(K*N*sizeof(float));
  float* C_ref = malloc(M*N*sizeof(float));
  memcpy(A_ref, A, K*M*sizeof(float));
  memcpy(B_ref, B, K*N*sizeof(float));
  memcpy(C_ref, C, M*N*sizeof(float));
  const float alpha = rand_float_in_range(-1.0, 1.0);
  const float beta = rand_float_in_range(-1.0, 1.0);

  printf("\nRTN: %dx%d * %dx%d\n", M, K, K, N);
  {
    double start = get_time();
    cblas_sgemm(CblasRowMajor, CblasTrans, CblasNoTrans,
                M, N, K, alpha, A, M, B, N, beta, C, N);
    double elapsed_time = get_time() - start;
    printf("GPU: %9.6lf [sec], %9.6lf [Gflop/s]\n",
           elapsed_time, (2 * M * N * K + 3 * M * N) / elapsed_time * 1e-9);
  }
  {
    double start = get_time();
    int i, j, k;
#pragma omp parallel for private(i, j, k)
    for (i = 0; i < M; ++i) {
      for (j = 0; j < N; ++j) {
        float acc = 0;
        for (k = 0; k < K; ++k)
          acc += A_ref[k*M+i] * B_ref[k*N+j];
        C_ref[i*N+j] = alpha * acc + beta * C_ref[i*N+j];
      }
    }
    double elapsed_time = get_time() - start;
    printf("CPU (%d threads): %9.6lf [sec], %9.6lf [Gflop/s]\n",
           omp_get_max_threads(), elapsed_time,
           (2 * M * N * K + 3 * M * N) / elapsed_time * 1e-9);
  }
  free(C_ref);
  free(B_ref);
  free(A_ref);
  mkl_free(C);
  mkl_free(B);
  mkl_free(A);
}

DECL_TEST_FOR_EACH_SIZE(test_sgemm_RTT_ones);
DECL_TEST_FOR_EACH_SIZE(test_sgemm_RTT_randoms);
static void test_sgemm_RTT_benchmark();

int setup_suite_sgemm_RTT() { srand(0xDEADBEEF); return 0; }
int teardown_suite_sgemm_RTT() { return 0; }

void suite_sgemm_RTT() {
  CU_pSuite suite = CU_add_suite("sgemm RTT", setup_suite_sgemm_RTT, teardown_suite_sgemm_RTT);
  CU_add_test(suite, "ones (small)", test_sgemm_RTT_ones_S);
  CU_add_test(suite, "ones (medium)", test_sgemm_RTT_ones_M);
  CU_add_test(suite, "ones (large)", test_sgemm_RTT_ones_L);
  CU_add_test(suite, "randoms (small)", test_sgemm_RTT_randoms_S);
  CU_add_test(suite, "randoms (medium)", test_sgemm_RTT_randoms_M);
  CU_add_test(suite, "randoms (large)", test_sgemm_RTT_randoms_L);
  CU_add_test(suite, "benchmark", test_sgemm_RTT_benchmark);
}

/* Ones test with both A and B transposed. */
static void test_sgemm_RTT_ones(const int M, const int N, const int K) {
  float* A = mkl_malloc_ones(K, M);
  float* B = mkl_malloc_ones(N, K);
  float* C = mkl_malloc_ones(M, N);

  cblas_sgemm(CblasRowMajor, CblasTrans, CblasTrans,
              M, N, K, 1, A, M, B, K, 1, C, N);

  int diff = 0;
  {
    int i, j;
    for (i = 0; i < M; ++i)
      for (j = 0; j < N; ++j)
        diff |= K+1 - (int)C[i*N+j];
    CU_ASSERT_EQUAL(diff, 0);
  }
#ifdef HAVE_PNG
  if (diff) {
    {
      int i, j;
#pragma omp parallel for private(i, j)
      for (i = 0; i < M; ++i)
        for (j = 0; j < N; ++j)
          C[i*N+j] = (K+1 == (int)C[i*N+j]) ? 255 : 0;
    }
    char file[256] = {0};
    sprintf(file, "ones_%dx%d_%dx%d.png", M, K, K, N);
    visualize(file, M, N, C);
  }
#endif
  mkl_free(C);
  mkl_free(B);
  mkl_free(A);
}

IMPL_TEST_FOR_EACH_SIZE(test_sgemm_RTT_ones);

/* Randoms test with both operands transposed. */
static void test_sgemm_RTT_randoms(const int M, const int N, const int K) {
  float* A = mkl_malloc_randoms(K, M);
  float* B = mkl_malloc_randoms(N, K);
  float* C = mkl_malloc_randoms(M, N);
  float* A_ref = malloc(K*M*sizeof(float));
  float* B_ref = malloc(N*K*sizeof(float));
  float* C_ref = malloc(M*N*sizeof(float));
  memcpy(A_ref, A, K*M*sizeof(float));
  memcpy(B_ref, B, N*K*sizeof(float));
  memcpy(C_ref, C, M*N*sizeof(float));
  const float alpha = rand_float_in_range(-1.0, 1.0);
  const float beta = rand_float_in_range(-1.0, 1.0);

  cblas_sgemm(CblasRowMajor, CblasTrans, CblasTrans,
              M, N, K, alpha, A, M, B, K, beta, C, N);

  {
    int i, j, k;
#pragma omp parallel for private(i, j, k)
    for (i = 0; i < M; ++i) {
      for (j = 0; j < N; ++j) {
        float acc = 0;
        for (k = 0; k < K; ++k)
          acc += A_ref[k*M+i] * B_ref[j*K+k];
        C_ref[i*N+j] = alpha * acc + beta * C_ref[i*N+j];
      }
    }
  }
  {
    float maximum_abs_error = 0;
    int i, j;
#pragma omp parallel for private(i, j) reduction(max: maximum_abs_error)
    for (i = 0; i < M; ++i) {
      for (j = 0; j < N; ++j) {
        if (maximum_abs_error < fabsf(C_ref[i*N+j] - C[i*N+j]))
          maximum_abs_error = fabsf(C_ref[i*N+j] - C[i*N+j]);
      }
    }
    CU_ASSERT_DOUBLE_EQUAL(maximum_abs_error, 0, 0.001);
  }
  free(C_ref);
  free(B_ref);
  free(A_ref);
  mkl_free(C);
  mkl_free(B);
  mkl_free(A);
}

IMPL_TEST_FOR_EACH_SIZE(test_sgemm_RTT_randoms);

/* Time MKL sgemm vs the naive CPU reference for a fixed RTT shape. */
void test_sgemm_RTT_benchmark() {
  const int M = 3072;
  const int N = 96;
  const int K = 363;
  float* A = mkl_malloc_randoms(K, M);
  float* B = mkl_malloc_randoms(N, K);
  float* C = mkl_malloc_randoms(M, N);
  float* A_ref = malloc(K*M*sizeof(float));
  float* B_ref = malloc(N*K*sizeof(float));
  float* C_ref = malloc(M*N*sizeof(float));
  memcpy(A_ref, A, K*M*sizeof(float));
  memcpy(B_ref, B, N*K*sizeof(float));
  memcpy(C_ref, C, M*N*sizeof(float));
  const float alpha = rand_float_in_range(-1.0, 1.0);
  const float beta = rand_float_in_range(-1.0, 1.0);

  printf("\nRTT: %dx%d * %dx%d\n", M, K, K, N);
  {
    double start = get_time();
    cblas_sgemm(CblasRowMajor, CblasTrans, CblasTrans,
                M, N, K, alpha, A, M, B, K, beta, C, N);
    double elapsed_time = get_time() - start;
    printf("GPU: %9.6lf [sec], %9.6lf [Gflop/s]\n",
           elapsed_time, (2 * M * N * K + 3 * M * N) / elapsed_time * 1e-9);
  }
  {
    double start = get_time();
    int i, j, k;
#pragma omp parallel for private(i, j, k)
    for (i = 0; i < M; ++i) {
      for (j = 0; j < N; ++j) {
        float acc = 0;
        for (k = 0; k < K; ++k)
          acc += A_ref[k*M+i] * B_ref[j*K+k];
        C_ref[i*N+j] = alpha * acc + beta * C_ref[i*N+j];
      }
    }
    double elapsed_time = get_time() - start;
    printf("CPU (%d threads): %9.6lf [sec], %9.6lf [Gflop/s]\n",
           omp_get_max_threads(), elapsed_time,
           (2 * M * N * K + 3 * M * N) / elapsed_time * 1e-9);
  }
  free(C_ref);
  free(B_ref);
  free(A_ref);
  mkl_free(C);
  mkl_free(B);
  mkl_free(A);
}

static void test_sgemm_with_mempool_ones();
static void test_sgemm_with_mempool_randoms();

int setup_suite_sgemm_with_mempool() { srand(0xDEADBEEF); return 0; }
int teardown_suite_sgemm_with_mempool() { return 0; }

void suite_sgemm_with_mempool() {
  CU_pSuite suite = CU_add_suite("sgemm with mempool",
                                 setup_suite_sgemm_with_mempool,
                                 teardown_suite_sgemm_with_mempool);
  CU_add_test(suite, "ones", test_sgemm_with_mempool_ones);
  CU_add_test(suite, "randoms", test_sgemm_with_mempool_randoms);
}

/* Ones test where A, B, C are carved out of one contiguous pool allocation,
 * exercising sgemm with adjacent (but non-overlapping) operand buffers. */
static void test_sgemm_with_mempool_ones() {
  const int M = 96;
  const int N = 3072;
  const int K = 363;
  const int size = M * K + K * N + M * N;
  float* pool = mkl_malloc(size * sizeof(float), 4096);
  {
    int i = 0;
    for (i = 0; i < size; ++i) {
      pool[i] = 1.0;
    }
  }
  float* A = pool;
  float* B = A + M * K;
  float* C = B + K * N;

  cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
              M, N, K, 1, A, K, B, N, 1, C, N);

  int diff = 0;
  {
    int i, j;
    for (i = 0; i < M; ++i)
      for (j = 0; j < N; ++j)
        diff |= K+1 - (int)C[i*N+j];
    CU_ASSERT_EQUAL(diff, 0);
  }
#ifdef HAVE_PNG
  if (diff) {
    {
      int i, j;
#pragma omp parallel for private(i, j)
      for (i = 0; i < M; ++i)
        for (j = 0; j < N; ++j)
          C[i*N+j] = (K+1 == (int)C[i*N+j]) ? 255 : 0;
    }
    char file[256] = {0};
    sprintf(file, "ones_%dx%d_%dx%d.png", M, K, K, N);
    visualize(file, M, N, C);
  }
#endif
  mkl_free(pool);
}

/* Randoms test on pooled buffers; also verifies sgemm leaves A and B intact. */
void test_sgemm_with_mempool_randoms() {
  const int M = 96;
  const int N = 3072;
  const int K = 363;
  const int size = M * K + K * N + M * N;
  float* pool = mkl_malloc(size * sizeof(float), 4096);
  {
    int i = 0;
    for (i = 0; i < size; ++i) {
      pool[i] = rand_float_in_range(-1.0, 1.0);
    }
  }
  float* A = pool;
  float* B = A + M * K;
  float* C = B + K * N;
  float* A_ref = malloc(M*K*sizeof(float));
  float* B_ref = malloc(K*N*sizeof(float));
  float* C_ref = malloc(M*N*sizeof(float));
  memcpy(A_ref, A, M*K*sizeof(float));
  memcpy(B_ref, B, K*N*sizeof(float));
  memcpy(C_ref, C, M*N*sizeof(float));
  const float alpha = rand_float_in_range(-1.0, 1.0);
  const float beta = rand_float_in_range(-1.0, 1.0);

  cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
              M, N, K, alpha, A, K, B, N, beta, C, N);

  {
    int i, j, k;
#pragma omp parallel for private(i, j, k)
    for (i = 0; i < M; ++i) {
      for (j = 0; j < N; ++j) {
        float acc = 0;
        for (k = 0; k < K; ++k)
          acc += A_ref[i*K+k] * B_ref[k*N+j];
        C_ref[i*N+j] = alpha * acc + beta * C_ref[i*N+j];
      }
    }
  }
  { // C = alpha * A * B + beta * C
    float maximum_abs_error = 0;
    int i, j;
#pragma omp parallel for private(i, j) reduction(max: maximum_abs_error)
    for (i = 0; i < M; ++i) {
      for (j = 0; j < N; ++j) {
        if (maximum_abs_error < fabsf(C_ref[i*N+j] - C[i*N+j]))
          maximum_abs_error = fabsf(C_ref[i*N+j] - C[i*N+j]);
      }
    }
    CU_ASSERT_DOUBLE_EQUAL(maximum_abs_error, 0, 0.001);
  }
  { // A and B are not modified
    int eq = 1;
    int i, j;
    for (i = 0; i < M; ++i) {
      for (j = 0; j < K; ++j) {
        eq &= A_ref[i*K+j] == A[i*K+j];
      }
    }
    for (i = 0; i < K; ++i) {
      for (j = 0; j < N; ++j) {
        eq &= B_ref[i*N+j] == B[i*N+j];
      }
    }
    CU_ASSERT(eq);
  }
  free(C_ref);
  free(B_ref);
  free(A_ref);
  mkl_free(pool);
}
implicit_blender.c
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * The Original Code is Copyright (C) Blender Foundation * All rights reserved. */ /** \file * \ingroup bph */ #include "implicit.h" #ifdef IMPLICIT_SOLVER_BLENDER # include "MEM_guardedalloc.h" # include "DNA_scene_types.h" # include "DNA_object_types.h" # include "DNA_object_force_types.h" # include "DNA_meshdata_types.h" # include "DNA_texture_types.h" # include "BLI_math.h" # include "BLI_utildefines.h" # include "BKE_cloth.h" # include "BKE_collision.h" # include "BKE_effect.h" # include "BPH_mass_spring.h" # ifdef __GNUC__ # pragma GCC diagnostic ignored "-Wtype-limits" # endif # ifdef _OPENMP # define CLOTH_OPENMP_LIMIT 512 # endif //#define DEBUG_TIME # ifdef DEBUG_TIME # include "PIL_time.h" # endif static float I[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}}; static float ZERO[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}}; # if 0 # define C99 # ifdef C99 # defineDO_INLINE inline # else # defineDO_INLINE static # endif # endif /* if 0 */ struct Cloth; ////////////////////////////////////////// /* fast vector / matrix library, enhancements are welcome :) -dg */ ///////////////////////////////////////// /* DEFINITIONS */ typedef float lfVector[3]; typedef struct fmatrix3x3 { float m[3][3]; /* 3x3 matrix */ unsigned int c, r; /* column and row number */ /* int pinned; // 
is this vertex allowed to move? */ float n1, n2, n3; /* three normal vectors for collision constrains */ unsigned int vcount; /* vertex count */ unsigned int scount; /* spring count */ } fmatrix3x3; /////////////////////////// // float[3] vector /////////////////////////// /* simple vector code */ /* STATUS: verified */ DO_INLINE void mul_fvector_S(float to[3], float from[3], float scalar) { to[0] = from[0] * scalar; to[1] = from[1] * scalar; to[2] = from[2] * scalar; } /* simple v^T * v product ("outer product") */ /* STATUS: HAS TO BE verified (*should* work) */ DO_INLINE void mul_fvectorT_fvector(float to[3][3], float vectorA[3], float vectorB[3]) { mul_fvector_S(to[0], vectorB, vectorA[0]); mul_fvector_S(to[1], vectorB, vectorA[1]); mul_fvector_S(to[2], vectorB, vectorA[2]); } /* simple v^T * v product with scalar ("outer product") */ /* STATUS: HAS TO BE verified (*should* work) */ DO_INLINE void mul_fvectorT_fvectorS(float to[3][3], float vectorA[3], float vectorB[3], float aS) { mul_fvectorT_fvector(to, vectorA, vectorB); mul_fvector_S(to[0], to[0], aS); mul_fvector_S(to[1], to[1], aS); mul_fvector_S(to[2], to[2], aS); } # if 0 /* printf vector[3] on console: for debug output */ static void print_fvector(float m3[3]) { printf("%f\n%f\n%f\n\n", m3[0], m3[1], m3[2]); } /////////////////////////// // long float vector float (*)[3] /////////////////////////// /* print long vector on console: for debug output */ DO_INLINE void print_lfvector(float (*fLongVector)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { print_fvector(fLongVector[i]); } } # endif /* create long vector */ DO_INLINE lfVector *create_lfvector(unsigned int verts) { /* TODO: check if memory allocation was successful */ return (lfVector *)MEM_callocN(verts * sizeof(lfVector), "cloth_implicit_alloc_vector"); // return (lfVector *)cloth_aligned_malloc(&MEMORY_BASE, verts * sizeof(lfVector)); } /* delete long vector */ DO_INLINE void del_lfvector(float (*fLongVector)[3]) { 
if (fLongVector != NULL) { MEM_freeN(fLongVector); // cloth_aligned_free(&MEMORY_BASE, fLongVector); } } /* copy long vector */ DO_INLINE void cp_lfvector(float (*to)[3], float (*from)[3], unsigned int verts) { memcpy(to, from, verts * sizeof(lfVector)); } /* init long vector with float[3] */ DO_INLINE void init_lfvector(float (*fLongVector)[3], float vector[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { copy_v3_v3(fLongVector[i], vector); } } /* zero long vector with float[3] */ DO_INLINE void zero_lfvector(float (*to)[3], unsigned int verts) { memset(to, 0.0f, verts * sizeof(lfVector)); } /* multiply long vector with scalar*/ DO_INLINE void mul_lfvectorS(float (*to)[3], float (*fLongVector)[3], float scalar, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { mul_fvector_S(to[i], fLongVector[i], scalar); } } /* multiply long vector with scalar*/ /* A -= B * float */ DO_INLINE void submul_lfvectorS(float (*to)[3], float (*fLongVector)[3], float scalar, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECSUBMUL(to[i], fLongVector[i], scalar); } } /* dot product for big vector */ DO_INLINE float dot_lfvector(float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { long i = 0; float temp = 0.0; // XXX brecht, disabled this for now (first schedule line was already disabled), // due to non-commutative nature of floating point ops this makes the sim give // different results each time you run it! 
// schedule(guided, 2) //#pragma omp parallel for reduction(+: temp) if (verts > CLOTH_OPENMP_LIMIT) for (i = 0; i < (long)verts; i++) { temp += dot_v3v3(fLongVectorA[i], fLongVectorB[i]); } return temp; } /* A = B + C --> for big vector */ DO_INLINE void add_lfvector_lfvector(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { add_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]); } } /* A = B + C * float --> for big vector */ DO_INLINE void add_lfvector_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECADDS(to[i], fLongVectorA[i], fLongVectorB[i], bS); } } /* A = B * float + C * float --> for big vector */ DO_INLINE void add_lfvectorS_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float aS, float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECADDSS(to[i], fLongVectorA[i], aS, fLongVectorB[i], bS); } } /* A = B - C * float --> for big vector */ DO_INLINE void sub_lfvector_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECSUBS(to[i], fLongVectorA[i], fLongVectorB[i], bS); } } /* A = B - C --> for big vector */ DO_INLINE void sub_lfvector_lfvector(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { sub_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]); } } /////////////////////////// // 3x3 matrix /////////////////////////// # if 0 /* printf 3x3 matrix on console: for debug output */ static void print_fmatrix(float m3[3][3]) { printf("%f\t%f\t%f\n", m3[0][0], m3[0][1], m3[0][2]); printf("%f\t%f\t%f\n", m3[1][0], m3[1][1], m3[1][2]); printf("%f\t%f\t%f\n\n", m3[2][0], m3[2][1], m3[2][2]); } static void 
print_sparse_matrix(fmatrix3x3 *m) { if (m) { unsigned int i; for (i = 0; i < m[0].vcount + m[0].scount; i++) { printf("%d:\n", i); print_fmatrix(m[i].m); } } } # endif # if 0 static void print_lvector(lfVector *v, int numverts) { int i; for (i = 0; i < numverts; ++i) { if (i > 0) printf("\n"); printf("%f,\n", v[i][0]); printf("%f,\n", v[i][1]); printf("%f,\n", v[i][2]); } } # endif # if 0 static void print_bfmatrix(fmatrix3x3 *m) { int tot = m[0].vcount + m[0].scount; int size = m[0].vcount * 3; float *t = MEM_callocN(sizeof(float) * size * size, "bfmatrix"); int q, i, j; for (q = 0; q < tot; ++q) { int k = 3 * m[q].r; int l = 3 * m[q].c; for (j = 0; j < 3; ++j) { for (i = 0; i < 3; ++i) { // if (t[k + i + (l + j) * size] != 0.0f) { // printf("warning: overwriting value at %d, %d\n", m[q].r, m[q].c); // } if (k == l) { t[k + i + (k + j) * size] += m[q].m[i][j]; } else { t[k + i + (l + j) * size] += m[q].m[i][j]; t[l + j + (k + i) * size] += m[q].m[j][i]; } } } } for (j = 0; j < size; ++j) { if (j > 0 && j % 3 == 0) printf("\n"); for (i = 0; i < size; ++i) { if (i > 0 && i % 3 == 0) printf(" "); implicit_print_matrix_elem(t[i + j * size]); } printf("\n"); } MEM_freeN(t); } # endif /* copy 3x3 matrix */ DO_INLINE void cp_fmatrix(float to[3][3], float from[3][3]) { // memcpy(to, from, sizeof (float) * 9); copy_v3_v3(to[0], from[0]); copy_v3_v3(to[1], from[1]); copy_v3_v3(to[2], from[2]); } /* copy 3x3 matrix */ DO_INLINE void initdiag_fmatrixS(float to[3][3], float aS) { cp_fmatrix(to, ZERO); to[0][0] = aS; to[1][1] = aS; to[2][2] = aS; } # if 0 /* calculate determinant of 3x3 matrix */ DO_INLINE float det_fmatrix(float m[3][3]) { return m[0][0] * m[1][1] * m[2][2] + m[1][0] * m[2][1] * m[0][2] + m[0][1] * m[1][2] * m[2][0] - m[0][0] * m[1][2] * m[2][1] - m[0][1] * m[1][0] * m[2][2] - m[2][0] * m[1][1] * m[0][2]; } DO_INLINE void inverse_fmatrix(float to[3][3], float from[3][3]) { unsigned int i, j; float d; if ((d = det_fmatrix(from)) == 0) { printf("can't build 
inverse"); exit(0); } for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { int i1 = (i + 1) % 3; int i2 = (i + 2) % 3; int j1 = (j + 1) % 3; int j2 = (j + 2) % 3; /** Reverse indexes i&j to take transpose. */ to[j][i] = (from[i1][j1] * from[i2][j2] - from[i1][j2] * from[i2][j1]) / d; /** * <pre> * if (i == j) { * to[i][j] = 1.0f / from[i][j]; * } * else { * to[i][j] = 0; * } * </pre> */ } } } # endif /* 3x3 matrix multiplied by a scalar */ /* STATUS: verified */ DO_INLINE void mul_fmatrix_S(float matrix[3][3], float scalar) { mul_fvector_S(matrix[0], matrix[0], scalar); mul_fvector_S(matrix[1], matrix[1], scalar); mul_fvector_S(matrix[2], matrix[2], scalar); } /* a vector multiplied by a 3x3 matrix */ /* STATUS: verified */ DO_INLINE void mul_fvector_fmatrix(float *to, float *from, float matrix[3][3]) { to[0] = matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2]; to[1] = matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2]; to[2] = matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2]; } /* 3x3 matrix multiplied by a vector */ /* STATUS: verified */ DO_INLINE void mul_fmatrix_fvector(float *to, float matrix[3][3], float from[3]) { to[0] = dot_v3v3(matrix[0], from); to[1] = dot_v3v3(matrix[1], from); to[2] = dot_v3v3(matrix[2], from); } /* 3x3 matrix addition with 3x3 matrix */ DO_INLINE void add_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3]) { add_v3_v3v3(to[0], matrixA[0], matrixB[0]); add_v3_v3v3(to[1], matrixA[1], matrixB[1]); add_v3_v3v3(to[2], matrixA[2], matrixB[2]); } /* A -= B*x + C*y (3x3 matrix sub-addition with 3x3 matrix) */ DO_INLINE void subadd_fmatrixS_fmatrixS( float to[3][3], float matrixA[3][3], float aS, float matrixB[3][3], float bS) { VECSUBADDSS(to[0], matrixA[0], aS, matrixB[0], bS); VECSUBADDSS(to[1], matrixA[1], aS, matrixB[1], bS); VECSUBADDSS(to[2], matrixA[2], aS, matrixB[2], bS); } /* A = B - C (3x3 matrix subtraction with 3x3 matrix) */ DO_INLINE void 
sub_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3]) { sub_v3_v3v3(to[0], matrixA[0], matrixB[0]); sub_v3_v3v3(to[1], matrixA[1], matrixB[1]); sub_v3_v3v3(to[2], matrixA[2], matrixB[2]); } ///////////////////////////////////////////////////////////////// // special functions ///////////////////////////////////////////////////////////////// /* 3x3 matrix multiplied+added by a vector */ /* STATUS: verified */ DO_INLINE void muladd_fmatrix_fvector(float to[3], float matrix[3][3], float from[3]) { to[0] += dot_v3v3(matrix[0], from); to[1] += dot_v3v3(matrix[1], from); to[2] += dot_v3v3(matrix[2], from); } DO_INLINE void muladd_fmatrixT_fvector(float to[3], float matrix[3][3], float from[3]) { to[0] += matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2]; to[1] += matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2]; to[2] += matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2]; } BLI_INLINE void outerproduct(float r[3][3], const float a[3], const float b[3]) { mul_v3_v3fl(r[0], a, b[0]); mul_v3_v3fl(r[1], a, b[1]); mul_v3_v3fl(r[2], a, b[2]); } BLI_INLINE void cross_m3_v3m3(float r[3][3], const float v[3], float m[3][3]) { cross_v3_v3v3(r[0], v, m[0]); cross_v3_v3v3(r[1], v, m[1]); cross_v3_v3v3(r[2], v, m[2]); } BLI_INLINE void cross_v3_identity(float r[3][3], const float v[3]) { r[0][0] = 0.0f; r[1][0] = v[2]; r[2][0] = -v[1]; r[0][1] = -v[2]; r[1][1] = 0.0f; r[2][1] = v[0]; r[0][2] = v[1]; r[1][2] = -v[0]; r[2][2] = 0.0f; } BLI_INLINE void madd_m3_m3fl(float r[3][3], float m[3][3], float f) { r[0][0] += m[0][0] * f; r[0][1] += m[0][1] * f; r[0][2] += m[0][2] * f; r[1][0] += m[1][0] * f; r[1][1] += m[1][1] * f; r[1][2] += m[1][2] * f; r[2][0] += m[2][0] * f; r[2][1] += m[2][1] * f; r[2][2] += m[2][2] * f; } ///////////////////////////////////////////////////////////////// /////////////////////////// // SPARSE SYMMETRIC big matrix with 3x3 matrix entries /////////////////////////// /* 
printf a big matrix on console: for debug output */ # if 0 static void print_bfmatrix(fmatrix3x3 *m3) { unsigned int i = 0; for (i = 0; i < m3[0].vcount + m3[0].scount; i++) { print_fmatrix(m3[i].m); } } # endif BLI_INLINE void init_fmatrix(fmatrix3x3 *matrix, int r, int c) { matrix->r = r; matrix->c = c; } /* create big matrix */ DO_INLINE fmatrix3x3 *create_bfmatrix(unsigned int verts, unsigned int springs) { // TODO: check if memory allocation was successful */ fmatrix3x3 *temp = (fmatrix3x3 *)MEM_callocN(sizeof(fmatrix3x3) * (verts + springs), "cloth_implicit_alloc_matrix"); int i; temp[0].vcount = verts; temp[0].scount = springs; /* vertex part of the matrix is diagonal blocks */ for (i = 0; i < verts; ++i) { init_fmatrix(temp + i, i, i); } return temp; } /* delete big matrix */ DO_INLINE void del_bfmatrix(fmatrix3x3 *matrix) { if (matrix != NULL) { MEM_freeN(matrix); } } /* copy big matrix */ DO_INLINE void cp_bfmatrix(fmatrix3x3 *to, fmatrix3x3 *from) { // TODO bounds checking memcpy(to, from, sizeof(fmatrix3x3) * (from[0].vcount + from[0].scount)); } /* init big matrix */ // slow in parallel DO_INLINE void init_bfmatrix(fmatrix3x3 *matrix, float m3[3][3]) { unsigned int i; for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { cp_fmatrix(matrix[i].m, m3); } } /* init the diagonal of big matrix */ // slow in parallel DO_INLINE void initdiag_bfmatrix(fmatrix3x3 *matrix, float m3[3][3]) { unsigned int i, j; float tmatrix[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}}; for (i = 0; i < matrix[0].vcount; i++) { cp_fmatrix(matrix[i].m, m3); } for (j = matrix[0].vcount; j < matrix[0].vcount + matrix[0].scount; j++) { cp_fmatrix(matrix[j].m, tmatrix); } } /* SPARSE SYMMETRIC multiply big matrix with long vector*/ /* STATUS: verified */ DO_INLINE void mul_bfmatrix_lfvector(float (*to)[3], fmatrix3x3 *from, lfVector *fLongVector) { unsigned int i = 0; unsigned int vcount = from[0].vcount; lfVector *temp = create_lfvector(vcount); zero_lfvector(to, vcount); # pragma 
omp parallel sections private(i) if (vcount > CLOTH_OPENMP_LIMIT) { # pragma omp section { for (i = from[0].vcount; i < from[0].vcount + from[0].scount; i++) { /* This is the lower triangle of the sparse matrix, * therefore multiplication occurs with transposed submatrices. */ muladd_fmatrixT_fvector(to[from[i].c], from[i].m, fLongVector[from[i].r]); } } # pragma omp section { for (i = 0; i < from[0].vcount + from[0].scount; i++) { muladd_fmatrix_fvector(temp[from[i].r], from[i].m, fLongVector[from[i].c]); } } } add_lfvector_lfvector(to, to, temp, from[0].vcount); del_lfvector(temp); } /* SPARSE SYMMETRIC sub big matrix with big matrix*/ /* A -= B * float + C * float --> for big matrix */ /* VERIFIED */ DO_INLINE void subadd_bfmatrixS_bfmatrixS( fmatrix3x3 *to, fmatrix3x3 *from, float aS, fmatrix3x3 *matrix, float bS) { unsigned int i = 0; /* process diagonal elements */ for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { subadd_fmatrixS_fmatrixS(to[i].m, from[i].m, aS, matrix[i].m, bS); } } /////////////////////////////////////////////////////////////////// // simulator start /////////////////////////////////////////////////////////////////// typedef struct Implicit_Data { /* inputs */ fmatrix3x3 *bigI; /* identity (constant) */ fmatrix3x3 *tfm; /* local coordinate transform */ fmatrix3x3 *M; /* masses */ lfVector *F; /* forces */ fmatrix3x3 *dFdV, *dFdX; /* force jacobians */ int num_blocks; /* number of off-diagonal blocks (springs) */ /* motion state data */ lfVector *X, *Xnew; /* positions */ lfVector *V, *Vnew; /* velocities */ /* internal solver data */ lfVector *B; /* B for A*dV = B */ fmatrix3x3 *A; /* A for A*dV = B */ lfVector *dV; /* velocity change (solution of A*dV = B) */ lfVector *z; /* target velocity in constrained directions */ fmatrix3x3 *S; /* filtering matrix for constraints */ fmatrix3x3 *P, *Pinv; /* pre-conditioning matrix */ } Implicit_Data; Implicit_Data *BPH_mass_spring_solver_create(int numverts, int numsprings) { Implicit_Data 
*id = (Implicit_Data *)MEM_callocN(sizeof(Implicit_Data), "implicit vecmat"); /* process diagonal elements */ id->tfm = create_bfmatrix(numverts, 0); id->A = create_bfmatrix(numverts, numsprings); id->dFdV = create_bfmatrix(numverts, numsprings); id->dFdX = create_bfmatrix(numverts, numsprings); id->S = create_bfmatrix(numverts, 0); id->Pinv = create_bfmatrix(numverts, numsprings); id->P = create_bfmatrix(numverts, numsprings); id->bigI = create_bfmatrix(numverts, numsprings); // TODO 0 springs id->M = create_bfmatrix(numverts, numsprings); id->X = create_lfvector(numverts); id->Xnew = create_lfvector(numverts); id->V = create_lfvector(numverts); id->Vnew = create_lfvector(numverts); id->F = create_lfvector(numverts); id->B = create_lfvector(numverts); id->dV = create_lfvector(numverts); id->z = create_lfvector(numverts); initdiag_bfmatrix(id->bigI, I); return id; } void BPH_mass_spring_solver_free(Implicit_Data *id) { del_bfmatrix(id->tfm); del_bfmatrix(id->A); del_bfmatrix(id->dFdV); del_bfmatrix(id->dFdX); del_bfmatrix(id->S); del_bfmatrix(id->P); del_bfmatrix(id->Pinv); del_bfmatrix(id->bigI); del_bfmatrix(id->M); del_lfvector(id->X); del_lfvector(id->Xnew); del_lfvector(id->V); del_lfvector(id->Vnew); del_lfvector(id->F); del_lfvector(id->B); del_lfvector(id->dV); del_lfvector(id->z); MEM_freeN(id); } /* ==== Transformation from/to root reference frames ==== */ BLI_INLINE void world_to_root_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { copy_v3_v3(r, v); mul_transposed_m3_v3(data->tfm[index].m, r); } BLI_INLINE void root_to_world_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { mul_v3_m3v3(r, data->tfm[index].m, v); } BLI_INLINE void world_to_root_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3]) { float trot[3][3]; copy_m3_m3(trot, data->tfm[index].m); transpose_m3(trot); mul_m3_m3m3(r, trot, m); } BLI_INLINE void root_to_world_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3]) { 
mul_m3_m3m3(r, data->tfm[index].m, m); } /* ================================ */ DO_INLINE void filter(lfVector *V, fmatrix3x3 *S) { unsigned int i = 0; for (i = 0; i < S[0].vcount; i++) { mul_m3_v3(S[i].m, V[S[i].r]); } } # if 0 /* this version of the CG algorithm does not work very well with partial constraints (where S has non-zero elements) */ static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S) { // Solves for unknown X in equation AX=B unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100; float conjgrad_epsilon = 0.0001f /* , conjgrad_lasterror=0 */ /* UNUSED */; lfVector *q, *d, *tmp, *r; float s, starget, a, s_prev; unsigned int numverts = lA[0].vcount; q = create_lfvector(numverts); d = create_lfvector(numverts); tmp = create_lfvector(numverts); r = create_lfvector(numverts); // zero_lfvector(ldV, CLOTHPARTICLES); filter(ldV, S); add_lfvector_lfvector(ldV, ldV, z, numverts); // r = B - Mul(tmp, A, X); // just use B if X known to be zero cp_lfvector(r, lB, numverts); mul_bfmatrix_lfvector(tmp, lA, ldV); sub_lfvector_lfvector(r, r, tmp, numverts); filter(r, S); cp_lfvector(d, r, numverts); s = dot_lfvector(r, r, numverts); starget = s * sqrtf(conjgrad_epsilon); while (s > starget && conjgrad_loopcount < conjgrad_looplimit) { // Mul(q, A, d); // q = A*d; mul_bfmatrix_lfvector(q, lA, d); filter(q, S); a = s / dot_lfvector(d, q, numverts); // X = X + d*a; add_lfvector_lfvectorS(ldV, ldV, d, a, numverts); // r = r - q*a; sub_lfvector_lfvectorS(r, r, q, a, numverts); s_prev = s; s = dot_lfvector(r, r, numverts); //d = r+d*(s/s_prev); add_lfvector_lfvectorS(d, r, d, (s / s_prev), numverts); filter(d, S); conjgrad_loopcount++; } /* conjgrad_lasterror = s; */ /* UNUSED */ del_lfvector(q); del_lfvector(d); del_lfvector(tmp); del_lfvector(r); // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount); return conjgrad_loopcount < conjgrad_looplimit; // true means we reached desired accuracy in given time - ie stable 
} # endif static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, ImplicitSolverResult *result) { // Solves for unknown X in equation AX=B unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100; float conjgrad_epsilon = 0.01f; unsigned int numverts = lA[0].vcount; lfVector *fB = create_lfvector(numverts); lfVector *AdV = create_lfvector(numverts); lfVector *r = create_lfvector(numverts); lfVector *c = create_lfvector(numverts); lfVector *q = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); float bnorm2, delta_new, delta_old, delta_target, alpha; cp_lfvector(ldV, z, numverts); /* d0 = filter(B)^T * P * filter(B) */ cp_lfvector(fB, lB, numverts); filter(fB, S); bnorm2 = dot_lfvector(fB, fB, numverts); delta_target = conjgrad_epsilon * conjgrad_epsilon * bnorm2; /* r = filter(B - A * dV) */ mul_bfmatrix_lfvector(AdV, lA, ldV); sub_lfvector_lfvector(r, lB, AdV, numverts); filter(r, S); /* c = filter(P^-1 * r) */ cp_lfvector(c, r, numverts); filter(c, S); /* delta = r^T * c */ delta_new = dot_lfvector(r, c, numverts); # ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT printf("==== A ====\n"); print_bfmatrix(lA); printf("==== z ====\n"); print_lvector(z, numverts); printf("==== B ====\n"); print_lvector(lB, numverts); printf("==== S ====\n"); print_bfmatrix(S); # endif while (delta_new > delta_target && conjgrad_loopcount < conjgrad_looplimit) { mul_bfmatrix_lfvector(q, lA, c); filter(q, S); alpha = delta_new / dot_lfvector(c, q, numverts); add_lfvector_lfvectorS(ldV, ldV, c, alpha, numverts); add_lfvector_lfvectorS(r, r, q, -alpha, numverts); /* s = P^-1 * r */ cp_lfvector(s, r, numverts); delta_old = delta_new; delta_new = dot_lfvector(r, s, numverts); add_lfvector_lfvectorS(c, s, c, delta_new / delta_old, numverts); filter(c, S); conjgrad_loopcount++; } # ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT printf("==== dV ====\n"); print_lvector(ldV, numverts); printf("========\n"); # endif del_lfvector(fB); 
del_lfvector(AdV); del_lfvector(r); del_lfvector(c); del_lfvector(q); del_lfvector(s); // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount); result->status = conjgrad_loopcount < conjgrad_looplimit ? BPH_SOLVER_SUCCESS : BPH_SOLVER_NO_CONVERGENCE; result->iterations = conjgrad_loopcount; result->error = bnorm2 > 0.0f ? sqrtf(delta_new / bnorm2) : 0.0f; return conjgrad_loopcount < conjgrad_looplimit; // true means we reached desired accuracy in given time - ie stable } # if 0 // block diagonalizer DO_INLINE void BuildPPinv(fmatrix3x3 *lA, fmatrix3x3 *P, fmatrix3x3 *Pinv) { unsigned int i = 0; // Take only the diagonal blocks of A // #pragma omp parallel for private(i) if (lA[0].vcount > CLOTH_OPENMP_LIMIT) for (i = 0; i < lA[0].vcount; i++) { // block diagonalizer cp_fmatrix(P[i].m, lA[i].m); inverse_fmatrix(Pinv[i].m, P[i].m); } } # if 0 // version 1.3 static int cg_filtered_pre(lfVector *dv, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, fmatrix3x3 *P, fmatrix3x3 *Pinv) { unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100; float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0; float conjgrad_epsilon = 0.0001; // 0.2 is dt for steps=5 lfVector *r = create_lfvector(numverts); lfVector *p = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); lfVector *h = create_lfvector(numverts); BuildPPinv(lA, P, Pinv); filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); deltaNew = dot_lfvector(r, p, numverts); delta0 = deltaNew * sqrt(conjgrad_epsilon); # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif while ((deltaNew > delta0) && (iterations < conjgrad_looplimit)) { iterations++; mul_bfmatrix_lfvector(s, lA, p); filter(s, S); alpha = deltaNew / dot_lfvector(p, s, numverts); add_lfvector_lfvectorS(dv, dv, p, alpha, numverts); 
add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif // version 1.4 static int cg_filtered_pre(lfVector *dv, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, fmatrix3x3 *P, fmatrix3x3 *Pinv, fmatrix3x3 *bigI) { unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100; float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0, tol = 0; lfVector *r = create_lfvector(numverts); lfVector *p = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); lfVector *h = create_lfvector(numverts); lfVector *bhat = create_lfvector(numverts); lfVector *btemp = create_lfvector(numverts); BuildPPinv(lA, P, Pinv); initdiag_bfmatrix(bigI, I); sub_bfmatrix_Smatrix(bigI, bigI, S); // x = Sx_0+(I-S)z filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); // b_hat = S(b-A(I-S)z) mul_bfmatrix_lfvector(r, lA, z); mul_bfmatrix_lfvector(bhat, bigI, r); sub_lfvector_lfvector(bhat, lB, bhat, numverts); // r = S(b-Ax) mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); // p = SP^-1r mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); // delta0 = bhat^TP^-1bhat mul_prevfmatrix_lfvector(btemp, Pinv, bhat); delta0 = dot_lfvector(bhat, btemp, numverts); // deltaNew = r^TP deltaNew = dot_lfvector(r, p, numverts); # if 0 filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); deltaNew = dot_lfvector(r, p, 
numverts); delta0 = deltaNew * sqrt(conjgrad_epsilon); # endif # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif tol = (0.01 * 0.2); while ((deltaNew > delta0 * tol * tol) && (iterations < conjgrad_looplimit)) { iterations++; mul_bfmatrix_lfvector(s, lA, p); filter(s, S); alpha = deltaNew / dot_lfvector(p, s, numverts); add_lfvector_lfvectorS(dv, dv, p, alpha, numverts); add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(btemp); del_lfvector(bhat); del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); // printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif bool BPH_mass_spring_solve_velocities(Implicit_Data *data, float dt, ImplicitSolverResult *result) { unsigned int numverts = data->dFdV[0].vcount; lfVector *dFdXmV = create_lfvector(numverts); zero_lfvector(data->dV, numverts); cp_bfmatrix(data->A, data->M); subadd_bfmatrixS_bfmatrixS(data->A, data->dFdV, dt, data->dFdX, (dt * dt)); mul_bfmatrix_lfvector(dFdXmV, data->dFdX, data->V); add_lfvectorS_lfvectorS(data->B, data->F, dt, dFdXmV, (dt * dt), numverts); # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif cg_filtered(data->dV, data->A, data->B, data->z, data->S, result); /* conjugate gradient algorithm to solve Ax=b */ // cg_filtered_pre(id->dV, id->A, id->B, id->z, id->S, id->P, id->Pinv, id->bigI); # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered calc time: %f\n", (float)(end - start)); # endif // advance velocities add_lfvector_lfvector(data->Vnew, data->V, data->dV, numverts); del_lfvector(dFdXmV); return result->status == BPH_SOLVER_SUCCESS; } bool 
BPH_mass_spring_solve_positions(Implicit_Data *data, float dt) { int numverts = data->M[0].vcount; // advance positions add_lfvector_lfvectorS(data->Xnew, data->X, data->Vnew, dt, numverts); return true; } void BPH_mass_spring_apply_result(Implicit_Data *data) { int numverts = data->M[0].vcount; cp_lfvector(data->X, data->Xnew, numverts); cp_lfvector(data->V, data->Vnew, numverts); } void BPH_mass_spring_set_vertex_mass(Implicit_Data *data, int index, float mass) { unit_m3(data->M[index].m); mul_m3_fl(data->M[index].m, mass); } void BPH_mass_spring_set_rest_transform(Implicit_Data *data, int index, float tfm[3][3]) { # ifdef CLOTH_ROOT_FRAME copy_m3_m3(data->tfm[index].m, tfm); # else unit_m3(data->tfm[index].m); (void)tfm; # endif } void BPH_mass_spring_set_motion_state(Implicit_Data *data, int index, const float x[3], const float v[3]) { world_to_root_v3(data, index, data->X[index], x); world_to_root_v3(data, index, data->V[index], v); } void BPH_mass_spring_set_position(Implicit_Data *data, int index, const float x[3]) { world_to_root_v3(data, index, data->X[index], x); } void BPH_mass_spring_set_velocity(Implicit_Data *data, int index, const float v[3]) { world_to_root_v3(data, index, data->V[index], v); } void BPH_mass_spring_get_motion_state(struct Implicit_Data *data, int index, float x[3], float v[3]) { if (x) root_to_world_v3(data, index, x, data->X[index]); if (v) root_to_world_v3(data, index, v, data->V[index]); } void BPH_mass_spring_get_position(struct Implicit_Data *data, int index, float x[3]) { root_to_world_v3(data, index, x, data->X[index]); } void BPH_mass_spring_get_new_position(struct Implicit_Data *data, int index, float x[3]) { root_to_world_v3(data, index, x, data->Xnew[index]); } void BPH_mass_spring_set_new_position(struct Implicit_Data *data, int index, const float x[3]) { world_to_root_v3(data, index, data->Xnew[index], x); } void BPH_mass_spring_get_new_velocity(struct Implicit_Data *data, int index, float v[3]) { root_to_world_v3(data, 
index, v, data->Vnew[index]); } void BPH_mass_spring_set_new_velocity(struct Implicit_Data *data, int index, const float v[3]) { world_to_root_v3(data, index, data->Vnew[index], v); } /* -------------------------------- */ static int BPH_mass_spring_add_block(Implicit_Data *data, int v1, int v2) { int s = data->M[0].vcount + data->num_blocks; /* index from array start */ BLI_assert(s < data->M[0].vcount + data->M[0].scount); ++data->num_blocks; /* tfm and S don't have spring entries (diagonal blocks only) */ init_fmatrix(data->bigI + s, v1, v2); init_fmatrix(data->M + s, v1, v2); init_fmatrix(data->dFdX + s, v1, v2); init_fmatrix(data->dFdV + s, v1, v2); init_fmatrix(data->A + s, v1, v2); init_fmatrix(data->P + s, v1, v2); init_fmatrix(data->Pinv + s, v1, v2); return s; } void BPH_mass_spring_clear_constraints(Implicit_Data *data) { int i, numverts = data->S[0].vcount; for (i = 0; i < numverts; ++i) { unit_m3(data->S[i].m); zero_v3(data->z[i]); } } void BPH_mass_spring_add_constraint_ndof0(Implicit_Data *data, int index, const float dV[3]) { zero_m3(data->S[index].m); world_to_root_v3(data, index, data->z[index], dV); } void BPH_mass_spring_add_constraint_ndof1( Implicit_Data *data, int index, const float c1[3], const float c2[3], const float dV[3]) { float m[3][3], p[3], q[3], u[3], cmat[3][3]; world_to_root_v3(data, index, p, c1); mul_fvectorT_fvector(cmat, p, p); sub_m3_m3m3(m, I, cmat); world_to_root_v3(data, index, q, c2); mul_fvectorT_fvector(cmat, q, q); sub_m3_m3m3(m, m, cmat); /* XXX not sure but multiplication should work here */ copy_m3_m3(data->S[index].m, m); // mul_m3_m3m3(data->S[index].m, data->S[index].m, m); world_to_root_v3(data, index, u, dV); add_v3_v3(data->z[index], u); } void BPH_mass_spring_add_constraint_ndof2(Implicit_Data *data, int index, const float c1[3], const float dV[3]) { float m[3][3], p[3], u[3], cmat[3][3]; world_to_root_v3(data, index, p, c1); mul_fvectorT_fvector(cmat, p, p); sub_m3_m3m3(m, I, cmat); 
copy_m3_m3(data->S[index].m, m); // mul_m3_m3m3(data->S[index].m, data->S[index].m, m); world_to_root_v3(data, index, u, dV); add_v3_v3(data->z[index], u); } void BPH_mass_spring_clear_forces(Implicit_Data *data) { int numverts = data->M[0].vcount; zero_lfvector(data->F, numverts); init_bfmatrix(data->dFdX, ZERO); init_bfmatrix(data->dFdV, ZERO); data->num_blocks = 0; } void BPH_mass_spring_force_reference_frame(Implicit_Data *data, int index, const float acceleration[3], const float omega[3], const float domega_dt[3], float mass) { # ifdef CLOTH_ROOT_FRAME float acc[3], w[3], dwdt[3]; float f[3], dfdx[3][3], dfdv[3][3]; float euler[3], coriolis[3], centrifugal[3], rotvel[3]; float deuler[3][3], dcoriolis[3][3], dcentrifugal[3][3], drotvel[3][3]; world_to_root_v3(data, index, acc, acceleration); world_to_root_v3(data, index, w, omega); world_to_root_v3(data, index, dwdt, domega_dt); cross_v3_v3v3(euler, dwdt, data->X[index]); cross_v3_v3v3(coriolis, w, data->V[index]); mul_v3_fl(coriolis, 2.0f); cross_v3_v3v3(rotvel, w, data->X[index]); cross_v3_v3v3(centrifugal, w, rotvel); sub_v3_v3v3(f, acc, euler); sub_v3_v3(f, coriolis); sub_v3_v3(f, centrifugal); mul_v3_fl(f, mass); /* F = m * a */ cross_v3_identity(deuler, dwdt); cross_v3_identity(dcoriolis, w); mul_m3_fl(dcoriolis, 2.0f); cross_v3_identity(drotvel, w); cross_m3_v3m3(dcentrifugal, w, drotvel); add_m3_m3m3(dfdx, deuler, dcentrifugal); negate_m3(dfdx); mul_m3_fl(dfdx, mass); copy_m3_m3(dfdv, dcoriolis); negate_m3(dfdv); mul_m3_fl(dfdv, mass); add_v3_v3(data->F[index], f); add_m3_m3m3(data->dFdX[index].m, data->dFdX[index].m, dfdx); add_m3_m3m3(data->dFdV[index].m, data->dFdV[index].m, dfdv); # else (void)data; (void)index; (void)acceleration; (void)omega; (void)domega_dt; # endif } void BPH_mass_spring_force_gravity(Implicit_Data *data, int index, float mass, const float g[3]) { /* force = mass * acceleration (in this case: gravity) */ float f[3]; world_to_root_v3(data, index, f, g); mul_v3_fl(f, mass); 
add_v3_v3(data->F[index], f);
}

/* Simple air drag on every vertex: F += -drag * V, with the constant Jacobian
 * dF/dV = -drag * I added to the diagonal blocks. */
void BPH_mass_spring_force_drag(Implicit_Data *data, float drag)
{
  int i, numverts = data->M[0].vcount;
  for (i = 0; i < numverts; i++) {
    float tmp[3][3];

    /* NB: uses root space velocity, no need to transform */
    madd_v3_v3fl(data->F[i], data->V[i], -drag);

    copy_m3_m3(tmp, I);
    mul_m3_fl(tmp, -drag);
    add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tmp);
  }
}

/* Add an external force and its Jacobians to vertex i, transforming all of
 * them from world space into the vertex root frame first. */
void BPH_mass_spring_force_extern(
    struct Implicit_Data *data, int i, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  float tf[3], tdfdx[3][3], tdfdv[3][3];
  world_to_root_v3(data, i, tf, f);
  world_to_root_m3(data, i, tdfdx, dfdx);
  world_to_root_m3(data, i, tdfdv, dfdv);

  add_v3_v3(data->F[i], tf);
  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, tdfdx);
  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tdfdv);
}

/* Writes the unit normal of triangle (v1, v2, v3) into nor and returns the
 * length of the unnormalized cross product.
 * NOTE(review): that length is twice the triangle area, despite the name;
 * callers only use it as a scale factor -- verify before changing. */
static float calc_nor_area_tri(float nor[3], const float v1[3], const float v2[3], const float v3[3])
{
  float n1[3], n2[3];
  sub_v3_v3v3(n1, v1, v2);
  sub_v3_v3v3(n2, v2, v3);
  cross_v3_v3v3(nor, n1, n2);
  return normalize_v3(nor);
}

/* XXX does not support force jacobians yet, since the effector system does not provide them either */
void BPH_mass_spring_force_face_wind(
    Implicit_Data *data, int v1, int v2, int v3, const float (*winvec)[3])
{
  const float effector_scale = 0.02f;
  float win[3], nor[3], area;
  float factor;

  /* calculate face normal and area */
  area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
  /* Split the face contribution evenly over the three corner vertices. */
  factor = effector_scale * area / 3.0f;

  /* Per corner: project the (root-space) wind onto the normal and push along it. */
  world_to_root_v3(data, v1, win, winvec[v1]);
  madd_v3_v3fl(data->F[v1], nor, factor * dot_v3v3(win, nor));

  world_to_root_v3(data, v2, win, winvec[v2]);
  madd_v3_v3fl(data->F[v2], nor, factor * dot_v3v3(win, nor));

  world_to_root_v3(data, v3, win, winvec[v3]);
  madd_v3_v3fl(data->F[v3], nor, factor * dot_v3v3(win, nor));
}

/* Wind force on a single edge vertex, scaled by the edge's projected
 * cross-section (see the body continuation below). */
static void edge_wind_vertex(const float dir[3],
                             float length,
                             float radius,
                             const float wind[3],
                             float f[3],
                             float UNUSED(dfdx[3][3]),
                             float UNUSED(dfdv[3][3]))
{
  const float density = 0.01f; /* XXX
arbitrary value, corresponds to effect of air density */ float cos_alpha, sin_alpha, cross_section; float windlen = len_v3(wind); if (windlen == 0.0f) { zero_v3(f); return; } /* angle of wind direction to edge */ cos_alpha = dot_v3v3(wind, dir) / windlen; sin_alpha = sqrtf(1.0f - cos_alpha * cos_alpha); cross_section = radius * ((float)M_PI * radius * sin_alpha + length * cos_alpha); mul_v3_v3fl(f, wind, density * cross_section); } void BPH_mass_spring_force_edge_wind( Implicit_Data *data, int v1, int v2, float radius1, float radius2, const float (*winvec)[3]) { float win[3], dir[3], length; float f[3], dfdx[3][3], dfdv[3][3]; sub_v3_v3v3(dir, data->X[v1], data->X[v2]); length = normalize_v3(dir); world_to_root_v3(data, v1, win, winvec[v1]); edge_wind_vertex(dir, length, radius1, win, f, dfdx, dfdv); add_v3_v3(data->F[v1], f); world_to_root_v3(data, v2, win, winvec[v2]); edge_wind_vertex(dir, length, radius2, win, f, dfdx, dfdv); add_v3_v3(data->F[v2], f); } void BPH_mass_spring_force_vertex_wind(Implicit_Data *data, int v, float UNUSED(radius), const float (*winvec)[3]) { const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */ float wind[3]; float f[3]; world_to_root_v3(data, v, wind, winvec[v]); mul_v3_v3fl(f, wind, density); add_v3_v3(data->F[v], f); } BLI_INLINE void dfdx_spring(float to[3][3], const float dir[3], float length, float L, float k) { // dir is unit length direction, rest is spring's restlength, k is spring constant. //return ( (I-outerprod(dir, dir))*Min(1.0f, rest/length) - I) * -k; outerproduct(to, dir, dir); sub_m3_m3m3(to, I, to); mul_m3_fl(to, (L / length)); sub_m3_m3m3(to, to, I); mul_m3_fl(to, k); } /* unused */ # if 0 BLI_INLINE void dfdx_damp(float to[3][3], const float dir[3], float length, const float vel[3], float rest, float damping) { // inner spring damping vel is the relative velocity of the endpoints. 
// return (I-outerprod(dir, dir)) * (-damping * -(dot(dir, vel)/Max(length, rest))); mul_fvectorT_fvector(to, dir, dir); sub_fmatrix_fmatrix(to, I, to); mul_fmatrix_S(to, (-damping * -(dot_v3v3(dir, vel) / MAX2(length, rest)))); } # endif BLI_INLINE void dfdv_damp(float to[3][3], const float dir[3], float damping) { // derivative of force wrt velocity outerproduct(to, dir, dir); mul_m3_fl(to, -damping); } BLI_INLINE float fb(float length, float L) { float x = length / L; float xx = x * x; float xxx = xx * x; float xxxx = xxx * x; return (-11.541f * xxxx + 34.193f * xxx - 39.083f * xx + 23.116f * x - 9.713f); } BLI_INLINE float fbderiv(float length, float L) { float x = length / L; float xx = x * x; float xxx = xx * x; return (-46.164f * xxx + 102.579f * xx - 78.166f * x + 23.116f); } BLI_INLINE float fbstar(float length, float L, float kb, float cb) { float tempfb_fl = kb * fb(length, L); float fbstar_fl = cb * (length - L); if (tempfb_fl < fbstar_fl) return fbstar_fl; else return tempfb_fl; } // function to calculae bending spring force (taken from Choi & Co) BLI_INLINE float fbstar_jacobi(float length, float L, float kb, float cb) { float tempfb_fl = kb * fb(length, L); float fbstar_fl = cb * (length - L); if (tempfb_fl < fbstar_fl) { return -cb; } else { return -kb * fbderiv(length, L); } } /* calculate elonglation */ BLI_INLINE bool spring_length(Implicit_Data *data, int i, int j, float r_extent[3], float r_dir[3], float *r_length, float r_vel[3]) { sub_v3_v3v3(r_extent, data->X[j], data->X[i]); sub_v3_v3v3(r_vel, data->V[j], data->V[i]); *r_length = len_v3(r_extent); if (*r_length > ALMOST_ZERO) { # if 0 if (length > L) { if ((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) && (((length - L) * 100.0f / L) > clmd->sim_parms->maxspringlen)) { // cut spring! 
s->flags |= CSPRING_FLAG_DEACTIVATE; return false; } } # endif mul_v3_v3fl(r_dir, r_extent, 1.0f / (*r_length)); } else { zero_v3(r_dir); } return true; } BLI_INLINE void apply_spring( Implicit_Data *data, int i, int j, const float f[3], float dfdx[3][3], float dfdv[3][3]) { int block_ij = BPH_mass_spring_add_block(data, i, j); add_v3_v3(data->F[i], f); sub_v3_v3(data->F[j], f); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfdx); sub_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv); add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfdv); sub_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfdv); } bool BPH_mass_spring_force_spring_linear(Implicit_Data *data, int i, int j, float restlen, float stiffness_tension, float damping_tension, float stiffness_compression, float damping_compression, bool resist_compress, bool new_compress, float clamp_force) { float extent[3], length, dir[3], vel[3]; float f[3], dfdx[3][3], dfdv[3][3]; float damping = 0; // calculate elonglation spring_length(data, i, j, extent, dir, &length, vel); /* This code computes not only the force, but also its derivative. * Zero derivative effectively disables the spring for the implicit solver. * Thus length > restlen makes cloth unconstrained at the start of simulation. */ if ((length >= restlen && length > 0) || resist_compress) { float stretch_force; damping = damping_tension; stretch_force = stiffness_tension * (length - restlen); if (clamp_force > 0.0f && stretch_force > clamp_force) { stretch_force = clamp_force; } mul_v3_v3fl(f, dir, stretch_force); dfdx_spring(dfdx, dir, length, restlen, stiffness_tension); } else if (new_compress) { /* This is based on the Choi and Ko bending model, which works surprisingly well for compression. 
*/ float kb = stiffness_compression; float cb = kb; /* cb equal to kb seems to work, but a factor can be added if necessary */ damping = damping_compression; mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb)); outerproduct(dfdx, dir, dir); mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb)); } else { return false; } madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir)); dfdv_damp(dfdv, dir, damping); apply_spring(data, i, j, f, dfdx, dfdv); return true; } /* See "Stable but Responsive Cloth" (Choi, Ko 2005) */ bool BPH_mass_spring_force_spring_bending( Implicit_Data *data, int i, int j, float restlen, float kb, float cb) { float extent[3], length, dir[3], vel[3]; // calculate elonglation spring_length(data, i, j, extent, dir, &length, vel); if (length < restlen) { float f[3], dfdx[3][3], dfdv[3][3]; mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb)); outerproduct(dfdx, dir, dir); mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb)); /* XXX damping not supported */ zero_m3(dfdv); apply_spring(data, i, j, f, dfdx, dfdv); return true; } else { return false; } } BLI_INLINE void poly_avg(lfVector *data, int *inds, int len, float r_avg[3]) { float fact = 1.0f / (float)len; zero_v3(r_avg); for (int i = 0; i < len; i++) { madd_v3_v3fl(r_avg, data[inds[i]], fact); } } BLI_INLINE void poly_norm(lfVector *data, int i, int j, int *inds, int len, float r_dir[3]) { float mid[3]; poly_avg(data, inds, len, mid); normal_tri_v3(r_dir, data[i], data[j], mid); } BLI_INLINE void edge_avg(lfVector *data, int i, int j, float r_avg[3]) { r_avg[0] = (data[i][0] + data[j][0]) * 0.5f; r_avg[1] = (data[i][1] + data[j][1]) * 0.5f; r_avg[2] = (data[i][2] + data[j][2]) * 0.5f; } BLI_INLINE void edge_norm(lfVector *data, int i, int j, float r_dir[3]) { sub_v3_v3v3(r_dir, data[i], data[j]); normalize_v3(r_dir); } BLI_INLINE float bend_angle(float dir_a[3], float dir_b[3], float dir_e[3]) { float cos, sin; float tmp[3]; cos = dot_v3v3(dir_a, dir_b); cross_v3_v3v3(tmp, dir_a, dir_b); sin = 
dot_v3v3(tmp, dir_e); return atan2f(sin, cos); } BLI_INLINE void spring_angle(Implicit_Data *data, int i, int j, int *i_a, int *i_b, int len_a, int len_b, float r_dir_a[3], float r_dir_b[3], float *r_angle, float r_vel_a[3], float r_vel_b[3]) { float dir_e[3], vel_e[3]; poly_norm(data->X, j, i, i_a, len_a, r_dir_a); poly_norm(data->X, i, j, i_b, len_b, r_dir_b); edge_norm(data->X, i, j, dir_e); *r_angle = bend_angle(r_dir_a, r_dir_b, dir_e); poly_avg(data->V, i_a, len_a, r_vel_a); poly_avg(data->V, i_b, len_b, r_vel_b); edge_avg(data->V, i, j, vel_e); sub_v3_v3(r_vel_a, vel_e); sub_v3_v3(r_vel_b, vel_e); } /* Angular springs roughly based on the bending model proposed by Baraff and Witkin in "Large Steps in Cloth Simulation". */ bool BPH_mass_spring_force_spring_angular(Implicit_Data *data, int i, int j, int *i_a, int *i_b, int len_a, int len_b, float restang, float stiffness, float damping) { float angle, dir_a[3], dir_b[3], vel_a[3], vel_b[3]; float f_a[3], f_b[3], f_e[3]; float force; int x; spring_angle(data, i, j, i_a, i_b, len_a, len_b, dir_a, dir_b, &angle, vel_a, vel_b); /* spring force */ force = stiffness * (angle - restang); /* damping force */ force += -damping * (dot_v3v3(vel_a, dir_a) + dot_v3v3(vel_b, dir_b)); mul_v3_v3fl(f_a, dir_a, force / len_a); mul_v3_v3fl(f_b, dir_b, force / len_b); for (x = 0; x < len_a; x++) { add_v3_v3(data->F[i_a[x]], f_a); } for (x = 0; x < len_b; x++) { add_v3_v3(data->F[i_b[x]], f_b); } mul_v3_v3fl(f_a, dir_a, force * 0.5f); mul_v3_v3fl(f_b, dir_b, force * 0.5f); add_v3_v3v3(f_e, f_a, f_b); sub_v3_v3(data->F[i], f_e); sub_v3_v3(data->F[j], f_e); return true; } /* Jacobian of a direction vector. * Basically the part of the differential orthogonal to the direction, * inversely proportional to the length of the edge. 
* * dD_ij/dx_i = -dD_ij/dx_j = (D_ij * D_ij^T - I) / len_ij */ BLI_INLINE void spring_grad_dir( Implicit_Data *data, int i, int j, float edge[3], float dir[3], float grad_dir[3][3]) { float length; sub_v3_v3v3(edge, data->X[j], data->X[i]); length = normalize_v3_v3(dir, edge); if (length > ALMOST_ZERO) { outerproduct(grad_dir, dir, dir); sub_m3_m3m3(grad_dir, I, grad_dir); mul_m3_fl(grad_dir, 1.0f / length); } else { zero_m3(grad_dir); } } BLI_INLINE void spring_hairbend_forces(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, const float dx[3], const float dv[3], float r_f[3]) { float edge_ij[3], dir_ij[3]; float edge_jk[3], dir_jk[3]; float vel_ij[3], vel_jk[3], vel_ortho[3]; float f_bend[3], f_damp[3]; float fk[3]; float dist[3]; zero_v3(fk); sub_v3_v3v3(edge_ij, data->X[j], data->X[i]); if (q == i) sub_v3_v3(edge_ij, dx); if (q == j) add_v3_v3(edge_ij, dx); normalize_v3_v3(dir_ij, edge_ij); sub_v3_v3v3(edge_jk, data->X[k], data->X[j]); if (q == j) sub_v3_v3(edge_jk, dx); if (q == k) add_v3_v3(edge_jk, dx); normalize_v3_v3(dir_jk, edge_jk); sub_v3_v3v3(vel_ij, data->V[j], data->V[i]); if (q == i) sub_v3_v3(vel_ij, dv); if (q == j) add_v3_v3(vel_ij, dv); sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); if (q == j) sub_v3_v3(vel_jk, dv); if (q == k) add_v3_v3(vel_jk, dv); /* bending force */ sub_v3_v3v3(dist, goal, edge_jk); mul_v3_v3fl(f_bend, dist, stiffness); add_v3_v3(fk, f_bend); /* damping force */ madd_v3_v3v3fl(vel_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); mul_v3_v3fl(f_damp, vel_ortho, damping); sub_v3_v3(fk, f_damp); copy_v3_v3(r_f, fk); } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdx(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdx[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3]; 
float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; ++a) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_pos[a], dvec_null[a], f); copy_v3_v3(dfdx[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_neg[a], dvec_null[a], f); sub_v3_v3(dfdx[a], f); for (b = 0; b < 3; ++b) { dfdx[a][b] /= delta; } } } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdv(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdv[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3]; float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; ++a) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_pos[a], f); copy_v3_v3(dfdv[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_neg[a], f); sub_v3_v3(dfdv[a], f); for (b = 0; b < 3; ++b) { dfdv[a][b] /= delta; } } } /* Angular spring that pulls the vertex toward the local target * See "Artistic Simulation of Curly Hair" (Pixar technical memo #12-03a) */ bool BPH_mass_spring_force_spring_bending_hair(Implicit_Data *data, int i, int j, int k, const float target[3], float stiffness, float damping) { float goal[3]; float fj[3], fk[3]; float dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfj_dvi[3][3], dfj_dvj[3][3], dfk_dvi[3][3], dfk_dvj[3][3], dfk_dvk[3][3]; const float vecnull[3] = {0.0f, 0.0f, 0.0f}; int block_ij = 
BPH_mass_spring_add_block(data, i, j); int block_jk = BPH_mass_spring_add_block(data, j, k); int block_ik = BPH_mass_spring_add_block(data, i, k); world_to_root_v3(data, j, goal, target); spring_hairbend_forces(data, i, j, k, goal, stiffness, damping, k, vecnull, vecnull, fk); negate_v3_v3(fj, fk); /* counterforce */ spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, i, dfk_dxi); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, j, dfk_dxj); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, k, dfk_dxk); copy_m3_m3(dfj_dxi, dfk_dxi); negate_m3(dfj_dxi); copy_m3_m3(dfj_dxj, dfk_dxj); negate_m3(dfj_dxj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, i, dfk_dvi); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, j, dfk_dvj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, k, dfk_dvk); copy_m3_m3(dfj_dvi, dfk_dvi); negate_m3(dfj_dvi); copy_m3_m3(dfj_dvj, dfk_dvj); negate_m3(dfj_dvj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfj_dvj); add_m3_m3m3(data->dFdV[k].m, data->dFdV[k].m, dfk_dvk); add_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfj_dvi); add_m3_m3m3(data->dFdV[block_jk].m, data->dFdV[block_jk].m, dfk_dvj); add_m3_m3m3(data->dFdV[block_ik].m, data->dFdV[block_ik].m, dfk_dvi); /* XXX analytical calculation of derivatives below is incorrect. * This proved to be difficult, but for now just using the finite difference method for * estimating the jacobians should be sufficient. 
*/ # if 0 float edge_ij[3], dir_ij[3], grad_dir_ij[3][3]; float edge_jk[3], dir_jk[3], grad_dir_jk[3][3]; float dist[3], vel_jk[3], vel_jk_ortho[3], projvel[3]; float target[3]; float tmp[3][3]; float fi[3], fj[3], fk[3]; float dfi_dxi[3][3], dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfdvi[3][3]; // TESTING damping = 0.0f; zero_v3(fi); zero_v3(fj); zero_v3(fk); zero_m3(dfi_dxi); zero_m3(dfj_dxi); zero_m3(dfk_dxi); zero_m3(dfk_dxj); zero_m3(dfk_dxk); /* jacobian of direction vectors */ spring_grad_dir(data, i, j, edge_ij, dir_ij, grad_dir_ij); spring_grad_dir(data, j, k, edge_jk, dir_jk, grad_dir_jk); sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); /* bending force */ mul_v3_v3fl(target, dir_ij, restlen); sub_v3_v3v3(dist, target, edge_jk); mul_v3_v3fl(fk, dist, stiffness); /* damping force */ madd_v3_v3v3fl(vel_jk_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); madd_v3_v3fl(fk, vel_jk_ortho, damping); /* XXX this only holds true as long as we assume straight rest shape! * eventually will become a bit more involved since the opposite segment * gets its own target, under condition of having equal torque on both sides. 
*/ copy_v3_v3(fi, fk); /* counterforce on the middle point */ sub_v3_v3(fj, fi); sub_v3_v3(fj, fk); /* === derivatives === */ madd_m3_m3fl(dfk_dxi, grad_dir_ij, stiffness * restlen); madd_m3_m3fl(dfk_dxj, grad_dir_ij, -stiffness * restlen); madd_m3_m3fl(dfk_dxj, I, stiffness); madd_m3_m3fl(dfk_dxk, I, -stiffness); copy_m3_m3(dfi_dxi, dfk_dxk); negate_m3(dfi_dxi); /* dfj_dfi == dfi_dfj due to symmetry, * dfi_dfj == dfk_dfj due to fi == fk * XXX see comment above on future bent rest shapes */ copy_m3_m3(dfj_dxi, dfk_dxj); /* dfj_dxj == -(dfi_dxj + dfk_dxj) due to fj == -(fi + fk) */ sub_m3_m3m3(dfj_dxj, dfj_dxj, dfj_dxi); sub_m3_m3m3(dfj_dxj, dfj_dxj, dfk_dxj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[i], fi); add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfi_dxi); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); # endif return true; } bool BPH_mass_spring_force_spring_goal(Implicit_Data *data, int i, const float goal_x[3], const float goal_v[3], float stiffness, float damping) { float root_goal_x[3], root_goal_v[3], extent[3], length, dir[3], vel[3]; float f[3], dfdx[3][3], dfdv[3][3]; /* goal is in world space */ world_to_root_v3(data, i, root_goal_x, goal_x); world_to_root_v3(data, i, root_goal_v, goal_v); sub_v3_v3v3(extent, root_goal_x, data->X[i]); sub_v3_v3v3(vel, root_goal_v, data->V[i]); length = normalize_v3_v3(dir, extent); if (length > ALMOST_ZERO) { mul_v3_v3fl(f, dir, stiffness * length); // Ascher & Boxman, p.21: Damping only during elonglation // something wrong with it... 
madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir)); dfdx_spring(dfdx, dir, length, 0.0f, stiffness); dfdv_damp(dfdv, dir, damping); add_v3_v3(data->F[i], f); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv); return true; } else { return false; } } #endif /* IMPLICIT_SOLVER_BLENDER */
IcgMask.c
// Copyright (C) 2016 Gernot Riegler // Institute for Computer Graphics and Vision (ICG) // Graz University of Technology (TU GRAZ) // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // 3. All advertising materials mentioning features or use of this software // must display the following acknowledgement: // This product includes software developed by the ICG, TU GRAZ. // 4. Neither the name of the ICG, TU GRAZ nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE PROVIDER BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/IcgMask.c"
#else

/* Forward pass, "grid" mask: keep only the pixels lying on a sub-sampling
 * grid (one representative per height_factor x width_factor cell, at the
 * centered offset); all other pixels are set to mask_value. */
static void icgnn_(IcgMask_updateOutputGrid)(lua_State* L, THTensor* in, THTensor* out,
    real mask_value) {
  int height_factor = luaT_getfieldcheckint(L, 1, "height_factor");
  int width_factor = luaT_getfieldcheckint(L, 1, "width_factor");

  long n_dim = in->nDimension;
  luaL_argcheck(L, n_dim == 3 || n_dim == 4, 2, "3D or 4D(batch mode) tensor expected");

  in = THTensor_(newContiguous)(in);

  long num, channels, height, width;
  if(n_dim == 3) {
    num = 1;
    channels = in->size[0];
    height = in->size[1];
    width = in->size[2];
    THTensor_(resize3d)(out, channels, height, width);
  }
  else if(n_dim == 4) {
    num = in->size[0];
    channels = in->size[1];
    height = in->size[2];
    width = in->size[3];
    THTensor_(resize4d)(out, num, channels, height, width);
  }

  real* in_data = THTensor_(data)(in);
  real* out_data = THTensor_(data)(out);

  /* centered offset of the kept sample inside each factor-sized cell */
  long h_offset = (height_factor - 1) / 2;
  long w_offset = (width_factor - 1) / 2;

  long n;
#pragma omp parallel for private(n)
  for(n = 0; n < num * channels; ++n) {
    long h;
    for(h = 0; h < height; ++h) {
      long w;
      for(w = 0; w < width; ++w) {
        long idx = (n * height + h) * width + w;
        if(h % height_factor == h_offset && w % width_factor == w_offset) {
          out_data[idx] = in_data[idx];
        }
        else {
          out_data[idx] = mask_value;
        }
      }
    }
  }

  THTensor_(free)(in);
}

/* Forward pass, "border" mask: keep the interior of the image and replace a
 * frame of `border` pixels around it with mask_value. */
static void icgnn_(IcgMask_updateOutputBorder)(lua_State* L, THTensor* in, THTensor* out,
    real mask_value) {
  int border = luaT_getfieldcheckint(L, 1, "border");

  long n_dim = in->nDimension;
  luaL_argcheck(L, n_dim == 3 || n_dim == 4, 2, "3D or 4D(batch mode) tensor expected");

  in = THTensor_(newContiguous)(in);

  long num, channels, height, width;
  if(n_dim == 3) {
    num = 1;
    channels = in->size[0];
    height = in->size[1];
    width = in->size[2];
    THTensor_(resize3d)(out, channels, height, width);
  }
  else if(n_dim == 4) {
    num = in->size[0];
    channels = in->size[1];
    height = in->size[2];
    width = in->size[3];
    THTensor_(resize4d)(out, num, channels, height, width);
  }

  real* in_data = THTensor_(data)(in);
  real* out_data = THTensor_(data)(out);

  long n;
#pragma omp parallel for private(n)
  for(n = 0; n < num * channels; ++n) {
    long h;
    for(h = 0; h < height; ++h) {
      long w;
      for(w = 0; w < width; ++w) {
        long idx = (n * height + h) * width + w;
        if(h >= border && h < height - border && w >= border && w < width - border) {
          out_data[idx] = in_data[idx];
        }
        else {
          out_data[idx] = mask_value;
        }
      }
    }
  }

  THTensor_(free)(in);
}

/* Lua entry point: dispatch to the grid or border forward pass depending on
 * the module's mask_type field. */
static int icgnn_(IcgMask_updateOutput)(lua_State* L) {
  THTensor* in = luaT_checkudata(L, 2, torch_Tensor);
  THTensor* out = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
  const char* mask_type = luaT_getfieldcheckstring(L, 1, "mask_type");
  real mask_value = luaT_getfieldchecknumber(L, 1, "mask_value");

  if(strcmp(mask_type, "grid") == 0) {
    icgnn_(IcgMask_updateOutputGrid)(L, in, out, mask_value);
  }
  else if(strcmp(mask_type, "border") == 0) {
    icgnn_(IcgMask_updateOutputBorder)(L, in, out, mask_value);
  }
  else {
    luaL_error(L, "unknown mask type: %s", mask_type);
  }

  return 1;
}

/* Backward pass, "grid" mask: gradient flows only through the kept grid
 * pixels, zero elsewhere.  NOTE(review): in_data/out_data/mask_value and
 * out_height/out_width are unused here; num/channels/height/width stay
 * uninitialized if n_dim is neither 3 nor 4 (callers validate in the forward
 * pass) — confirm inputs are always 3D/4D. */
static void icgnn_(IcgMask_updateGradInputGrid)(lua_State* L, THTensor* in,
    THTensor* grad_out, THTensor* out, THTensor* grad_in, real mask_value) {
  int height_factor = luaT_getfieldcheckint(L, 1, "height_factor");
  int width_factor = luaT_getfieldcheckint(L, 1, "width_factor");

  real* in_data = THTensor_(data)(in);
  real* out_data = THTensor_(data)(out);
  real* grad_in_data = THTensor_(data)(grad_in);
  real* grad_out_data = THTensor_(data)(grad_out);

  long n_dim = in->nDimension;
  long num, channels, height, width, out_height, out_width;
  if(n_dim == 3) {
    num = 1;
    channels = in->size[0];
    height = in->size[1];
    width = in->size[2];
  }
  else if(n_dim == 4) {
    num = in->size[0];
    channels = in->size[1];
    height = in->size[2];
    width = in->size[3];
  }

  long h_offset = (height_factor - 1) / 2;
  long w_offset = (width_factor - 1) / 2;

  long n;
#pragma omp parallel for private(n)
  for(n = 0; n < num * channels; ++n) {
    long h;
    for(h = 0; h < height; ++h) {
      long w;
      for(w = 0; w < width; ++w) {
        long idx = (n * height + h) * width + w;
        if(h % height_factor == h_offset && w % width_factor == w_offset) {
          grad_in_data[idx] = grad_out_data[idx];
        }
        else {
          grad_in_data[idx] = 0;
        }
      }
    }
  }
}

/* Backward pass, "border" mask: gradient flows only through the interior
 * pixels, zero in the masked frame.  Same unused/uninitialized-variable
 * caveats as the grid variant above. */
static void icgnn_(IcgMask_updateGradInputBorder)(lua_State* L, THTensor* in,
    THTensor* grad_out, THTensor* out, THTensor* grad_in, real mask_value) {
  int border = luaT_getfieldcheckint(L, 1, "border");

  real* in_data = THTensor_(data)(in);
  real* out_data = THTensor_(data)(out);
  real* grad_in_data = THTensor_(data)(grad_in);
  real* grad_out_data = THTensor_(data)(grad_out);

  long n_dim = in->nDimension;
  long num, channels, height, width, out_height, out_width;
  if(n_dim == 3) {
    num = 1;
    channels = in->size[0];
    height = in->size[1];
    width = in->size[2];
  }
  else if(n_dim == 4) {
    num = in->size[0];
    channels = in->size[1];
    height = in->size[2];
    width = in->size[3];
  }

  long n;
#pragma omp parallel for private(n)
  for(n = 0; n < num * channels; ++n) {
    long h;
    for(h = 0; h < height; ++h) {
      long w;
      for(w = 0; w < width; ++w) {
        long idx = (n * height + h) * width + w;
        if(h >= border && h < height - border && w >= border && w < width - border) {
          grad_in_data[idx] = grad_out_data[idx];
        }
        else {
          grad_in_data[idx] = 0;
        }
      }
    }
  }
}

/* Lua entry point for the backward pass; resizes gradInput to match the
 * input, then dispatches on mask_type. */
static int icgnn_(IcgMask_updateGradInput)(lua_State *L) {
  THTensor* in = luaT_checkudata(L, 2, torch_Tensor);
  THTensor* grad_out = luaT_checkudata(L, 3, torch_Tensor);
  THTensor* out = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
  THTensor* grad_in = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);

  THTensor_(resizeAs)(grad_in, in);

  const char* mask_type = luaT_getfieldcheckstring(L, 1, "mask_type");
  real mask_value = luaT_getfieldchecknumber(L, 1, "mask_value");

  if(strcmp(mask_type, "grid") == 0) {
    icgnn_(IcgMask_updateGradInputGrid)(L, in, grad_out, out, grad_in, mask_value);
  }
  else if(strcmp(mask_type, "border") == 0) {
    icgnn_(IcgMask_updateGradInputBorder)(L, in, grad_out, out, grad_in, mask_value);
  }
  else {
    luaL_error(L, "unknown mask type: %s", mask_type);
  }

  return 1;
}

/* Method table registered on the torch Tensor metatable. */
static const struct luaL_Reg icgnn_(IcgMask__) [] = {
  {"IcgMask_updateOutput", icgnn_(IcgMask_updateOutput)},
  {"IcgMask_updateGradInput", icgnn_(IcgMask_updateGradInput)},
  {NULL, NULL}
};

static void icgnn_(IcgMask_init)(lua_State *L) {
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, icgnn_(IcgMask__), "icgnn");
  lua_pop(L,1);
}

#endif
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
c-tree.h
/* Definitions for C parsing and type checking.
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H

#include "c-family/c-common.h"
#include "diagnostic.h"

/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
   know how big it is.  This is sanity-checked in c-decl.c.  */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
  (sizeof (struct c_common_identifier) + 3 * sizeof (void *))

/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only.  */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)

/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile.  */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)

/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
   nonzero if the definition of the type has already started.  */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)

/* In an incomplete RECORD_TYPE, UNION_TYPE or ENUMERAL_TYPE, a list of
   variable declarations whose type would be completed by completing
   that type.  */
#define C_TYPE_INCOMPLETE_VARS(TYPE) \
  TYPE_LANG_SLOT_1 (TREE_CHECK4 (TYPE, RECORD_TYPE, UNION_TYPE, \
				 QUAL_UNION_TYPE, ENUMERAL_TYPE))

/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
   keyword.  C_RID_CODE (node) is then the RID_* value of the keyword.  */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)

/* Record whether a type or decl was written with nonconstant size.
   Note that TYPE_SIZE may have simplified to a constant.  */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)

/* Record whether a type is defined inside a struct or union type.
   This is used for -Wc++-compat.  */
#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)

/* Record whether an "incomplete type" error was given for the type.  */
#define C_TYPE_ERROR_REPORTED(TYPE) TYPE_LANG_FLAG_3 (TYPE)

/* Record whether a typedef for type `int' was actually `signed int'.  */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)

/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
   return type.  */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)

/* For a FUNCTION_DECL, nonzero if it was an implicit declaration.  */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)

/* For a PARM_DECL, nonzero if it was declared as an array.  */
#define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE)

/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
   been declared.  */
#define C_DECL_DECLARED_BUILTIN(EXP) \
  DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))

/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
   built-in prototype and does not have a non-built-in prototype.  */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
  DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))

/* Record whether a decl was declared register.  This is strictly a
   front-end flag, whereas DECL_REGISTER is used for code generation;
   they may differ for structures with volatile fields.  */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)

/* Record whether a decl was used in an expression anywhere except an
   unevaluated operand of sizeof / typeof / alignof.  This is only
   used for functions declared static but not defined, though outside
   sizeof and typeof it is set for other function decls as well.  */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))

/* Record whether a variable has been declared threadprivate by
   #pragma omp threadprivate.  */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))

/* Set on VAR_DECLs for compound literals.  */
#define C_DECL_COMPOUND_LITERAL_P(DECL) \
  DECL_LANG_FLAG_5 (VAR_DECL_CHECK (DECL))

/* Nonzero for a decl which either doesn't exist or isn't a prototype.
   N.B. Could be simplified if all built-in decls had complete prototypes
   (but this is presently difficult because some of them need FILE*).  */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
       (EXP == 0					\
	|| (!prototype_p (TREE_TYPE (EXP))	\
	    && !fndecl_built_in_p (EXP)))

/* For FUNCTION_TYPE, a hidden list of types of arguments.  The same as
   TYPE_ARG_TYPES for functions with prototypes, but created for functions
   without prototypes.  */
#define TYPE_ACTUAL_ARG_TYPES(NODE) \
  TYPE_LANG_SLOT_1 (FUNCTION_TYPE_CHECK (NODE))

/* For a CONSTRUCTOR, whether some initializer contains a
   subexpression meaning it is not a constant expression.  */
#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))

/* For a SAVE_EXPR, nonzero if the operand of the SAVE_EXPR has already
   been folded.  */
#define SAVE_EXPR_FOLDED_P(EXP) TREE_LANG_FLAG_1 (SAVE_EXPR_CHECK (EXP))

/* Record parser information about an expression that is irrelevant
   for code generation alongside a tree representing its value.  */
struct c_expr
{
  /* The value of the expression.  */
  tree value;
  /* Record the original unary/binary operator of an expression, which may
     have been changed by fold, STRING_CST for unparenthesized string
     constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls (even if
     parenthesized), for subexpressions, and for non-constant initializers, or
     ERROR_MARK for other expressions (including parenthesized expressions).
*/ enum tree_code original_code; /* If not NULL, the original type of an expression. This will differ from the type of the value field for an enum constant. The type of an enum constant is a plain integer type, but this field will be the enum type. */ tree original_type; /* The source range of this expression. This is redundant for node values that have locations, but not all node kinds have locations (e.g. constants, and references to params, locals, etc), so we stash a copy here. */ source_range src_range; /* Access to the first and last locations within the source spelling of this expression. */ location_t get_start () const { return src_range.m_start; } location_t get_finish () const { return src_range.m_finish; } location_t get_location () const { if (EXPR_HAS_LOCATION (value)) return EXPR_LOCATION (value); else return make_location (get_start (), get_start (), get_finish ()); } /* Set the value to error_mark_node whilst ensuring that src_range is initialized. */ void set_error () { value = error_mark_node; src_range.m_start = UNKNOWN_LOCATION; src_range.m_finish = UNKNOWN_LOCATION; } }; /* Type alias for struct c_expr. This allows the structure to be used inside the VEC types. */ typedef struct c_expr c_expr_t; /* A kind of type specifier. Note that this information is currently only used to distinguish tag definitions, tag references and typeof uses. */ enum c_typespec_kind { /* No typespec. This appears only in struct c_declspec. */ ctsk_none, /* A reserved keyword type specifier. */ ctsk_resword, /* A reference to a tag, previously declared, such as "struct foo". This includes where the previous declaration was as a different kind of tag, in which case this is only valid if shadowing that tag in an inner scope. */ ctsk_tagref, /* A reference to a tag, not previously declared in a visible scope. */ ctsk_tagfirstref, /* A definition of a tag such as "struct foo { int a; }". */ ctsk_tagdef, /* A typedef name. 
*/ ctsk_typedef, /* An ObjC-specific kind of type specifier. */ ctsk_objc, /* A typeof specifier, or _Atomic ( type-name ). */ ctsk_typeof }; /* A type specifier: this structure is created in the parser and passed to declspecs_add_type only. */ struct c_typespec { /* What kind of type specifier this is. */ enum c_typespec_kind kind; /* Whether the expression has operands suitable for use in constant expressions. */ bool expr_const_operands; /* The specifier itself. */ tree spec; /* An expression to be evaluated before the type specifier, in the case of typeof specifiers, or NULL otherwise or if no such expression is required for a particular typeof specifier. In particular, when typeof is applied to an expression of variably modified type, that expression must be evaluated in order to determine array sizes that form part of the type, but the expression itself (as opposed to the array sizes) forms no part of the type and so needs to be recorded separately. */ tree expr; }; /* A storage class specifier. */ enum c_storage_class { csc_none, csc_auto, csc_extern, csc_register, csc_static, csc_typedef }; /* A type specifier keyword "void", "_Bool", "char", "int", "float", "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum", or none of these. */ enum c_typespec_keyword { cts_none, cts_void, cts_bool, cts_char, cts_int, cts_float, cts_int_n, cts_double, cts_dfloat32, cts_dfloat64, cts_dfloat128, cts_floatn_nx, cts_fract, cts_accum, cts_auto_type }; /* This enum lists all the possible declarator specifiers, storage class or attribute that a user can write. There is at least one enumerator per possible declarator specifier in the struct c_declspecs below. It is used to index the array of declspec locations in struct c_declspecs. */ enum c_declspec_word { cdw_typespec /* A catch-all for a typespec. 
*/, cdw_storage_class /* A catch-all for a storage class */, cdw_attributes, cdw_typedef, cdw_explicit_signed, cdw_deprecated, cdw_default_int, cdw_long, cdw_long_long, cdw_short, cdw_signed, cdw_unsigned, cdw_complex, cdw_inline, cdw_noreturn, cdw_thread, cdw_const, cdw_volatile, cdw_restrict, cdw_atomic, cdw_saturating, cdw_alignas, cdw_address_space, cdw_gimple, cdw_rtl, cdw_number_of_elements /* This one must always be the last enumerator. */ }; enum c_declspec_il { cdil_none, cdil_gimple, /* __GIMPLE */ cdil_gimple_cfg, /* __GIMPLE(cfg) */ cdil_gimple_ssa, /* __GIMPLE(ssa) */ cdil_rtl /* __RTL */ }; /* A sequence of declaration specifiers in C. When a new declaration specifier is added, please update the enum c_declspec_word above accordingly. */ struct c_declspecs { location_t locations[cdw_number_of_elements]; /* The type specified, if a single type specifier such as a struct, union or enum specifier, typedef name or typeof specifies the whole type, or NULL_TREE if none or a keyword such as "void" or "char" is used. Does not include qualifiers. */ tree type; /* Any expression to be evaluated before the type, from a typeof specifier. */ tree expr; /* The attributes from a typedef decl. */ tree decl_attr; /* When parsing, the attributes. Outside the parser, this will be NULL; attributes (possibly from multiple lists) will be passed separately. */ tree attrs; /* The pass to start compiling a __GIMPLE or __RTL function with. */ char *gimple_or_rtl_pass; /* The base-2 log of the greatest alignment required by an _Alignas specifier, in bytes, or -1 if no such specifiers with nonzero alignment. */ int align_log; /* For the __intN declspec, this stores the index into the int_n_* arrays. */ int int_n_idx; /* For the _FloatN and _FloatNx declspec, this stores the index into the floatn_nx_types array. */ int floatn_nx_idx; /* The storage class specifier, or csc_none if none. 
*/ enum c_storage_class storage_class; /* Any type specifier keyword used such as "int", not reflecting modifiers such as "short", or cts_none if none. */ ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8; /* The kind of type specifier if one has been seen, ctsk_none otherwise. */ ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3; ENUM_BITFIELD (c_declspec_il) declspec_il : 3; /* Whether any expressions in typeof specifiers may appear in constant expressions. */ BOOL_BITFIELD expr_const_operands : 1; /* Whether any declaration specifiers have been seen at all. */ BOOL_BITFIELD declspecs_seen_p : 1; /* Whether something other than a storage class specifier or attribute has been seen. This is used to warn for the obsolescent usage of storage class specifiers other than at the start of the list. (Doing this properly would require function specifiers to be handled separately from storage class specifiers.) */ BOOL_BITFIELD non_sc_seen_p : 1; /* Whether the type is specified by a typedef or typeof name. */ BOOL_BITFIELD typedef_p : 1; /* Whether the type is explicitly "signed" or specified by a typedef whose type is explicitly "signed". */ BOOL_BITFIELD explicit_signed_p : 1; /* Whether the specifiers include a deprecated typedef. */ BOOL_BITFIELD deprecated_p : 1; /* Whether the type defaulted to "int" because there were no type specifiers. */ BOOL_BITFIELD default_int_p : 1; /* Whether "long" was specified. */ BOOL_BITFIELD long_p : 1; /* Whether "long" was specified more than once. */ BOOL_BITFIELD long_long_p : 1; /* Whether "short" was specified. */ BOOL_BITFIELD short_p : 1; /* Whether "signed" was specified. */ BOOL_BITFIELD signed_p : 1; /* Whether "unsigned" was specified. */ BOOL_BITFIELD unsigned_p : 1; /* Whether "complex" was specified. */ BOOL_BITFIELD complex_p : 1; /* Whether "inline" was specified. */ BOOL_BITFIELD inline_p : 1; /* Whether "_Noreturn" was specified. 
*/ BOOL_BITFIELD noreturn_p : 1; /* Whether "__thread" or "_Thread_local" was specified. */ BOOL_BITFIELD thread_p : 1; /* Whether "__thread" rather than "_Thread_local" was specified. */ BOOL_BITFIELD thread_gnu_p : 1; /* Whether "const" was specified. */ BOOL_BITFIELD const_p : 1; /* Whether "volatile" was specified. */ BOOL_BITFIELD volatile_p : 1; /* Whether "restrict" was specified. */ BOOL_BITFIELD restrict_p : 1; /* Whether "_Atomic" was specified. */ BOOL_BITFIELD atomic_p : 1; /* Whether "_Sat" was specified. */ BOOL_BITFIELD saturating_p : 1; /* Whether any alignment specifier (even with zero alignment) was specified. */ BOOL_BITFIELD alignas_p : 1; /* The address space that the declaration belongs to. */ addr_space_t address_space; }; /* The various kinds of declarators in C. */ enum c_declarator_kind { /* An identifier. */ cdk_id, /* A function. */ cdk_function, /* An array. */ cdk_array, /* A pointer. */ cdk_pointer, /* Parenthesized declarator with nested attributes. */ cdk_attrs }; struct c_arg_tag { /* The argument name. */ tree id; /* The type of the argument. */ tree type; }; /* Information about the parameters in a function declarator. */ struct c_arg_info { /* A list of parameter decls. */ tree parms; /* A list of structure, union and enum tags defined. */ vec<c_arg_tag, va_gc> *tags; /* A list of argument types to go in the FUNCTION_TYPE. */ tree types; /* A list of non-parameter decls (notably enumeration constants) defined with the parameters. */ tree others; /* A compound expression of VLA sizes from the parameters, or NULL. In a function definition, these are used to ensure that side-effects in sizes of arrays converted to pointers (such as a parameter int i[n++]) take place; otherwise, they are ignored. */ tree pending_sizes; /* True when these arguments had [*]. */ BOOL_BITFIELD had_vla_unspec : 1; }; /* A declarator. */ struct c_declarator { /* The kind of declarator. 
*/ enum c_declarator_kind kind; location_t id_loc; /* Currently only set for cdk_id, cdk_array. */ /* Except for cdk_id, the contained declarator. For cdk_id, NULL. */ struct c_declarator *declarator; union { /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract declarator. */ tree id; /* For functions. */ struct c_arg_info *arg_info; /* For arrays. */ struct { /* The array dimension, or NULL for [] and [*]. */ tree dimen; /* The qualifiers inside []. */ int quals; /* The attributes (currently ignored) inside []. */ tree attrs; /* Whether [static] was used. */ BOOL_BITFIELD static_p : 1; /* Whether [*] was used. */ BOOL_BITFIELD vla_unspec_p : 1; } array; /* For pointers, the qualifiers on the pointer type. */ int pointer_quals; /* For attributes. */ tree attrs; } u; }; /* A type name. */ struct c_type_name { /* The declaration specifiers. */ struct c_declspecs *specs; /* The declarator. */ struct c_declarator *declarator; }; /* A parameter. */ struct c_parm { /* The declaration specifiers, minus any prefix attributes. */ struct c_declspecs *specs; /* The attributes. */ tree attrs; /* The declarator. */ struct c_declarator *declarator; /* The location of the parameter. */ location_t loc; }; /* Used when parsing an enum. Initialized by start_enum. */ struct c_enum_contents { /* While defining an enum type, this is 1 plus the last enumerator constant value. */ tree enum_next_value; /* Nonzero means that there was overflow computing enum_next_value. */ int enum_overflow; }; /* A type of reference to a static identifier in an inline function. */ enum c_inline_static_type { /* Identifier with internal linkage used in function that may be an inline definition (i.e., file-scope static). */ csi_internal, /* Modifiable object with static storage duration defined in function that may be an inline definition (i.e., local static). 
*/ csi_modifiable }; /* in c-parser.c */ extern void c_parse_init (void); extern bool c_keyword_starts_typename (enum rid keyword); /* in c-aux-info.c */ extern void gen_aux_info_record (tree, int, int, int); /* in c-decl.c */ struct c_spot_bindings; struct c_struct_parse_info; extern struct obstack parser_obstack; extern tree c_break_label; extern tree c_cont_label; extern bool global_bindings_p (void); extern tree pushdecl (tree); extern void push_scope (void); extern tree pop_scope (void); extern void c_bindings_start_stmt_expr (struct c_spot_bindings *); extern void c_bindings_end_stmt_expr (struct c_spot_bindings *); extern void record_inline_static (location_t, tree, tree, enum c_inline_static_type); extern void c_init_decl_processing (void); extern void c_print_identifier (FILE *, tree, int); extern int quals_from_declspecs (const struct c_declspecs *); extern struct c_declarator *build_array_declarator (location_t, tree, struct c_declspecs *, bool, bool); extern tree build_enumerator (location_t, location_t, struct c_enum_contents *, tree, tree); extern tree check_for_loop_decls (location_t, bool); extern void mark_forward_parm_decls (void); extern void declare_parm_level (void); extern void undeclared_variable (location_t, tree); extern tree lookup_label_for_goto (location_t, tree); extern tree declare_label (tree); extern tree define_label (location_t, tree); extern struct c_spot_bindings *c_get_switch_bindings (void); extern void c_release_switch_bindings (struct c_spot_bindings *); extern bool c_check_switch_jump_warnings (struct c_spot_bindings *, location_t, location_t); extern void finish_decl (tree, location_t, tree, tree, tree); extern tree finish_enum (tree, tree, tree); extern void finish_function (location_t = input_location); extern tree finish_struct (location_t, tree, tree, tree, struct c_struct_parse_info *); extern struct c_arg_info *build_arg_info (void); extern struct c_arg_info *get_parm_info (bool, tree); extern tree grokfield 
(location_t, struct c_declarator *, struct c_declspecs *, tree, tree *); extern tree groktypename (struct c_type_name *, tree *, bool *); extern tree grokparm (const struct c_parm *, tree *); extern tree implicitly_declare (location_t, tree); extern void keep_next_level (void); extern void pending_xref_error (void); extern void c_push_function_context (void); extern void c_pop_function_context (void); extern void push_parm_decl (const struct c_parm *, tree *); extern struct c_declarator *set_array_declarator_inner (struct c_declarator *, struct c_declarator *); extern tree c_builtin_function (tree); extern tree c_builtin_function_ext_scope (tree); extern void shadow_tag (const struct c_declspecs *); extern void shadow_tag_warned (const struct c_declspecs *, int); extern tree start_enum (location_t, struct c_enum_contents *, tree); extern bool start_function (struct c_declspecs *, struct c_declarator *, tree); extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool, tree); extern tree start_struct (location_t, enum tree_code, tree, struct c_struct_parse_info **); extern void store_parm_decls (void); extern void store_parm_decls_from (struct c_arg_info *); extern void temp_store_parm_decls (tree, tree); extern void temp_pop_parm_decls (void); extern tree xref_tag (enum tree_code, tree); extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree); extern struct c_parm *build_c_parm (struct c_declspecs *, tree, struct c_declarator *, location_t); extern struct c_declarator *build_attrs_declarator (tree, struct c_declarator *); extern struct c_declarator *build_function_declarator (struct c_arg_info *, struct c_declarator *); extern struct c_declarator *build_id_declarator (tree); extern struct c_declarator *make_pointer_declarator (struct c_declspecs *, struct c_declarator *); extern struct c_declspecs *build_null_declspecs (void); extern struct c_declspecs *declspecs_add_qual (location_t, struct c_declspecs *, tree); extern struct 
c_declspecs *declspecs_add_type (location_t, struct c_declspecs *, struct c_typespec); extern struct c_declspecs *declspecs_add_scspec (location_t, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_attrs (location_t, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_addrspace (location_t, struct c_declspecs *, addr_space_t); extern struct c_declspecs *declspecs_add_alignas (location_t, struct c_declspecs *, tree); extern struct c_declspecs *finish_declspecs (struct c_declspecs *); /* in c-objc-common.c */ extern bool c_objc_common_init (void); extern bool c_missing_noreturn_ok_p (tree); extern bool c_warn_unused_global_decl (const_tree); extern void c_initialize_diagnostics (diagnostic_context *); extern bool c_vla_unspec_p (tree x, tree fn); extern alias_set_type c_get_alias_set (tree); /* in c-typeck.c */ extern int in_alignof; extern int in_sizeof; extern int in_typeof; extern tree c_last_sizeof_arg; extern location_t c_last_sizeof_loc; extern struct c_switch *c_switch_stack; extern tree c_objc_common_truthvalue_conversion (location_t, tree); extern tree require_complete_type (location_t, tree); extern bool same_translation_unit_p (const_tree, const_tree); extern int comptypes (tree, tree); extern int comptypes_check_different_types (tree, tree, bool *); extern bool c_vla_type_p (const_tree); extern bool c_mark_addressable (tree, bool = false); extern void c_incomplete_type_error (location_t, const_tree, const_tree); extern tree c_type_promotes_to (tree); extern struct c_expr default_function_array_conversion (location_t, struct c_expr); extern struct c_expr default_function_array_read_conversion (location_t, struct c_expr); extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr, bool, bool); extern tree decl_constant_value_1 (tree, bool); extern void mark_exp_read (tree); extern tree composite_type (tree, tree); extern tree build_component_ref (location_t, tree, tree, location_t); extern tree 
build_array_ref (location_t, tree, tree); extern tree build_external_ref (location_t, tree, bool, tree *); extern void pop_maybe_used (bool); extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr); extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *); extern struct c_expr parser_build_unary_op (location_t, enum tree_code, struct c_expr); extern struct c_expr parser_build_binary_op (location_t, enum tree_code, struct c_expr, struct c_expr); extern tree build_conditional_expr (location_t, tree, bool, tree, tree, location_t, tree, tree, location_t); extern tree build_compound_expr (location_t, tree, tree); extern tree c_cast_expr (location_t, struct c_type_name *, tree); extern tree build_c_cast (location_t, tree, tree); extern void store_init_value (location_t, tree, tree, tree); extern void maybe_warn_string_init (location_t, tree, struct c_expr); extern void start_init (tree, tree, int, rich_location *); extern void finish_init (void); extern void really_start_incremental_init (tree); extern void finish_implicit_inits (location_t, struct obstack *); extern void push_init_level (location_t, int, struct obstack *); extern struct c_expr pop_init_level (location_t, int, struct obstack *, location_t); extern void set_init_index (location_t, tree, tree, struct obstack *); extern void set_init_label (location_t, tree, location_t, struct obstack *); extern void process_init_element (location_t, struct c_expr, bool, struct obstack *); extern tree build_compound_literal (location_t, tree, tree, bool, unsigned int); extern void check_compound_literal_type (location_t, struct c_type_name *); extern tree c_start_case (location_t, location_t, tree, bool); extern void c_finish_case (tree, tree); extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool, bool); extern tree build_asm_stmt (bool, tree); extern int c_types_compatible_p (tree, tree); extern tree c_begin_compound_stmt (bool); extern tree c_end_compound_stmt 
(location_t, tree, bool); extern void c_finish_if_stmt (location_t, tree, tree, tree); extern void c_finish_loop (location_t, location_t, tree, location_t, tree, tree, tree, tree, bool); extern tree c_begin_stmt_expr (void); extern tree c_finish_stmt_expr (location_t, tree); extern tree c_process_expr_stmt (location_t, tree); extern tree c_finish_expr_stmt (location_t, tree); extern tree c_finish_return (location_t, tree, tree); extern tree c_finish_bc_stmt (location_t, tree *, bool); extern tree c_finish_goto_label (location_t, tree); extern tree c_finish_goto_ptr (location_t, tree); extern tree c_expr_to_decl (tree, bool *, bool *); extern tree c_finish_omp_construct (location_t, enum tree_code, tree, tree); extern tree c_finish_oacc_data (location_t, tree, tree); extern tree c_finish_oacc_host_data (location_t, tree, tree); extern tree c_begin_omp_parallel (void); extern tree c_finish_omp_parallel (location_t, tree, tree); extern tree c_begin_omp_task (void); extern tree c_finish_omp_task (location_t, tree, tree); extern void c_finish_omp_cancel (location_t, tree); extern void c_finish_omp_cancellation_point (location_t, tree); extern tree c_finish_omp_clauses (tree, enum c_omp_region_type); extern tree c_build_va_arg (location_t, tree, location_t, tree); extern tree c_finish_transaction (location_t, tree, int); extern bool c_tree_equal (tree, tree); extern tree c_build_function_call_vec (location_t, vec<location_t>, tree, vec<tree, va_gc> *, vec<tree, va_gc> *); extern tree c_omp_clause_copy_ctor (tree, tree, tree); /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ extern int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ extern int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. 
*/ extern int current_function_returns_abnormally; /* In c-decl.c */ /* Tell the binding oracle what kind of binding we are looking for. */ enum c_oracle_request { C_ORACLE_SYMBOL, C_ORACLE_TAG, C_ORACLE_LABEL }; /* If this is non-NULL, then it is a "binding oracle" which can lazily create bindings when needed by the C compiler. The oracle is told the name and type of the binding to create. It can call pushdecl or the like to ensure the binding is visible; or do nothing, leaving the binding untouched. c-decl.c takes note of when the oracle has been called and will not call it again if it fails to create a given binding. */ typedef void c_binding_oracle_function (enum c_oracle_request, tree identifier); extern c_binding_oracle_function *c_binding_oracle; extern void c_finish_incomplete_decl (tree); extern tree c_omp_reduction_id (enum tree_code, tree); extern tree c_omp_reduction_decl (tree); extern tree c_omp_reduction_lookup (tree, tree); extern tree c_check_omp_declare_reduction_r (tree *, int *, void *); extern void c_pushtag (location_t, tree, tree); extern void c_bind (location_t, tree, bool); extern bool tag_exists_p (enum tree_code, tree); /* In c-errors.c */ extern bool pedwarn_c90 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); extern bool pedwarn_c99 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); extern bool pedwarn_c11 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); extern void set_c_expr_source_range (c_expr *expr, location_t start, location_t finish); extern void set_c_expr_source_range (c_expr *expr, source_range src_range); /* In c-fold.c */ extern vec<tree> incomplete_record_decls; #if CHECKING_P namespace selftest { extern void run_c_tests (void); } // namespace selftest #endif /* #if CHECKING_P */ #endif /* ! GCC_C_TREE_H */
target.c
#pragma omp target [clauses] structured-block
nanort.h
// // NanoRT, single header only modern ray tracing kernel. // /* The MIT License (MIT) Copyright (c) 2015 - 2016 Light Transport Entertainment, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef NANORT_H_ #define NANORT_H_ #include <algorithm> #include <cassert> #include <cmath> #include <cstdio> #include <cstdlib> #include <cstring> #include <functional> #include <limits> #include <memory> #include <queue> #include <string> #include <vector> namespace nanort { #ifdef __clang__ #pragma clang diagnostic push #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #endif // Parallelized BVH build is not yet fully tested, // thus turn off if you face a problem when building BVH. #define NANORT_ENABLE_PARALLEL_BUILD (1) // ---------------------------------------------------------------------------- // Small vector class useful for multi-threaded environment. 
// stack_container.h
//
// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This allocator can be used with STL containers to provide a stack buffer
// from which to allocate memory and overflows onto the heap. This stack buffer
// would be allocated on the stack and allows us to avoid heap operations in
// some situations.
//
// STL likes to make copies of allocators, so the allocator itself can't hold
// the data. Instead, we make the creator responsible for creating a
// StackAllocator::Source which contains the data. Copying the allocator
// merely copies the pointer to this shared source, so all allocators created
// based on our allocator will share the same stack buffer.
//
// This stack buffer implementation is very simple. The first allocation that
// fits in the stack buffer will use the stack buffer. Any subsequent
// allocations will not use the stack buffer, even if there is unused room.
// This makes it appropriate for array-like containers, but the caller should
// be sure to reserve() in the container up to the stack buffer size. Otherwise
// the container will allocate a small array which will "use up" the stack
// buffer.
template <typename T, size_t stack_capacity>
class StackAllocator : public std::allocator<T> {
 public:
  typedef typename std::allocator<T>::pointer pointer;
  typedef typename std::allocator<T>::size_type size_type;

  // Backing store for the allocator. The container owner is responsible for
  // maintaining this for as long as any containers using this allocator are
  // live.
  struct Source {
    Source() : used_stack_buffer_(false) {}

    // Casts the buffer in its right type.
    T *stack_buffer() { return reinterpret_cast<T *>(stack_buffer_); }
    const T *stack_buffer() const {
      return reinterpret_cast<const T *>(stack_buffer_);
    }

    //
    // IMPORTANT: Take care to ensure that stack_buffer_ is aligned
    // since it is used to mimic an array of T.
    // Be careful while declaring any unaligned types (like bool)
    // before stack_buffer_.
    //

    // The buffer itself. It is not of type T because we don't want the
    // constructors and destructors to be automatically called. Define a POD
    // buffer of the right size instead.
    char stack_buffer_[sizeof(T[stack_capacity])];

    // Set when the stack buffer is used for an allocation. We do not track
    // how much of the buffer is used, only that somebody is using it.
    bool used_stack_buffer_;
  };

  // Used by containers when they want to refer to an allocator of type U.
  template <typename U>
  struct rebind {
    typedef StackAllocator<U, stack_capacity> other;
  };

  // For the straight up copy c-tor, we can share storage.
  StackAllocator(const StackAllocator<T, stack_capacity> &rhs)
      : source_(rhs.source_) {}

  // ISO C++ requires the following constructor to be defined,
  // and std::vector in VC++2008SP1 Release fails with an error
  // in the class _Container_base_aux_alloc_real (from <xutility>)
  // if the constructor does not exist.
  // For this constructor, we cannot share storage; there's
  // no guarantee that the Source buffer of Ts is large enough
  // for Us.
  // TODO(Google): If we were fancy pants, perhaps we could share storage
  // iff sizeof(T) == sizeof(U).
  template <typename U, size_t other_capacity>
  StackAllocator(const StackAllocator<U, other_capacity> &other)
      : source_(NULL) {
    (void)other;
  }

  explicit StackAllocator(Source *source) : source_(source) {}

  // Actually do the allocation. Use the stack buffer if nobody has used it yet
  // and the size requested fits. Otherwise, fall through to the standard
  // allocator.
  pointer allocate(size_type n, void *hint = 0) {
    if (source_ != NULL && !source_->used_stack_buffer_ &&
        n <= stack_capacity) {
      source_->used_stack_buffer_ = true;
      return source_->stack_buffer();
    } else {
      return std::allocator<T>::allocate(n, hint);
    }
  }

  // Free: when trying to free the stack buffer, just mark it as free. For
  // non-stack-buffer pointers, just fall though to the standard allocator.
  void deallocate(pointer p, size_type n) {
    if (source_ != NULL && p == source_->stack_buffer())
      source_->used_stack_buffer_ = false;
    else
      std::allocator<T>::deallocate(p, n);
  }

 private:
  Source *source_;
};

// A wrapper around STL containers that maintains a stack-sized buffer that the
// initial capacity of the vector is based on. Growing the container beyond the
// stack capacity will transparently overflow onto the heap. The container must
// support reserve().
//
// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
// type. This object is really intended to be used only internally. You'll want
// to use the wrappers below for different types.
template <typename TContainerType, int stack_capacity>
class StackContainer {
 public:
  typedef TContainerType ContainerType;
  typedef typename ContainerType::value_type ContainedType;
  typedef StackAllocator<ContainedType, stack_capacity> Allocator;

  // Allocator must be constructed before the container!
  StackContainer() : allocator_(&stack_data_), container_(allocator_) {
    // Make the container use the stack allocation by reserving our buffer size
    // before doing anything else.
    container_.reserve(stack_capacity);
  }

  // Getters for the actual container.
  //
  // Danger: any copies of this made using the copy constructor must have
  // shorter lifetimes than the source. The copy will share the same allocator
  // and therefore the same stack buffer as the original. Use std::copy to
  // copy into a "real" container for longer-lived objects.
  ContainerType &container() { return container_; }
  const ContainerType &container() const { return container_; }

  // Support operator-> to get to the container. This allows nicer syntax like:
  //   StackContainer<...> foo;
  //   std::sort(foo->begin(), foo->end());
  ContainerType *operator->() { return &container_; }
  const ContainerType *operator->() const { return &container_; }

#ifdef UNIT_TEST
  // Retrieves the stack source so that that unit tests can verify that the
  // buffer is being used properly.
  const typename Allocator::Source &stack_data() const { return stack_data_; }
#endif

 protected:
  typename Allocator::Source stack_data_;
  // Padding between the Source's trailing bool and the allocator; see the
  // alignment note on Source::stack_buffer_ above.
  unsigned char pad_[7];
  Allocator allocator_;
  ContainerType container_;

  // DISALLOW_EVIL_CONSTRUCTORS(StackContainer);
  StackContainer(const StackContainer &);
  void operator=(const StackContainer &);
};

// StackVector
//
// Example:
//   StackVector<int, 16> foo;
//   foo->push_back(22);  // we have overloaded operator->
//   foo[0] = 10;         // as well as operator[]
template <typename T, size_t stack_capacity>
class StackVector
    : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                            stack_capacity> {
 public:
  StackVector()
      : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                       stack_capacity>() {}

  // We need to put this in STL containers sometimes, which requires a copy
  // constructor. We can't call the regular copy constructor because that will
  // take the stack buffer from the original. Here, we create an empty object
  // and make a stack buffer of its own.
  StackVector(const StackVector<T, stack_capacity> &other)
      : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                       stack_capacity>() {
    this->container().assign(other->begin(), other->end());
  }

  StackVector<T, stack_capacity> &operator=(
      const StackVector<T, stack_capacity> &other) {
    this->container().assign(other->begin(), other->end());
    return *this;
  }

  // Vectors are commonly indexed, which isn't very convenient even with
  // operator-> (using "->at()" does exception stuff we don't want).
  T &operator[](size_t i) { return this->container().operator[](i); }
  const T &operator[](size_t i) const {
    return this->container().operator[](i);
  }
};

// ----------------------------------------------------------------------------

// Minimal 3-component vector used throughout the BVH builder and traverser.
template <typename T = float>
class real3 {
 public:
  real3() {}
  real3(T x) {
    v[0] = x;
    v[1] = x;
    v[2] = x;
  }
  real3(T xx, T yy, T zz) {
    v[0] = xx;
    v[1] = yy;
    v[2] = zz;
  }
  explicit real3(const T *p) {
    v[0] = p[0];
    v[1] = p[1];
    v[2] = p[2];
  }

  inline T x() const { return v[0]; }
  inline T y() const { return v[1]; }
  inline T z() const { return v[2]; }

  real3 operator*(T f) const { return real3(x() * f, y() * f, z() * f); }
  real3 operator-(const real3 &f2) const {
    return real3(x() - f2.x(), y() - f2.y(), z() - f2.z());
  }
  // NOTE: component-wise (Hadamard) product, not a dot product.
  real3 operator*(const real3 &f2) const {
    return real3(x() * f2.x(), y() * f2.y(), z() * f2.z());
  }
  real3 operator+(const real3 &f2) const {
    return real3(x() + f2.x(), y() + f2.y(), z() + f2.z());
  }
  real3 &operator+=(const real3 &f2) {
    v[0] += f2.x();
    v[1] += f2.y();
    v[2] += f2.z();
    return (*this);
  }
  // NOTE: component-wise division; no divide-by-zero guard here.
  real3 operator/(const real3 &f2) const {
    return real3(x() / f2.x(), y() / f2.y(), z() / f2.z());
  }
  real3 operator-() const { return real3(-x(), -y(), -z()); }
  T operator[](int i) const { return v[i]; }
  T &operator[](int i) { return v[i]; }

  T v[3];
  // T pad;  // for alignment(when T = float)
};

template <typename T>
inline real3<T> operator*(T f, const real3<T> &v) {
  return real3<T>(v.x() * f, v.y() * f, v.z() * f);
}

// Component-wise negation.
template <typename T>
inline real3<T> vneg(const real3<T> &rhs) {
  return real3<T>(-rhs.x(), -rhs.y(), -rhs.z());
}

// Euclidean length.
template <typename T>
inline T vlength(const real3<T> &rhs) {
  return std::sqrt(rhs.x() * rhs.x() + rhs.y() * rhs.y() + rhs.z() * rhs.z());
}

// Returns a unit-length copy of `rhs`; near-zero vectors are returned
// unchanged to avoid dividing by (almost) zero.
template <typename T>
inline real3<T> vnormalize(const real3<T> &rhs) {
  real3<T> v = rhs;
  T len = vlength(rhs);
  if (std::fabs(len) > std::numeric_limits<T>::epsilon()) {
    T inv_len = static_cast<T>(1.0) / len;
    v.v[0] *= inv_len;
    v.v[1] *= inv_len;
    v.v[2] *= inv_len;
  }
  return v;
}

// Cross product a x b.
template <typename T>
inline real3<T> vcross(const real3<T> a, const real3<T> b) {
  real3<T> c;
  c[0] = a[1] * b[2] - a[2] * b[1];
  c[1] = a[2] * b[0] - a[0] * b[2];
  c[2] = a[0] * b[1] - a[1] * b[0];
  return c;
}

// Dot product.
template <typename T>
inline T vdot(const real3<T> a, const real3<T> b) {
  return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}

// Component-wise reciprocal; components that are (nearly) zero map to
// +infinity instead of producing a division by zero.
template <typename T>
inline real3<T> vsafe_inverse(const real3<T> v) {
  real3<T> r;

  // TODO(LTE): Handle signed zero using std::signbit() or std::copysign() when
  // C++11 compiler is available.
  if (std::fabs(v[0]) < std::numeric_limits<T>::epsilon()) {
    r[0] = std::numeric_limits<T>::infinity();
  } else {
    r[0] = static_cast<T>(1.0) / v[0];
  }

  if (std::fabs(v[1]) < std::numeric_limits<T>::epsilon()) {
    r[1] = std::numeric_limits<T>::infinity();
  } else {
    r[1] = static_cast<T>(1.0) / v[1];
  }

  if (std::fabs(v[2]) < std::numeric_limits<T>::epsilon()) {
    r[2] = std::numeric_limits<T>::infinity();
  } else {
    r[2] = static_cast<T>(1.0) / v[2];
  }

  return r;
}

// Returns the address of vertex `idx` in a buffer whose records are
// `stride_bytes` apart (supports interleaved vertex layouts).
template <typename real>
inline const real *get_vertex_addr(const real *p, const size_t idx,
                                   const size_t stride_bytes) {
  return reinterpret_cast<const real *>(
      reinterpret_cast<const unsigned char *>(p) + idx * stride_bytes);
}

template <typename T = float>
class Ray {
 public:
  // Defaults: origin at (0,0,0), direction (0,0,-1), full [0, max] t-range.
  Ray() : min_t(static_cast<T>(0.0)), max_t(std::numeric_limits<T>::max()) {
    org[0] = static_cast<T>(0.0);
    org[1] = static_cast<T>(0.0);
    org[2] = static_cast<T>(0.0);
    dir[0] = static_cast<T>(0.0);
    dir[1] = static_cast<T>(0.0);
    dir[2] = static_cast<T>(-1.0);
  }

  T org[3];         // must set
  T dir[3];         // must set
  T min_t;          // minimum ray hit distance.
  T max_t;          // maximum ray hit distance.
  T inv_dir[3];     // filled internally
  int dir_sign[3];  // filled internally
};

template <typename T = float>
class BVHNode {
 public:
  BVHNode() {}
  BVHNode(const BVHNode &rhs) {
    bmin[0] = rhs.bmin[0];
    bmin[1] = rhs.bmin[1];
    bmin[2] = rhs.bmin[2];
    flag = rhs.flag;

    bmax[0] = rhs.bmax[0];
    bmax[1] = rhs.bmax[1];
    bmax[2] = rhs.bmax[2];
    axis = rhs.axis;

    data[0] = rhs.data[0];
    data[1] = rhs.data[1];
  }

  BVHNode &operator=(const BVHNode &rhs) {
    bmin[0] = rhs.bmin[0];
    bmin[1] = rhs.bmin[1];
    bmin[2] = rhs.bmin[2];
    flag = rhs.flag;

    bmax[0] = rhs.bmax[0];
    bmax[1] = rhs.bmax[1];
    bmax[2] = rhs.bmax[2];
    axis = rhs.axis;

    data[0] = rhs.data[0];
    data[1] = rhs.data[1];

    return (*this);
  }

  ~BVHNode() {}

  T bmin[3];
  T bmax[3];

  int flag;  // 1 = leaf node, 0 = branch node
  int axis;

  // leaf
  //   data[0] = npoints
  //   data[1] = index
  //
  // branch
  //   data[0] = child[0]
  //   data[1] = child[1]
  unsigned int data[2];
};

// Orders intersections by ascending hit distance `t`.
template <class H>
class IntersectComparator {
 public:
  bool operator()(const H &a, const H &b) const { return a.t < b.t; }
};

/// BVH build option.
template <typename T = float>
struct BVHBuildOptions {
  T cost_t_aabb;
  unsigned int min_leaf_primitives;
  unsigned int max_tree_depth;
  unsigned int bin_size;
  unsigned int shallow_depth;
  unsigned int min_primitives_for_parallel_build;

  // Cache bounding box computation.
  // Requires more memory, but BVHbuild can be faster.
  bool cache_bbox;
  unsigned char pad[3];

  // Set default value: Taabb = 0.2
  BVHBuildOptions()
      : cost_t_aabb(static_cast<T>(0.2)),
        min_leaf_primitives(4),
        max_tree_depth(256),
        bin_size(64),
        shallow_depth(3),
        min_primitives_for_parallel_build(1024 * 128),
        cache_bbox(false) {}
};

/// BVH build statistics.
class BVHBuildStatistics {
 public:
  unsigned int max_tree_depth;
  unsigned int num_leaf_nodes;
  unsigned int num_branch_nodes;
  float build_secs;

  // Set default value: Taabb = 0.2
  BVHBuildStatistics()
      : max_tree_depth(0),
        num_leaf_nodes(0),
        num_branch_nodes(0),
        build_secs(0.0f) {}
};

/// BVH trace option.
class BVHTraceOptions { public: // Hit only for face IDs in indexRange. // This feature is good to mimic something like glDrawArrays() unsigned int prim_ids_range[2]; bool cull_back_face; unsigned char pad[3]; ///< Padding(not used) BVHTraceOptions() { prim_ids_range[0] = 0; prim_ids_range[1] = 0x7FFFFFFF; // Up to 2G face IDs. cull_back_face = false; } }; template <typename T> class BBox { public: real3<T> bmin; real3<T> bmax; BBox() { bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max(); bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max(); } }; template <typename T> class NodeHit { public: NodeHit() : t_min(std::numeric_limits<T>::max()), t_max(-std::numeric_limits<T>::max()), node_id(static_cast<unsigned int>(-1)) {} NodeHit(const NodeHit<T> &rhs) { t_min = rhs.t_min; t_max = rhs.t_max; node_id = rhs.node_id; } NodeHit &operator=(const NodeHit<T> &rhs) { t_min = rhs.t_min; t_max = rhs.t_max; node_id = rhs.node_id; return (*this); } ~NodeHit() {} T t_min; T t_max; unsigned int node_id; }; template <typename T> class NodeHitComparator { public: inline bool operator()(const NodeHit<T> &a, const NodeHit<T> &b) { return a.t_min < b.t_min; } }; template <typename T> class BVHAccel { public: BVHAccel() : pad0_(0) { (void)pad0_; } ~BVHAccel() {} /// /// Build BVH for input primitives. /// template <class P, class Pred> bool Build(const unsigned int num_primitives, const P &p, const Pred &pred, const BVHBuildOptions<T> &options = BVHBuildOptions<T>()); /// /// Get statistics of built BVH tree. Valid after Build() /// BVHBuildStatistics GetStatistics() const { return stats_; } /// /// Dump built BVH to the file. 
/// bool Dump(const char *filename); /// /// Load BVH binary /// bool Load(const char *filename); void Debug(); /// /// Traverse into BVH along ray and find closest hit point & primitive if /// found /// template <class I, class H> bool Traverse(const Ray<T> &ray, const I &intersector, H *isect, const BVHTraceOptions &options = BVHTraceOptions()) const; #if 0 /// Multi-hit ray traversal /// Returns `max_intersections` frontmost intersections template<class I, class H, class Comp> bool MultiHitTraverse(const Ray<T> &ray, int max_intersections, const I &intersector, StackVector<H, 128> *isects, const BVHTraceOptions &options = BVHTraceOptions()) const; #endif /// /// List up nodes which intersects along the ray. /// This function is useful for two-level BVH traversal. /// template <class I> bool ListNodeIntersections(const Ray<T> &ray, int max_intersections, const I &intersector, StackVector<NodeHit<T>, 128> *hits) const; const std::vector<BVHNode<T> > &GetNodes() const { return nodes_; } const std::vector<unsigned int> &GetIndices() const { return indices_; } /// /// Returns bounding box of built BVH. /// void BoundingBox(T bmin[3], T bmax[3]) const { if (nodes_.empty()) { bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max(); bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max(); } else { bmin[0] = nodes_[0].bmin[0]; bmin[1] = nodes_[0].bmin[1]; bmin[2] = nodes_[0].bmin[2]; bmax[0] = nodes_[0].bmax[0]; bmax[1] = nodes_[0].bmax[1]; bmax[2] = nodes_[0].bmax[2]; } } bool IsValid() const { return nodes_.size() > 0; } private: #if NANORT_ENABLE_PARALLEL_BUILD typedef struct { unsigned int left_idx; unsigned int right_idx; unsigned int offset; } ShallowNodeInfo; // Used only during BVH construction std::vector<ShallowNodeInfo> shallow_node_infos_; /// Builds shallow BVH tree recursively. 
template <class P, class Pred> unsigned int BuildShallowTree(std::vector<BVHNode<T> > *out_nodes, unsigned int left_idx, unsigned int right_idx, unsigned int depth, unsigned int max_shallow_depth, const P &p, const Pred &pred); #endif /// Builds BVH tree recursively. template <class P, class Pred> unsigned int BuildTree(BVHBuildStatistics *out_stat, std::vector<BVHNode<T> > *out_nodes, unsigned int left_idx, unsigned int right_idx, unsigned int depth, const P &p, const Pred &pred); template <class I> bool TestLeafNode(const BVHNode<T> &node, const Ray<T> &ray, const I &intersector) const; template <class I> bool TestLeafNodeIntersections( const BVHNode<T> &node, const Ray<T> &ray, const int max_intersections, const I &intersector, std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >, NodeHitComparator<T> > *isect_pq) const; #if 0 template<class I, class H, class Comp> bool MultiHitTestLeafNode(std::priority_queue<H, std::vector<H>, Comp> *isect_pq, int max_intersections, const BVHNode<T> &node, const Ray<T> &ray, const I &intersector) const; #endif std::vector<BVHNode<T> > nodes_; std::vector<unsigned int> indices_; // max 4G triangles. std::vector<BBox<T> > bboxes_; BVHBuildOptions<T> options_; BVHBuildStatistics stats_; unsigned int pad0_; }; // Predefined SAH predicator for triangle. template <typename T = float> class TriangleSAHPred { public: TriangleSAHPred( const T *vertices, const unsigned int *faces, size_t vertex_stride_bytes) // e.g. 
12 for sizeof(float) * XYZ : axis_(0), pos_(static_cast<T>(0.0)), vertices_(vertices), faces_(faces), vertex_stride_bytes_(vertex_stride_bytes) {} void Set(int axis, T pos) const { axis_ = axis; pos_ = pos; } bool operator()(unsigned int i) const { int axis = axis_; T pos = pos_; unsigned int i0 = faces_[3 * i + 0]; unsigned int i1 = faces_[3 * i + 1]; unsigned int i2 = faces_[3 * i + 2]; real3<T> p0(get_vertex_addr<T>(vertices_, i0, vertex_stride_bytes_)); real3<T> p1(get_vertex_addr<T>(vertices_, i1, vertex_stride_bytes_)); real3<T> p2(get_vertex_addr<T>(vertices_, i2, vertex_stride_bytes_)); T center = p0[axis] + p1[axis] + p2[axis]; return (center < pos * static_cast<T>(3.0)); } private: mutable int axis_; mutable T pos_; const T *vertices_; const unsigned int *faces_; const size_t vertex_stride_bytes_; }; // Predefined Triangle mesh geometry. template <typename T = float> class TriangleMesh { public: TriangleMesh( const T *vertices, const unsigned int *faces, const size_t vertex_stride_bytes) // e.g. 12 for sizeof(float) * XYZ : vertices_(vertices), faces_(faces), vertex_stride_bytes_(vertex_stride_bytes) {} /// Compute bounding box for `prim_index`th triangle. /// This function is called for each primitive in BVH build. 
void BoundingBox(real3<T> *bmin, real3<T> *bmax, unsigned int prim_index) const { (*bmin)[0] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[0]; (*bmin)[1] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[1]; (*bmin)[2] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[2]; (*bmax)[0] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[0]; (*bmax)[1] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[1]; (*bmax)[2] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[2]; for (unsigned int i = 1; i < 3; i++) { for (unsigned int k = 0; k < 3; k++) { if ((*bmin)[static_cast<int>(k)] > get_vertex_addr<T>(vertices_, faces_[3 * prim_index + i], vertex_stride_bytes_)[k]) { (*bmin)[static_cast<int>(k)] = get_vertex_addr<T>( vertices_, faces_[3 * prim_index + i], vertex_stride_bytes_)[k]; } if ((*bmax)[static_cast<int>(k)] < get_vertex_addr<T>(vertices_, faces_[3 * prim_index + i], vertex_stride_bytes_)[k]) { (*bmax)[static_cast<int>(k)] = get_vertex_addr<T>( vertices_, faces_[3 * prim_index + i], vertex_stride_bytes_)[k]; } } } } const T *vertices_; const unsigned int *faces_; const size_t vertex_stride_bytes_; }; template <typename T = float> class TriangleIntersection { public: T u; T v; // Required member variables. T t; unsigned int prim_id; }; template <typename T = float, class H = TriangleIntersection<T> > class TriangleIntersector { public: TriangleIntersector(const T *vertices, const unsigned int *faces, const size_t vertex_stride_bytes) // e.g. // vertex_stride_bytes // = 12 = sizeof(float) // * 3 : vertices_(vertices), faces_(faces), vertex_stride_bytes_(vertex_stride_bytes) {} // For Watertight Ray/Triangle Intersection. 
typedef struct { T Sx; T Sy; T Sz; int kx; int ky; int kz; } RayCoeff; /// Do ray interesection stuff for `prim_index` th primitive and return hit /// distance `t`, /// varycentric coordinate `u` and `v`. /// Returns true if there's intersection. bool Intersect(T *t_inout, const unsigned int prim_index) const { if ((prim_index < trace_options_.prim_ids_range[0]) || (prim_index >= trace_options_.prim_ids_range[1])) { return false; } const unsigned int f0 = faces_[3 * prim_index + 0]; const unsigned int f1 = faces_[3 * prim_index + 1]; const unsigned int f2 = faces_[3 * prim_index + 2]; const real3<T> p0(get_vertex_addr(vertices_, f0 + 0, vertex_stride_bytes_)); const real3<T> p1(get_vertex_addr(vertices_, f1 + 0, vertex_stride_bytes_)); const real3<T> p2(get_vertex_addr(vertices_, f2 + 0, vertex_stride_bytes_)); const real3<T> A = p0 - ray_org_; const real3<T> B = p1 - ray_org_; const real3<T> C = p2 - ray_org_; const T Ax = A[ray_coeff_.kx] - ray_coeff_.Sx * A[ray_coeff_.kz]; const T Ay = A[ray_coeff_.ky] - ray_coeff_.Sy * A[ray_coeff_.kz]; const T Bx = B[ray_coeff_.kx] - ray_coeff_.Sx * B[ray_coeff_.kz]; const T By = B[ray_coeff_.ky] - ray_coeff_.Sy * B[ray_coeff_.kz]; const T Cx = C[ray_coeff_.kx] - ray_coeff_.Sx * C[ray_coeff_.kz]; const T Cy = C[ray_coeff_.ky] - ray_coeff_.Sy * C[ray_coeff_.kz]; T U = Cx * By - Cy * Bx; T V = Ax * Cy - Ay * Cx; T W = Bx * Ay - By * Ax; #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wfloat-equal" #endif // Fall back to test against edges using double precision. 
if (U == static_cast<T>(0.0) || V == static_cast<T>(0.0) || W == static_cast<T>(0.0)) { double CxBy = static_cast<double>(Cx) * static_cast<double>(By); double CyBx = static_cast<double>(Cy) * static_cast<double>(Bx); U = static_cast<T>(CxBy - CyBx); double AxCy = static_cast<double>(Ax) * static_cast<double>(Cy); double AyCx = static_cast<double>(Ay) * static_cast<double>(Cx); V = static_cast<T>(AxCy - AyCx); double BxAy = static_cast<double>(Bx) * static_cast<double>(Ay); double ByAx = static_cast<double>(By) * static_cast<double>(Ax); W = static_cast<T>(BxAy - ByAx); } if (trace_options_.cull_back_face) { if (U < static_cast<T>(0.0) || V < static_cast<T>(0.0) || W < static_cast<T>(0.0)) return false; } else { if ((U < static_cast<T>(0.0) || V < static_cast<T>(0.0) || W < static_cast<T>(0.0)) && (U > static_cast<T>(0.0) || V > static_cast<T>(0.0) || W > static_cast<T>(0.0))) { return false; } } T det = U + V + W; if (det == static_cast<T>(0.0)) return false; #ifdef __clang__ #pragma clang diagnostic pop #endif const T Az = ray_coeff_.Sz * A[ray_coeff_.kz]; const T Bz = ray_coeff_.Sz * B[ray_coeff_.kz]; const T Cz = ray_coeff_.Sz * C[ray_coeff_.kz]; const T D = U * Az + V * Bz + W * Cz; const T rcpDet = static_cast<T>(1.0) / det; T tt = D * rcpDet; if (tt > (*t_inout)) { return false; } if (tt < t_min_) { return false; } (*t_inout) = tt; // Use Thomas-Mueller style barycentric coord. // U + V + W = 1.0 and interp(p) = U * p0 + V * p1 + W * p2 // We want interp(p) = (1 - u - v) * p0 + u * v1 + v * p2; // => u = V, v = W. u_ = V * rcpDet; v_ = W * rcpDet; return true; } /// Returns the nearest hit distance. T GetT() const { return t_; } /// Update is called when initializing intesection and nearest hit is found. void Update(T t, unsigned int prim_idx) const { t_ = t; prim_id_ = prim_idx; } /// Prepare BVH traversal(e.g. compute inverse ray direction) /// This function is called only once in BVH traversal. 
void PrepareTraversal(const Ray<T> &ray, const BVHTraceOptions &trace_options) const { ray_org_[0] = ray.org[0]; ray_org_[1] = ray.org[1]; ray_org_[2] = ray.org[2]; // Calculate dimension where the ray direction is maximal. ray_coeff_.kz = 0; T absDir = std::fabs(ray.dir[0]); if (absDir < std::fabs(ray.dir[1])) { ray_coeff_.kz = 1; absDir = std::fabs(ray.dir[1]); } if (absDir < std::fabs(ray.dir[2])) { ray_coeff_.kz = 2; absDir = std::fabs(ray.dir[2]); } ray_coeff_.kx = ray_coeff_.kz + 1; if (ray_coeff_.kx == 3) ray_coeff_.kx = 0; ray_coeff_.ky = ray_coeff_.kx + 1; if (ray_coeff_.ky == 3) ray_coeff_.ky = 0; // Swap kx and ky dimension to preserve widing direction of triangles. if (ray.dir[ray_coeff_.kz] < static_cast<T>(0.0)) std::swap(ray_coeff_.kx, ray_coeff_.ky); // Calculate shear constants. ray_coeff_.Sx = ray.dir[ray_coeff_.kx] / ray.dir[ray_coeff_.kz]; ray_coeff_.Sy = ray.dir[ray_coeff_.ky] / ray.dir[ray_coeff_.kz]; ray_coeff_.Sz = static_cast<T>(1.0) / ray.dir[ray_coeff_.kz]; trace_options_ = trace_options; t_min_ = ray.min_t; u_ = static_cast<T>(0.0); v_ = static_cast<T>(0.0); } /// Post BVH traversal stuff. /// Fill `isect` if there is a hit. void PostTraversal(const Ray<T> &ray, bool hit, H *isect) const { if (hit && isect) { (*isect).t = t_; (*isect).u = u_; (*isect).v = v_; (*isect).prim_id = prim_id_; } (void)ray; } private: const T *vertices_; const unsigned int *faces_; const size_t vertex_stride_bytes_; mutable real3<T> ray_org_; mutable RayCoeff ray_coeff_; mutable BVHTraceOptions trace_options_; mutable T t_min_; mutable T t_; mutable T u_; mutable T v_; mutable unsigned int prim_id_; int _pad_; }; // // Robust BVH Ray Traversal : http://jcgt.org/published/0002/02/02/paper.pdf // // NaN-safe min and max function. template <class T> const T &safemin(const T &a, const T &b) { return (a < b) ? a : b; } template <class T> const T &safemax(const T &a, const T &b) { return (a > b) ? 
a : b; } // // SAH functions // struct BinBuffer { explicit BinBuffer(unsigned int size) { bin_size = size; bin.resize(2 * 3 * size); clear(); } void clear() { memset(&bin[0], 0, sizeof(size_t) * 2 * 3 * bin_size); } std::vector<size_t> bin; // (min, max) * xyz * binsize unsigned int bin_size; unsigned int pad0; }; template <typename T> inline T CalculateSurfaceArea(const real3<T> &min, const real3<T> &max) { real3<T> box = max - min; return static_cast<T>(2.0) * (box[0] * box[1] + box[1] * box[2] + box[2] * box[0]); } template <typename T> inline void GetBoundingBoxOfTriangle(real3<T> *bmin, real3<T> *bmax, const T *vertices, const unsigned int *faces, unsigned int index) { unsigned int f0 = faces[3 * index + 0]; unsigned int f1 = faces[3 * index + 1]; unsigned int f2 = faces[3 * index + 2]; real3<T> p[3]; p[0] = real3<T>(&vertices[3 * f0]); p[1] = real3<T>(&vertices[3 * f1]); p[2] = real3<T>(&vertices[3 * f2]); (*bmin) = p[0]; (*bmax) = p[0]; for (int i = 1; i < 3; i++) { (*bmin)[0] = std::min((*bmin)[0], p[i][0]); (*bmin)[1] = std::min((*bmin)[1], p[i][1]); (*bmin)[2] = std::min((*bmin)[2], p[i][2]); (*bmax)[0] = std::max((*bmax)[0], p[i][0]); (*bmax)[1] = std::max((*bmax)[1], p[i][1]); (*bmax)[2] = std::max((*bmax)[2], p[i][2]); } } template <typename T, class P> inline void ContributeBinBuffer(BinBuffer *bins, // [out] const real3<T> &scene_min, const real3<T> &scene_max, unsigned int *indices, unsigned int left_idx, unsigned int right_idx, const P &p) { T bin_size = static_cast<T>(bins->bin_size); // Calculate extent real3<T> scene_size, scene_inv_size; scene_size = scene_max - scene_min; for (int i = 0; i < 3; ++i) { assert(scene_size[i] >= static_cast<T>(0.0)); if (scene_size[i] > static_cast<T>(0.0)) { scene_inv_size[i] = bin_size / scene_size[i]; } else { scene_inv_size[i] = static_cast<T>(0.0); } } // Clear bin data std::fill(bins->bin.begin(), bins->bin.end(), 0); // memset(&bins->bin[0], 0, sizeof(2 * 3 * bins->bin_size)); size_t idx_bmin[3]; size_t 
idx_bmax[3]; for (size_t i = left_idx; i < right_idx; i++) { // // Quantize the position into [0, BIN_SIZE) // // q[i] = (int)(p[i] - scene_bmin) / scene_size // real3<T> bmin; real3<T> bmax; p.BoundingBox(&bmin, &bmax, indices[i]); // GetBoundingBoxOfTriangle(&bmin, &bmax, vertices, faces, indices[i]); real3<T> quantized_bmin = (bmin - scene_min) * scene_inv_size; real3<T> quantized_bmax = (bmax - scene_min) * scene_inv_size; // idx is now in [0, BIN_SIZE) for (int j = 0; j < 3; ++j) { int q0 = static_cast<int>(quantized_bmin[j]); if (q0 < 0) q0 = 0; int q1 = static_cast<int>(quantized_bmax[j]); if (q1 < 0) q1 = 0; idx_bmin[j] = static_cast<unsigned int>(q0); idx_bmax[j] = static_cast<unsigned int>(q1); if (idx_bmin[j] >= bin_size) idx_bmin[j] = static_cast<unsigned int>(bin_size) - 1; if (idx_bmax[j] >= bin_size) idx_bmax[j] = static_cast<unsigned int>(bin_size) - 1; assert(idx_bmin[j] < bin_size); assert(idx_bmax[j] < bin_size); // Increment bin counter bins->bin[0 * (bins->bin_size * 3) + static_cast<size_t>(j) * bins->bin_size + idx_bmin[j]] += 1; bins->bin[1 * (bins->bin_size * 3) + static_cast<size_t>(j) * bins->bin_size + idx_bmax[j]] += 1; } } } template <typename T> inline T SAH(size_t ns1, T leftArea, size_t ns2, T rightArea, T invS, T Taabb, T Ttri) { T sah; sah = static_cast<T>(2.0) * Taabb + (leftArea * invS) * static_cast<T>(ns1) * Ttri + (rightArea * invS) * static_cast<T>(ns2) * Ttri; return sah; } template <typename T> inline bool FindCutFromBinBuffer(T *cut_pos, // [out] xyz int *minCostAxis, // [out] const BinBuffer *bins, const real3<T> &bmin, const real3<T> &bmax, size_t num_primitives, T costTaabb) { // should be in [0.0, 1.0] const T kEPS = std::numeric_limits<T>::epsilon(); // * epsScale; size_t left, right; real3<T> bsize, bstep; real3<T> bminLeft, bmaxLeft; real3<T> bminRight, bmaxRight; T saLeft, saRight, saTotal; T pos; T minCost[3]; T costTtri = static_cast<T>(1.0) - costTaabb; (*minCostAxis) = 0; bsize = bmax - bmin; bstep = bsize * 
(static_cast<T>(1.0) / bins->bin_size); saTotal = CalculateSurfaceArea(bmin, bmax); T invSaTotal = static_cast<T>(0.0); if (saTotal > kEPS) { invSaTotal = static_cast<T>(1.0) / saTotal; } for (int j = 0; j < 3; ++j) { // // Compute SAH cost for the right side of each cell of the bbox. // Exclude both extreme side of the bbox. // // i: 0 1 2 3 // +----+----+----+----+----+ // | | | | | | // +----+----+----+----+----+ // T minCostPos = bmin[j] + static_cast<T>(1.0) * bstep[j]; minCost[j] = std::numeric_limits<T>::max(); left = 0; right = num_primitives; bminLeft = bminRight = bmin; bmaxLeft = bmaxRight = bmax; for (int i = 0; i < static_cast<int>(bins->bin_size) - 1; ++i) { left += bins->bin[0 * (3 * bins->bin_size) + static_cast<size_t>(j) * bins->bin_size + static_cast<size_t>(i)]; right -= bins->bin[1 * (3 * bins->bin_size) + static_cast<size_t>(j) * bins->bin_size + static_cast<size_t>(i)]; assert(left <= num_primitives); assert(right <= num_primitives); // // Split pos bmin + (i + 1) * (bsize / BIN_SIZE) // +1 for i since we want a position on right side of the cell. 
// pos = bmin[j] + (i + static_cast<T>(1.0)) * bstep[j]; bmaxLeft[j] = pos; bminRight[j] = pos; saLeft = CalculateSurfaceArea(bminLeft, bmaxLeft); saRight = CalculateSurfaceArea(bminRight, bmaxRight); T cost = SAH(left, saLeft, right, saRight, invSaTotal, costTaabb, costTtri); if (cost < minCost[j]) { // // Update the min cost // minCost[j] = cost; minCostPos = pos; // minCostAxis = j; } } cut_pos[j] = minCostPos; } // cut_axis = minCostAxis; // cut_pos = minCostPos; // Find min cost axis T cost = minCost[0]; (*minCostAxis) = 0; if (cost > minCost[1]) { (*minCostAxis) = 1; cost = minCost[1]; } if (cost > minCost[2]) { (*minCostAxis) = 2; cost = minCost[2]; } return true; } #ifdef _OPENMP template <typename T, class P> void ComputeBoundingBoxOMP(real3<T> *bmin, real3<T> *bmax, const unsigned int *indices, unsigned int left_index, unsigned int right_index, const P &p) { { p.BoundingBox(bmin, bmax, indices[left_index]); } T local_bmin[3] = {(*bmin)[0], (*bmin)[1], (*bmin)[2]}; T local_bmax[3] = {(*bmax)[0], (*bmax)[1], (*bmax)[2]}; unsigned int n = right_index - left_index; #pragma omp parallel firstprivate(local_bmin, local_bmax) if (n > (1024 * 128)) { #pragma omp parallel for for (int i = int(left_index); i < int(right_index); i++) { // for each faces unsigned int idx = indices[i]; real3<T> bbox_min, bbox_max; p.BoundingBox(&bbox_min, &bbox_max, idx); for (int k = 0; k < 3; k++) { // xyz if ((*bmin)[k] > bbox_min[k]) (*bmin)[k] = bbox_min[k]; if ((*bmax)[k] < bbox_max[k]) (*bmax)[k] = bbox_max[k]; } } #pragma omp critical { for (int k = 0; k < 3; k++) { if (local_bmin[k] < (*bmin)[k]) { { if (local_bmin[k] < (*bmin)[k]) (*bmin)[k] = local_bmin[k]; } } if (local_bmax[k] > (*bmax)[k]) { { if (local_bmax[k] > (*bmax)[k]) (*bmax)[k] = local_bmax[k]; } } } } } } #endif template <typename T, class P> inline void ComputeBoundingBox(real3<T> *bmin, real3<T> *bmax, const unsigned int *indices, unsigned int left_index, unsigned int right_index, const P &p) { { unsigned int 
idx = indices[left_index]; p.BoundingBox(bmin, bmax, idx); } { for (unsigned int i = left_index + 1; i < right_index; i++) { // for each primitives unsigned int idx = indices[i]; real3<T> bbox_min, bbox_max; p.BoundingBox(&bbox_min, &bbox_max, idx); for (int k = 0; k < 3; k++) { // xyz if ((*bmin)[k] > bbox_min[k]) (*bmin)[k] = bbox_min[k]; if ((*bmax)[k] < bbox_max[k]) (*bmax)[k] = bbox_max[k]; } } } } template <typename T> inline void GetBoundingBox(real3<T> *bmin, real3<T> *bmax, const std::vector<BBox<T> > &bboxes, unsigned int *indices, unsigned int left_index, unsigned int right_index) { { unsigned int i = left_index; unsigned int idx = indices[i]; (*bmin)[0] = bboxes[idx].bmin[0]; (*bmin)[1] = bboxes[idx].bmin[1]; (*bmin)[2] = bboxes[idx].bmin[2]; (*bmax)[0] = bboxes[idx].bmax[0]; (*bmax)[1] = bboxes[idx].bmax[1]; (*bmax)[2] = bboxes[idx].bmax[2]; } T local_bmin[3] = {(*bmin)[0], (*bmin)[1], (*bmin)[2]}; T local_bmax[3] = {(*bmax)[0], (*bmax)[1], (*bmax)[2]}; { for (unsigned int i = left_index; i < right_index; i++) { // for each faces unsigned int idx = indices[i]; for (int k = 0; k < 3; k++) { // xyz T minval = bboxes[idx].bmin[k]; T maxval = bboxes[idx].bmax[k]; if (local_bmin[k] > minval) local_bmin[k] = minval; if (local_bmax[k] < maxval) local_bmax[k] = maxval; } } for (int k = 0; k < 3; k++) { (*bmin)[k] = local_bmin[k]; (*bmax)[k] = local_bmax[k]; } } } // // -- // #if NANORT_ENABLE_PARALLEL_BUILD template <typename T> template <class P, class Pred> unsigned int BVHAccel<T>::BuildShallowTree(std::vector<BVHNode<T> > *out_nodes, unsigned int left_idx, unsigned int right_idx, unsigned int depth, unsigned int max_shallow_depth, const P &p, const Pred &pred) { assert(left_idx <= right_idx); unsigned int offset = static_cast<unsigned int>(out_nodes->size()); if (stats_.max_tree_depth < depth) { stats_.max_tree_depth = depth; } real3<T> bmin, bmax; ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p); unsigned int n = right_idx - 
left_idx; if ((n <= options_.min_leaf_primitives) || (depth >= options_.max_tree_depth)) { // Create leaf node. BVHNode<T> leaf; leaf.bmin[0] = bmin[0]; leaf.bmin[1] = bmin[1]; leaf.bmin[2] = bmin[2]; leaf.bmax[0] = bmax[0]; leaf.bmax[1] = bmax[1]; leaf.bmax[2] = bmax[2]; assert(left_idx < std::numeric_limits<unsigned int>::max()); leaf.flag = 1; // leaf leaf.data[0] = n; leaf.data[1] = left_idx; out_nodes->push_back(leaf); // atomic update stats_.num_leaf_nodes++; return offset; } // // Create branch node. // if (depth >= max_shallow_depth) { // Delay to build tree ShallowNodeInfo info; info.left_idx = left_idx; info.right_idx = right_idx; info.offset = offset; shallow_node_infos_.push_back(info); // Add dummy node. BVHNode<T> node; node.axis = -1; node.flag = -1; out_nodes->push_back(node); return offset; } else { // // Compute SAH and find best split axis and position // int min_cut_axis = 0; T cut_pos[3] = {0.0, 0.0, 0.0}; BinBuffer bins(options_.bin_size); ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx, right_idx, p); FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n, options_.cost_t_aabb); // Try all 3 axis until good cut position avaiable. unsigned int mid_idx = left_idx; int cut_axis = min_cut_axis; for (int axis_try = 0; axis_try < 3; axis_try++) { unsigned int *begin = &indices_[left_idx]; unsigned int *end = &indices_[right_idx - 1] + 1; // mimics end() iterator. unsigned int *mid = 0; // try min_cut_axis first. cut_axis = (min_cut_axis + axis_try) % 3; // @fixme { We want some thing like: std::partition(begin, end, // pred(cut_axis, cut_pos[cut_axis])); } pred.Set(cut_axis, cut_pos[cut_axis]); // // Split at (cut_axis, cut_pos) // indices_ will be modified. // mid = std::partition(begin, end, pred); mid_idx = left_idx + static_cast<unsigned int>((mid - begin)); if ((mid_idx == left_idx) || (mid_idx == right_idx)) { // Can't split well. 
// Switch to object median(which may create unoptimized tree, but // stable) mid_idx = left_idx + (n >> 1); // Try another axis if there's axis to try. } else { // Found good cut. exit loop. break; } } BVHNode<T> node; node.axis = cut_axis; node.flag = 0; // 0 = branch out_nodes->push_back(node); unsigned int left_child_index = 0; unsigned int right_child_index = 0; left_child_index = BuildShallowTree(out_nodes, left_idx, mid_idx, depth + 1, max_shallow_depth, p, pred); right_child_index = BuildShallowTree(out_nodes, mid_idx, right_idx, depth + 1, max_shallow_depth, p, pred); (*out_nodes)[offset].data[0] = left_child_index; (*out_nodes)[offset].data[1] = right_child_index; (*out_nodes)[offset].bmin[0] = bmin[0]; (*out_nodes)[offset].bmin[1] = bmin[1]; (*out_nodes)[offset].bmin[2] = bmin[2]; (*out_nodes)[offset].bmax[0] = bmax[0]; (*out_nodes)[offset].bmax[1] = bmax[1]; (*out_nodes)[offset].bmax[2] = bmax[2]; } stats_.num_branch_nodes++; return offset; } #endif template <typename T> template <class P, class Pred> unsigned int BVHAccel<T>::BuildTree(BVHBuildStatistics *out_stat, std::vector<BVHNode<T> > *out_nodes, unsigned int left_idx, unsigned int right_idx, unsigned int depth, const P &p, const Pred &pred) { assert(left_idx <= right_idx); unsigned int offset = static_cast<unsigned int>(out_nodes->size()); if (out_stat->max_tree_depth < depth) { out_stat->max_tree_depth = depth; } real3<T> bmin, bmax; if (!bboxes_.empty()) { GetBoundingBox(&bmin, &bmax, bboxes_, &indices_.at(0), left_idx, right_idx); } else { ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p); } unsigned int n = right_idx - left_idx; if ((n <= options_.min_leaf_primitives) || (depth >= options_.max_tree_depth)) { // Create leaf node. 
BVHNode<T> leaf; leaf.bmin[0] = bmin[0]; leaf.bmin[1] = bmin[1]; leaf.bmin[2] = bmin[2]; leaf.bmax[0] = bmax[0]; leaf.bmax[1] = bmax[1]; leaf.bmax[2] = bmax[2]; assert(left_idx < std::numeric_limits<unsigned int>::max()); leaf.flag = 1; // leaf leaf.data[0] = n; leaf.data[1] = left_idx; out_nodes->push_back(leaf); // atomic update out_stat->num_leaf_nodes++; return offset; } // // Create branch node. // // // Compute SAH and find best split axis and position // int min_cut_axis = 0; T cut_pos[3] = {0.0, 0.0, 0.0}; BinBuffer bins(options_.bin_size); ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx, right_idx, p); FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n, options_.cost_t_aabb); // Try all 3 axis until good cut position avaiable. unsigned int mid_idx = left_idx; int cut_axis = min_cut_axis; for (int axis_try = 0; axis_try < 3; axis_try++) { unsigned int *begin = &indices_[left_idx]; unsigned int *end = &indices_[right_idx - 1] + 1; // mimics end() iterator. unsigned int *mid = 0; // try min_cut_axis first. cut_axis = (min_cut_axis + axis_try) % 3; pred.Set(cut_axis, cut_pos[cut_axis]); // // Split at (cut_axis, cut_pos) // indices_ will be modified. // mid = std::partition(begin, end, pred); mid_idx = left_idx + static_cast<unsigned int>((mid - begin)); if ((mid_idx == left_idx) || (mid_idx == right_idx)) { // Can't split well. // Switch to object median(which may create unoptimized tree, but // stable) mid_idx = left_idx + (n >> 1); // Try another axis to find better cut. } else { // Found good cut. exit loop. 
break; } } BVHNode<T> node; node.axis = cut_axis; node.flag = 0; // 0 = branch out_nodes->push_back(node); unsigned int left_child_index = 0; unsigned int right_child_index = 0; left_child_index = BuildTree(out_stat, out_nodes, left_idx, mid_idx, depth + 1, p, pred); right_child_index = BuildTree(out_stat, out_nodes, mid_idx, right_idx, depth + 1, p, pred); { (*out_nodes)[offset].data[0] = left_child_index; (*out_nodes)[offset].data[1] = right_child_index; (*out_nodes)[offset].bmin[0] = bmin[0]; (*out_nodes)[offset].bmin[1] = bmin[1]; (*out_nodes)[offset].bmin[2] = bmin[2]; (*out_nodes)[offset].bmax[0] = bmax[0]; (*out_nodes)[offset].bmax[1] = bmax[1]; (*out_nodes)[offset].bmax[2] = bmax[2]; } out_stat->num_branch_nodes++; return offset; } template <typename T> template <class P, class Pred> bool BVHAccel<T>::Build(unsigned int num_primitives, const P &p, const Pred &pred, const BVHBuildOptions<T> &options) { options_ = options; stats_ = BVHBuildStatistics(); nodes_.clear(); bboxes_.clear(); assert(options_.bin_size > 1); if (num_primitives == 0) { return false; } unsigned int n = num_primitives; // // 1. Create triangle indices(this will be permutated in BuildTree) // indices_.resize(n); #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < static_cast<int>(n); i++) { indices_[static_cast<size_t>(i)] = static_cast<unsigned int>(i); } // // 2. Compute bounding box(optional). 
// real3<T> bmin, bmax; if (options.cache_bbox) { bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max(); bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max(); bboxes_.resize(n); for (size_t i = 0; i < n; i++) { // for each primitived unsigned int idx = indices_[i]; BBox<T> bbox; p.BoundingBox(&(bbox.bmin), &(bbox.bmax), static_cast<unsigned int>(i)); bboxes_[idx] = bbox; for (int k = 0; k < 3; k++) { // xyz if (bmin[k] > bbox.bmin[k]) { bmin[k] = bbox.bmin[k]; } if (bmax[k] < bbox.bmax[k]) { bmax[k] = bbox.bmax[k]; } } } } else { #ifdef _OPENMP ComputeBoundingBoxOMP(&bmin, &bmax, &indices_.at(0), 0, n, p); #else ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), 0, n, p); #endif } // // 3. Build tree // #ifdef _OPENMP #if NANORT_ENABLE_PARALLEL_BUILD // Do parallel build for enoughly large dataset. if (n > options.min_primitives_for_parallel_build) { BuildShallowTree(&nodes_, 0, n, /* root depth */ 0, options.shallow_depth, p, pred); // [0, n) assert(shallow_node_infos_.size() > 0); // Build deeper tree in parallel std::vector<std::vector<BVHNode<T> > > local_nodes( shallow_node_infos_.size()); std::vector<BVHBuildStatistics> local_stats(shallow_node_infos_.size()); #pragma omp parallel for for (int i = 0; i < static_cast<int>(shallow_node_infos_.size()); i++) { unsigned int left_idx = shallow_node_infos_[i].left_idx; unsigned int right_idx = shallow_node_infos_[i].right_idx; BuildTree(&(local_stats[i]), &(local_nodes[i]), left_idx, right_idx, options.shallow_depth, p, pred); } // Join local nodes for (int i = 0; i < static_cast<int>(local_nodes.size()); i++) { assert(!local_nodes[i].empty()); size_t offset = nodes_.size(); // Add offset to child index(for branch node). 
for (size_t j = 0; j < local_nodes[i].size(); j++) { if (local_nodes[i][j].flag == 0) { // branch local_nodes[i][j].data[0] += offset - 1; local_nodes[i][j].data[1] += offset - 1; } } // replace nodes_[shallow_node_infos_[i].offset] = local_nodes[i][0]; // Skip root element of the local node. nodes_.insert(nodes_.end(), local_nodes[i].begin() + 1, local_nodes[i].end()); } // Join statistics for (int i = 0; i < static_cast<int>(local_nodes.size()); i++) { stats_.max_tree_depth = std::max(stats_.max_tree_depth, local_stats[i].max_tree_depth); stats_.num_leaf_nodes += local_stats[i].num_leaf_nodes; stats_.num_branch_nodes += local_stats[i].num_branch_nodes; } } else { BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred); // [0, n) } #else // !NANORT_ENABLE_PARALLEL_BUILD { BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred); // [0, n) } #endif #else // !_OPENMP { BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred); // [0, n) } #endif return true; } template <typename T> void BVHAccel<T>::Debug() { for (size_t i = 0; i < indices_.size(); i++) { printf("index[%d] = %d\n", int(i), int(indices_[i])); } for (size_t i = 0; i < nodes_.size(); i++) { printf("node[%d] : bmin %f, %f, %f, bmax %f, %f, %f\n", int(i), nodes_[i].bmin[0], nodes_[i].bmin[1], nodes_[i].bmin[1], nodes_[i].bmax[0], nodes_[i].bmax[1], nodes_[i].bmax[1]); } } template <typename T> bool BVHAccel<T>::Dump(const char *filename) { FILE *fp = fopen(filename, "wb"); if (!fp) { // fprintf(stderr, "[BVHAccel] Cannot write a file: %s\n", filename); return false; } size_t numNodes = nodes_.size(); assert(nodes_.size() > 0); size_t numIndices = indices_.size(); size_t r = 0; r = fwrite(&numNodes, sizeof(size_t), 1, fp); assert(r == 1); r = fwrite(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp); assert(r == numNodes); r = fwrite(&numIndices, sizeof(size_t), 1, fp); assert(r == 1); r = fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp); assert(r == numIndices); fclose(fp); 
return true; } template <typename T> bool BVHAccel<T>::Load(const char *filename) { FILE *fp = fopen(filename, "rb"); if (!fp) { // fprintf(stderr, "Cannot open file: %s\n", filename); return false; } size_t numNodes; size_t numIndices; size_t r = 0; r = fread(&numNodes, sizeof(size_t), 1, fp); assert(r == 1); assert(numNodes > 0); nodes_.resize(numNodes); r = fread(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp); assert(r == numNodes); r = fread(&numIndices, sizeof(size_t), 1, fp); assert(r == 1); indices_.resize(numIndices); r = fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp); assert(r == numIndices); fclose(fp); return true; } template <typename T> inline bool IntersectRayAABB(T *tminOut, // [out] T *tmaxOut, // [out] T min_t, T max_t, const T bmin[3], const T bmax[3], real3<T> ray_org, real3<T> ray_inv_dir, int ray_dir_sign[3]); template <> inline bool IntersectRayAABB<float>(float *tminOut, // [out] float *tmaxOut, // [out] float min_t, float max_t, const float bmin[3], const float bmax[3], real3<float> ray_org, real3<float> ray_inv_dir, int ray_dir_sign[3]) { float tmin, tmax; const float min_x = ray_dir_sign[0] ? bmax[0] : bmin[0]; const float min_y = ray_dir_sign[1] ? bmax[1] : bmin[1]; const float min_z = ray_dir_sign[2] ? bmax[2] : bmin[2]; const float max_x = ray_dir_sign[0] ? bmin[0] : bmax[0]; const float max_y = ray_dir_sign[1] ? bmin[1] : bmax[1]; const float max_z = ray_dir_sign[2] ? bmin[2] : bmax[2]; // X const float tmin_x = (min_x - ray_org[0]) * ray_inv_dir[0]; // MaxMult robust BVH traversal(up to 4 ulp). // 1.0000000000000004 for double precision. 
const float tmax_x = (max_x - ray_org[0]) * ray_inv_dir[0] * 1.00000024f; // Y const float tmin_y = (min_y - ray_org[1]) * ray_inv_dir[1]; const float tmax_y = (max_y - ray_org[1]) * ray_inv_dir[1] * 1.00000024f; // Z const float tmin_z = (min_z - ray_org[2]) * ray_inv_dir[2]; const float tmax_z = (max_z - ray_org[2]) * ray_inv_dir[2] * 1.00000024f; tmin = safemax(tmin_z, safemax(tmin_y, safemax(tmin_x, min_t))); tmax = safemin(tmax_z, safemin(tmax_y, safemin(tmax_x, max_t))); if (tmin <= tmax) { (*tminOut) = tmin; (*tmaxOut) = tmax; return true; } return false; // no hit } template <> inline bool IntersectRayAABB<double>(double *tminOut, // [out] double *tmaxOut, // [out] double min_t, double max_t, const double bmin[3], const double bmax[3], real3<double> ray_org, real3<double> ray_inv_dir, int ray_dir_sign[3]) { double tmin, tmax; const double min_x = ray_dir_sign[0] ? bmax[0] : bmin[0]; const double min_y = ray_dir_sign[1] ? bmax[1] : bmin[1]; const double min_z = ray_dir_sign[2] ? bmax[2] : bmin[2]; const double max_x = ray_dir_sign[0] ? bmin[0] : bmax[0]; const double max_y = ray_dir_sign[1] ? bmin[1] : bmax[1]; const double max_z = ray_dir_sign[2] ? bmin[2] : bmax[2]; // X const double tmin_x = (min_x - ray_org[0]) * ray_inv_dir[0]; // MaxMult robust BVH traversal(up to 4 ulp). 
const double tmax_x = (max_x - ray_org[0]) * ray_inv_dir[0] * 1.0000000000000004; // Y const double tmin_y = (min_y - ray_org[1]) * ray_inv_dir[1]; const double tmax_y = (max_y - ray_org[1]) * ray_inv_dir[1] * 1.0000000000000004; // Z const double tmin_z = (min_z - ray_org[2]) * ray_inv_dir[2]; const double tmax_z = (max_z - ray_org[2]) * ray_inv_dir[2] * 1.0000000000000004; tmin = safemax(tmin_z, safemax(tmin_y, safemax(tmin_x, min_t))); tmax = safemin(tmax_z, safemin(tmax_y, safemin(tmax_x, max_t))); if (tmin <= tmax) { (*tminOut) = tmin; (*tmaxOut) = tmax; return true; } return false; // no hit } template <typename T> template <class I> inline bool BVHAccel<T>::TestLeafNode(const BVHNode<T> &node, const Ray<T> &ray, const I &intersector) const { bool hit = false; unsigned int num_primitives = node.data[0]; unsigned int offset = node.data[1]; T t = intersector.GetT(); // current hit distance real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; for (unsigned int i = 0; i < num_primitives; i++) { unsigned int prim_idx = indices_[i + offset]; T local_t = t; if (intersector.Intersect(&local_t, prim_idx)) { // Update isect state t = local_t; intersector.Update(t, prim_idx); hit = true; } } return hit; } #if 0 // TODO(LTE): Implement template <typename T> template<class I, class H, class Comp> bool BVHAccel<T>::MultiHitTestLeafNode( std::priority_queue<H, std::vector<H>, Comp> *isect_pq, int max_intersections, const BVHNode<T> &node, const Ray<T> &ray, const I &intersector) const { bool hit = false; unsigned int num_primitives = node.data[0]; unsigned int offset = node.data[1]; T t = std::numeric_limits<T>::max(); if (isect_pq->size() >= static_cast<size_t>(max_intersections)) { t = isect_pq->top().t; // current furthest hit distance } real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; real3<T> 
ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; for (unsigned int i = 0; i < num_primitives; i++) { unsigned int prim_idx = indices_[i + offset]; T local_t = t, u = 0.0f, v = 0.0f; if (intersector.Intersect(&local_t, &u, &v, prim_idx)) { // Update isect state if ((local_t > ray.min_t)) { if (isect_pq->size() < static_cast<size_t>(max_intersections)) { H isect; t = local_t; isect.t = t; isect.u = u; isect.v = v; isect.prim_id = prim_idx; isect_pq->push(isect); // Update t to furthest distance. t = ray.max_t; hit = true; } else { if (local_t < isect_pq->top().t) { // delete furthest intersection and add new intersection. isect_pq->pop(); H hit; hit.t = local_t; hit.u = u; hit.v = v; hit.prim_id = prim_idx; isect_pq->push(hit); // Update furthest hit distance t = isect_pq->top().t; hit = true; } } } } } return hit; } #endif template <typename T> template <class I, class H> bool BVHAccel<T>::Traverse(const Ray<T> &ray, const I &intersector, H *isect, const BVHTraceOptions &options) const { const int kMaxStackDepth = 512; T hit_t = ray.max_t; int node_stack_index = 0; unsigned int node_stack[512]; node_stack[0] = 0; // Init isect info as no hit intersector.Update(hit_t, static_cast<unsigned int>(-1)); intersector.PrepareTraversal(ray, options); int dir_sign[3]; dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? 1 : 0; dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? 1 : 0; dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 
1 : 0; real3<T> ray_inv_dir; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; ray_inv_dir = vsafe_inverse(ray_dir); real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; T min_t = std::numeric_limits<T>::max(); T max_t = -std::numeric_limits<T>::max(); while (node_stack_index >= 0) { unsigned int index = node_stack[node_stack_index]; const BVHNode<T> &node = nodes_[index]; node_stack_index--; bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin, node.bmax, ray_org, ray_inv_dir, dir_sign); if (node.flag == 0) { // branch node if (hit) { int order_near = dir_sign[node.axis]; int order_far = 1 - order_near; // Traverse near first. node_stack[++node_stack_index] = node.data[order_far]; node_stack[++node_stack_index] = node.data[order_near]; } } else { // leaf node if (hit) { if (TestLeafNode(node, ray, intersector)) { hit_t = intersector.GetT(); } } } } assert(node_stack_index < kMaxStackDepth); bool hit = (intersector.GetT() < ray.max_t); intersector.PostTraversal(ray, hit, isect); return hit; } template <typename T> template <class I> inline bool BVHAccel<T>::TestLeafNodeIntersections( const BVHNode<T> &node, const Ray<T> &ray, const int max_intersections, const I &intersector, std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >, NodeHitComparator<T> > *isect_pq) const { bool hit = false; unsigned int num_primitives = node.data[0]; unsigned int offset = node.data[1]; real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; intersector.PrepareTraversal(ray); for (unsigned int i = 0; i < num_primitives; i++) { unsigned int prim_idx = indices_[i + offset]; T min_t, max_t; if (intersector.Intersect(&min_t, &max_t, prim_idx)) { // Always add to isect lists. 
NodeHit<T> isect; isect.t_min = min_t; isect.t_max = max_t; isect.node_id = prim_idx; if (isect_pq->size() < static_cast<size_t>(max_intersections)) { isect_pq->push(isect); } else { if (min_t < isect_pq->top().t_min) { // delete the furthest intersection and add a new intersection. isect_pq->pop(); isect_pq->push(isect); } } } } return hit; } template <typename T> template <class I> bool BVHAccel<T>::ListNodeIntersections( const Ray<T> &ray, int max_intersections, const I &intersector, StackVector<NodeHit<T>, 128> *hits) const { const int kMaxStackDepth = 512; T hit_t = ray.max_t; int node_stack_index = 0; unsigned int node_stack[512]; node_stack[0] = 0; // Stores furthest intersection at top std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >, NodeHitComparator<T> > isect_pq; (*hits)->clear(); int dir_sign[3]; dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? 1 : 0; dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? 1 : 0; dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 1 : 0; real3<T> ray_inv_dir; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; ray_inv_dir = vsafe_inverse(ray_dir); real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; T min_t, max_t; while (node_stack_index >= 0) { unsigned int index = node_stack[node_stack_index]; const BVHNode<T> &node = nodes_[static_cast<size_t>(index)]; node_stack_index--; bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin, node.bmax, ray_org, ray_inv_dir, dir_sign); if (node.flag == 0) { // branch node if (hit) { int order_near = dir_sign[node.axis]; int order_far = 1 - order_near; // Traverse near first. 
node_stack[++node_stack_index] = node.data[order_far]; node_stack[++node_stack_index] = node.data[order_near]; } } else { // leaf node if (hit) { TestLeafNodeIntersections(node, ray, max_intersections, intersector, &isect_pq); } } } assert(node_stack_index < kMaxStackDepth); (void)kMaxStackDepth; if (!isect_pq.empty()) { // Store intesection in reverse order(make it frontmost order) size_t n = isect_pq.size(); (*hits)->resize(n); for (size_t i = 0; i < n; i++) { const NodeHit<T> &isect = isect_pq.top(); (*hits)[n - i - 1] = isect; isect_pq.pop(); } return true; } return false; } #if 0 // TODO(LTE): Implement template <typename T> template<class I, class H, class Comp> bool BVHAccel<T>::MultiHitTraverse(const Ray<T> &ray, int max_intersections, const I &intersector, StackVector<H, 128> *hits, const BVHTraceOptions& options) const { const int kMaxStackDepth = 512; T hit_t = ray.max_t; int node_stack_index = 0; unsigned int node_stack[512]; node_stack[0] = 0; // Stores furthest intersection at top std::priority_queue<H, std::vector<H>, Comp> isect_pq; (*hits)->clear(); // Init isect info as no hit intersector.Update(hit_t, static_cast<unsigned int>(-1)); intersector.PrepareTraversal(ray, options); int dir_sign[3]; dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0); dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0); dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 
static_cast<T>(1) : static_cast<T>(0); real3<T> ray_inv_dir; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; ray_inv_dir = vsafe_inverse(ray_dir); real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; T min_t, max_t; while (node_stack_index >= 0) { unsigned int index = node_stack[node_stack_index]; const BVHNode<T> &node = nodes_[static_cast<size_t>(index)]; node_stack_index--; bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin, node.bmax, ray_org, ray_inv_dir, dir_sign); if (node.flag == 0) { // branch node if (hit) { int order_near = dir_sign[node.axis]; int order_far = 1 - order_near; // Traverse near first. node_stack[++node_stack_index] = node.data[order_far]; node_stack[++node_stack_index] = node.data[order_near]; } } else { // leaf node if (hit) { if (MultiHitTestLeafNode(&isect_pq, max_intersections, node, ray, intersector)) { // Only update `hit_t` when queue is full. if (isect_pq.size() >= static_cast<size_t>(max_intersections)) { hit_t = isect_pq.top().t; } } } } } assert(node_stack_index < kMaxStackDepth); (void)kMaxStackDepth; if (!isect_pq.empty()) { // Store intesection in reverse order(make it frontmost order) size_t n = isect_pq.size(); (*hits)->resize(n); for (size_t i = 0; i < n; i++) { const H &isect = isect_pq.top(); (*hits)[n - i - 1] = isect; isect_pq.pop(); } return true; } return false; } #endif #ifdef __clang__ #pragma clang diagnostic pop #endif } // namespace nanort #endif // NANORT_H_
Example_target_data.4.c
/*
 * @@name: target_data.4c
 * @@type: C
 * @@compilable: yes
 * @@linkable: no
 * @@expect: success
 * @@version: omp_4.0
 */

/* Element-wise vector multiply, offloaded to a target device. */
void vec_mult(float*, float*, float*, int);

/* Provided elsewhere: fill v1/v2 with input data; consume the result. */
extern void init(float*, float*, int);
extern void output(float*, int);

/* Wraps the call to vec_mult() in a target data region so the arrays
   are mapped once around the nested target construct. */
void foo(float *p0, float *v1, float *v2, int N)
{
   init(v1, v2, N);

   /* v1[0:N] and v2[:N] are copied to the device on entry; p0[0:N] is
      copied back to the host when the target data region ends. */
   #pragma omp target data map(to: v1[0:N], v2[:N]) map(from: p0[0:N])
   {
      vec_mult(p0, v1, v2, N);
   }

   output(p0, N);
}

/* Computes p1[i] = v3[i] * v4[i] on the device.  The map clauses here
   name the same array sections as the enclosing target data region in
   foo(), which is the point of this example: the data is already mapped
   when the target region executes. */
void vec_mult(float *p1, float *v3, float *v4, int N)
{
   int i;
   #pragma omp target map(to: v3[0:N], v4[:N]) map(from: p1[0:N])
   #pragma omp parallel for
   for (i=0; i<N; i++)
   {
     p1[i] = v3[i] * v4[i];
   }
}
bistack.c
// -*-Mode: C++;-*- // technically C99 // * BeginRiceCopyright ***************************************************** // // $HeadURL$ // $Id$ // // -------------------------------------------------------------------------- // Part of HPCToolkit (hpctoolkit.org) // // Information about sources of support for research and development of // HPCToolkit is at 'hpctoolkit.org' and in 'README.Acknowledgments'. // -------------------------------------------------------------------------- // // Copyright ((c)) 2002-2022, Rice University // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // * Neither the name of Rice University (RICE) nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // This software is provided by RICE and contributors "as is" and any // express or implied warranties, including, but not limited to, the // implied warranties of merchantability and fitness for a particular // purpose are disclaimed. 
In no event shall RICE or contributors be // liable for any direct, indirect, incidental, special, exemplary, or // consequential damages (including, but not limited to, procurement of // substitute goods or services; loss of use, data, or profits; or // business interruption) however caused and on any theory of liability, // whether in contract, strict liability, or tort (including negligence // or otherwise) arising in any way out of the use of this software, even // if advised of the possibility of such damage. // // ******************************************************* EndRiceCopyright * //***************************************************************************** // local includes //***************************************************************************** #include "bistack.h" //***************************************************************************** // macros //***************************************************************************** #define Ap(s) &s.aptr //***************************************************************************** // interface functions //***************************************************************************** void bistack_init ( bistack_t *s ) { atomic_init(Ap(s->produced), 0); atomic_init(Ap(s->to_consume), 0); } void bistack_push ( bistack_t *s, s_element_t *e ) { cstack_push(&s->produced, e); } s_element_t * bistack_pop ( bistack_t *s ) { // use sstack protocol for private consumer stack s_element_t *e = sstack_pop(&s->to_consume); return e; } void bistack_reverse ( bistack_t *s ) { sstack_reverse(&s->to_consume); } void bistack_steal ( bistack_t *s ) { if (atomic_load_explicit(Ap(s->produced), memory_order_relaxed) != NULL) { s_element_t *tmp = cstack_steal(&s->produced); atomic_store_explicit(Ap(s->to_consume), tmp, memory_order_relaxed); } } //***************************************************************************** // unit test //***************************************************************************** 
// Compile-time switch for the embedded unit test below (0 = disabled).
#define UNIT_TEST 0
#if UNIT_TEST

#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <unistd.h>

typedef struct {
  s_element_ptr_t next;
  int value;
} typed_stack_elem(int); //int_q_element_t

typedef s_element_ptr_t typed_stack_elem_ptr(int); //int_q_elem_ptr_t

typedef bistack_t typed_bistack(int);

//typed_queue_elem_ptr(int) queue;
typed_bistack(int) pair;

typed_bistack_impl(int)

// Allocate a stack element holding `value`, with a null successor.
typed_stack_elem(int) *
typed_stack_elem_fn(int,new)(int value)
{
  typed_stack_elem(int) *e =
    (typed_stack_elem(int)* ) malloc(sizeof(int_s_element_t));
  e->value = value;
  cstack_ptr_set(&e->next, 0);
  return e;  // BUG FIX: element was allocated but never returned (UB)
}

// Pop up to n elements from the pair's consumer stack, reporting each.
void
pop
(
 int n
)
{
  int i;
  for(i = 0; i < n; i++) {
    typed_stack_elem(int) *e = typed_bistack_pop(int)(&pair);
    if (e == 0) {
      printf("%d queue empty\n", omp_get_thread_num());
      break;
    } else {
      printf("%d popping %d\n", omp_get_thread_num(), e->value);
    }
  }
}

// Push values [min, min+n) onto the pair's producer stack.
void
push
(
 int min,
 int n
)
{
  int i;
  for(i = min; i < min + n; i++) {
    printf("%d pushing %d\n", omp_get_thread_num(), i);
    typed_bistack_push(int)(&pair, typed_stack_elem_fn(int, new)(i));
  }
}

// Move everything currently produced over to the consumer stack.
void
steal
(
)
{
  typed_bistack_steal(int)(&pair);
}

#ifdef DUMP_UNORDERED_STACK
// Walk and print a stolen chain of elements.
void
dump
(
 int_s_element_t *e
)
{
  for(; e;
      e = (int_s_element_t *) typed_stack_elem_ptr_get(int,cstack)(&e->next)) {
    printf("%d stole %d\n", omp_get_thread_num(), e->value);
  }
}
#endif

// Exercise the bistack with 5 producer threads and 1 stealing consumer.
int
main
(
 int argc,
 char **argv
)
{
  bistack_init(&pair);
#pragma omp parallel num_threads(6)
  {
    if (omp_get_thread_num() != 5 ) push(0, 30);
    if (omp_get_thread_num() == 5 ) {
      sleep(3);
      steal();
      pop(10);
    }
    if (omp_get_thread_num() != 5 ) push(100, 12);
    // pop(100);
    // int_bis_element_t *e = typed_bistack_steal(int, qtype)(&queue);
    //dump(e);
    if (omp_get_thread_num() != 5 ) push(300, 30);
    //typed_queue_
    if (omp_get_thread_num() == 5 ) {
      sleep(1);
      steal();
      pop(100);
    }
  }
  return 0;  // BUG FIX: main previously fell off the end
}

#endif
/* ===== file: GB_binop__div_uint32.c ===== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__div_uint32 // A.*B function (eWiseMult): GB_AemultB__div_uint32 // A*D function (colscale): GB_AxD__div_uint32 // D*A function (rowscale): GB_DxB__div_uint32 // C+=B function (dense accum): GB_Cdense_accumB__div_uint32 // C+=b function (dense accum): GB_Cdense_accumb__div_uint32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_uint32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_uint32 // C=scalar+B GB_bind1st__div_uint32 // C=scalar+B' GB_bind1st_tran__div_uint32 // C=A+scalar GB_bind2nd__div_uint32 // C=A'+scalar GB_bind2nd_tran__div_uint32 // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 32) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define 
GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_IDIV_UNSIGNED (x, y, 32) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_UINT32 || GxB_NO_DIV_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__div_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__div_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__div_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__div_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__div_uint32 ( GrB_Matrix C, 
const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__div_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__div_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__div_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const 
GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__div_uint32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t bij = Bx [p] ; Cx [p] = GB_IDIV_UNSIGNED (x, bij, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__div_uint32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t aij = Ax [p] ; Cx [p] = GB_IDIV_UNSIGNED (aij, y, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the 
macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 32) ; \ } GrB_Info GB_bind1st_tran__div_uint32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 32) ; \ } GrB_Info GB_bind2nd_tran__div_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ===== file: task-dependency.c ===== */
/* * task-dependency.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run | FileCheck %s // REQUIRES: tsan #include <omp.h> #include <stdio.h> #include <unistd.h> #include "ompt/ompt-signal.h" int main(int argc, char *argv[]) { int var = 0, a = 0; #pragma omp parallel num_threads(2) shared(var, a) #pragma omp master { #pragma omp task shared(var, a) depend(out : var) { var++; OMPT_SIGNAL(a); } #pragma omp task shared(var, a) depend(in : var) { OMPT_WAIT(a, 2); } #pragma omp task shared(var, a) depend(in : var) { OMPT_SIGNAL(a); var++; } // Give other thread time to steal the task. OMPT_WAIT(a, 1); } fprintf(stderr, "DONE\n"); int error = (var != 2); return error; } // CHECK-NOT: ThreadSanitizer: data race // CHECK-NOT: ThreadSanitizer: reported // CHECK: DONE
/* ===== file: omp-parallel-if.c ===== */
#include <stdio.h>
#include <omp.h>

#define THREADS 4

/*
 * Demonstrates the OpenMP `if` clause on a parallel region: because the
 * clause expression is 0 (false), the region executes serially on the
 * encountering thread, so omp_get_num_threads() reports 1 even though
 * num_threads(THREADS) requests 4.
 */
int main(void) {
#pragma omp parallel num_threads(THREADS) if(0)
  {
#pragma omp single
    printf("There are %d threads\n", omp_get_num_threads());
  }
  return 0;  // previously main fell off the end; make success explicit
}
/* ===== file: matMult_neon.c ===== */
/* * File: saxpy.c * Author: Malcolm Davis * Course: Computer Architecture II * Created on May 12, 2018 * Simple matrix multiplication with OpenMP + NEON * * Ussage: * ./argv[0] for default parameters and random vectors or; * ./argv[0] <m matrix 1 size> <n matrix 1 size> <m matrix 2 size> <n matrix 2 size> */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <unistd.h> #include <omp.h> #include <arm_neon.h> #define INT_RAND_MAX 10000 #define MATRIX_SIZE_M 1000 #define MATRIX_SIZE_N 1000 #define MATRIX_SIZE_P 1000 #define MATRIX_SIZE_Q 1000 typedef struct intMatrix{ int16_t * data; long nrows; long ncols; } intMatrix; void generateMatrix(struct intMatrix* mat); void printMatrix(struct intMatrix* mat); void matMult(struct intMatrix* A, struct intMatrix* B, struct intMatrix* C); /* * Main method, retrive command line options, create the threads */ int main(int argc, char const *argv[]) { const int printMatrixB = getenv("PRINT_MATRIX") ? 1 : 0; double start_time, run_time; srand(time(NULL)); // If the vector size is inserted then use it if not then use the default long m = argc > 1 && atol(argv[1]) > 0 ? atol(argv[1]) : MATRIX_SIZE_M; long n = argc > 2 && atol(argv[2]) > 0 ? atol(argv[2]) : MATRIX_SIZE_N; long p = argc > 3 && atol(argv[3]) > 0 ? atol(argv[3]) : MATRIX_SIZE_P; long q = argc > 4 && atol(argv[4]) > 0 ? 
atol(argv[4]) : MATRIX_SIZE_Q; if(n!=q){ printf("Incompatible matrix sizes %ldx%ld and %ldx%ld", m,n,p,q); return -1; } // Allocate memory for the Matrix struct intMatrix A; struct intMatrix B; struct intMatrix C; A.data = (int16_t*)malloc(sizeof(int16_t)*m*n); A.nrows = m; A.ncols = n; B.data = (int16_t*)malloc(sizeof(int16_t)*p*q); B.nrows = p; B.ncols = q; C.data = (int16_t*)calloc(m*q,sizeof(int16_t)); C.nrows = m; C.ncols = q; // Generate random Matrix generateMatrix(&A); generateMatrix(&B); // Print the Matrix if(printMatrixB){ printf("----C=A*B----\n"); printf("A: "); printMatrix(&A); printf("B: "); printMatrix(&B); } //Do the actual matMult and take the time start_time = omp_get_wtime(); matMult(&A, &B, &C); run_time = omp_get_wtime() - start_time; //Print the result if(printMatrixB){ printf("C: "); printMatrix(&C); } printf("Size(NXM) %ld Seconds(s) %f \n", q*n, run_time); // Free the allocated memmory free(A.data); free(B.data); free(C.data); return 0; } /* * matMult Function C = A*B * @param C the return matrix * @param A a matrix of ints * @param B a matrix of ints */ void matMult(struct intMatrix* A, struct intMatrix* B, struct intMatrix* C){ long i, j, k; double sum = 0; #ifdef PARALLEL #pragma omp parallel for private(i,j,k, sum) shared(A, B, C) #endif for (i = 0; i < A->nrows; i++) { for (j = 0; j < B->ncols; j++) { for (k = 0; k < B->nrows; k++) { sum = sum + A->data[i*A->nrows+k]*B->data[k*B->nrows+j]; } C->data[i*C->nrows+j] = sum; sum = 0; } } } /* * Function that fills a vector of size "size" with random numbers * @param (INPUT)size the length of the vector * @param (OUTPUT)vector the place where the data will be stored. 
*/ void generateMatrix(struct intMatrix* matrix){ long i, j; #ifdef PARALLEL #pragma omp parallel for private(i, j) shared(matrix) #endif for(i=0; i < matrix->nrows; i++){ for(j=0; j < matrix->ncols; j++){ matrix->data[i*matrix->nrows+j] = (int16_t)ceil(((double)rand()/(double)(RAND_MAX)) * INT_RAND_MAX); } } } /* * Function that prints a vector on screen * @param (INPUT)size the length of the vector * @param (INPUT)vector the place where the data will be stored. */ void printMatrix(struct intMatrix* matrix){ for(long i=0; i < matrix->nrows; i++){ printf("["); for(long j=0; j < matrix->ncols; j++){ printf(" %hd ", matrix->data[i*matrix->nrows+j]); } printf("]\n"); } }
/* ===== file: 3d7pt.c ===== */
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
/* ===== file: GB_unop__tanh_fc64_fc64.c ===== */
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__tanh_fc64_fc64) // op(A') function: GB (_unop_tran__tanh_fc64_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = ctanh (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = ctanh (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = ctanh (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TANH || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__tanh_fc64_fc64) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for 
num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = ctanh (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = ctanh (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__tanh_fc64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ===== file: functions.c ===== */
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include "omp.h" #include "functions.h" //compute a*b mod p safely unsigned int modprod(unsigned int a, unsigned int b, unsigned int p) { unsigned int za = a; unsigned int ab = 0; while (b > 0) { if (b%2 == 1) ab = (ab + za) % p; za = (2 * za) % p; b /= 2; } return ab; } //compute a^b mod p safely unsigned int modExp(unsigned int a, unsigned int b, unsigned int p) { unsigned int z = a; unsigned int aExpb = 1; while (b > 0) { if (b%2 == 1) aExpb = modprod(aExpb, z, p); z = modprod(z, z, p); b /= 2; } return aExpb; } //returns either 0 or 1 randomly unsigned int randomBit() { return rand()%2; } //returns a random integer which is between 2^{n-1} and 2^{n} unsigned int randXbitInt(unsigned int n) { unsigned int r = 1; for (unsigned int i=0; i<n-1; i++) { r = r*2 + randomBit(); } return r; } //tests for primality and return 1 if N is probably prime and 0 if N is composite unsigned int isProbablyPrime(unsigned int N) { if (N%2==0) return 0; //not interested in even numbers (including 2) unsigned int NsmallPrimes = 168; unsigned int smallPrimeList[168] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997}; //before using a probablistic primality 
check, check directly using the small primes list for (unsigned int n=1;n<NsmallPrimes;n++) { if (N==smallPrimeList[n]) return 1; //true if (N%smallPrimeList[n]==0) return 0; //false } //if we're testing a large number switch to Miller-Rabin primality test unsigned int r = 0; unsigned int d = N-1; while (d%2 == 0) { d /= 2; r += 1; } for (unsigned int n=0;n<NsmallPrimes;n++) { unsigned int k = smallPrimeList[n]; unsigned int x = modExp(k,d,N); if ((x==1) || (x==N-1)) continue; for (unsigned int i=1;i<r-1;i++) { x = modprod(x,x,N); if (x == 1) return 0; //false if (x == N-1) break; } // see whether we left the loop becasue x==N-1 if (x == N-1) continue; return 0; //false } return 1; //true } //Finds a generator of Z_p using the assumption that p=2*q+1 unsigned int findGenerator(unsigned int p) { unsigned int g; unsigned int q = (p-1)/2; do { //make a random number 1<= g < p g = randXbitInt(32)%p; //could also have passed n to findGenerator } while (g==0 || (modExp(g,q,p)==1) || (modExp(g,2,p)==1)); return g; } void setupElGamal(unsigned int n, unsigned int *p, unsigned int *g, unsigned int *h, unsigned int *x) { /* Use isProbablyPrime and randomXbitInt to find a new random n-bit prime number which satisfies p=2*q+1 where q is also prime */ unsigned int q; do { *p = randXbitInt(n); q = (*p-1)/2; } while (!isProbablyPrime(*p) || !isProbablyPrime(q)); /* Use the fact that p=2*q+1 to quickly find a generator */ *g = findGenerator(*p); //pick a secret key, x *x = randXbitInt(n)%(*p); //compute h *h = modExp(*g,*x,*p); printf("ElGamal Setup successful.\n"); printf("p = %u. 
\n", *p); printf("g = %u is a generator of Z_%u \n", *g, *p); printf("Secret key: x = %u \n", *x); printf("h = g^x = %u\n", *h); printf("\n"); } void ElGamalEncrypt(unsigned int *m, unsigned int *a, unsigned int Nints, unsigned int p, unsigned int g, unsigned int h) { /* Q2.1 Parallelize this function with OpenMP */ #pragma omp parallel for for (unsigned int i=0; i<Nints;i++) { //pick y in Z_p randomly unsigned int y; do { y = randXbitInt(32)%p; } while (y==0); //dont allow y=0 //compute a = g^y a[i] = modExp(g,y,p); //compute s = h^y unsigned int s = modExp(h,y,p); //encrypt m by multiplying with s m[i] = modprod(m[i],s,p); } } void ElGamalDecrypt(unsigned int *m, unsigned int *a, unsigned int Nints, unsigned int p, unsigned int x) { /* Q2.1 Parallelize this function with OpenMP */ #pragma omp parallel for for (unsigned int i=0; i<Nints;i++) { //compute s = a^x unsigned int s = modExp(a[i],x,p); //compute s^{-1} = s^{p-2} unsigned int invS = modExp(s,p-2,p); //decrypt message by multplying by invS m[i] = modprod(m[i],invS,p); } } //Pad the end of string so its length is divisible by Nchars // Assume there is enough allocated storage for the padded string void padString(unsigned char* string, unsigned int charsPerInt) { /* Q1.2 Complete this function */ unsigned int length = strlen(string); unsigned int rem = length%charsPerInt; unsigned char pad = ' '; //str_append(*string, pad)*rem; for (int i = rem; i>0; i--) { string[strlen(string)] = ' '+'\0'; } } void convertStringToZ(unsigned char *string, unsigned int Nchars, unsigned int *Z, unsigned int Nints) { /* Q1.3 Complete this function */ /* Q2.2 Parallelize this function with OpenMP */ unsigned int x = 0; unsigned int charsPerInt = Nchars/Nints; #pragma omp parallel for for (unsigned int i = 0; i < Nints; i++) { unsigned int num = 0; for (unsigned int j = 0; j < charsPerInt; j++) { unsigned char c = *string+j+x; unsigned int charNum = (unsigned int) c; if (charsPerInt - j == 1) { num = num+charNum; } else { num = 
(num+charNum)*256; } //x += charsPerInt; //*Z[i] = num; } x += charsPerInt; Z[i] = num; } } void convertZToString(unsigned int *Z, unsigned int Nints, unsigned char *string, unsigned int Nchars) { /* Q1.4 Complete this function */ /* Q2.2 Parallelize this function with OpenMP */ #pragma omp parallel for for (unsigned int i=0; i<Nints; i++) { unsigned int num = Z[i]; unsigned int secChar = num%256; if ((num-secChar) != 0) { unsigned int firstChar = (num -secChar)/256; unsigned char c1 = (unsigned char) firstChar; string[strlen(string)+1] = c1; } unsigned char c2 = (unsigned char) secChar; string[strlen(string)+1] = c2; } }
/* ===== file: 3mm.c ===== */
/** * 3mm.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 /* Problem size. */ #ifdef RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 1000 #endif #define NI SIZE #define NJ SIZE #define NK SIZE #define NL SIZE #define NM SIZE /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_array(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A[i * NK + j] = ((DATA_TYPE)i * j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B[i * NJ + j] = ((DATA_TYPE)i * (j + 1)) / NJ; } } for (i = 0; i < NJ; i++) { for (j = 0; j < NM; j++) { C[i * NM + j] = ((DATA_TYPE)i * (j + 3)) / NL; } } for (i = 0; i < NM; i++) { for (j = 0; j < NL; j++) { D[i * NL + j] = ((DATA_TYPE)i * (j + 2)) / NK; } } } int compareResults(DATA_TYPE *G, DATA_TYPE *G_outputFromGpu) { int i, j, fail; fail = 0; for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { if (percentDiff(G[i * NL + j], G_outputFromGpu[i * NL + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); return fail; } void mm3_cpu(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) { int i, j, k; /* E := A*B */ for (i = 
0; i < NI; i++) { for (j = 0; j < NJ; j++) { E[i * NJ + j] = 0; for (k = 0; k < NK; ++k) { E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j]; } } } /* F := C*D */ for (i = 0; i < NJ; i++) { for (j = 0; j < NL; j++) { F[i * NL + j] = 0; for (k = 0; k < NM; ++k) { F[i * NL + j] += C[i * NM + k] * D[k * NL + j]; } } } /* G := E*F */ for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { G[i * NL + j] = 0; for (k = 0; k < NJ; ++k) { G[i * NL + j] += E[i * NJ + k] * F[k * NL + j]; } } } } void mm3_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) { /* E := A*B */ #pragma omp target teams map(to : A[ : NI *NK], B[ : NK *NJ], C[ : NJ *NM], D[ : NM *NL]) map(from : E[ : NI *NJ], F[ : NJ *NL], G[ : NI *NL]) device(DEVICE_ID) thread_limit(128) { #pragma omp distribute parallel for collapse(2) for (int i = 0; i < NI; i++) { for (int j = 0; j < NJ; j++) { E[i * NJ + j] = 0; for (int k = 0; k < NK; ++k) { E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j]; } } } /* F := C*D */ #pragma omp distribute parallel for collapse(2) for (int i = 0; i < NJ; i++) { for (int j = 0; j < NL; j++) { F[i * NL + j] = 0; for (int k = 0; k < NM; ++k) { F[i * NL + j] += C[i * NM + k] * D[k * NL + j]; } } } /* G := E*F */ #pragma omp distribute parallel for collapse(2) for (int i = 0; i < NI; i++) { for (int j = 0; j < NL; j++) { G[i * NL + j] = 0; for (int k = 0; k < NJ; ++k) { G[i * NL + j] += E[i * NJ + k] * F[k * NL + j]; } } } } } int main(int argc, char **argv) { double t_start, t_end; int fail = 0; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *C; DATA_TYPE *D; DATA_TYPE *E; DATA_TYPE *F; DATA_TYPE *G; DATA_TYPE *E_outputFromGpu; DATA_TYPE *F_outputFromGpu; DATA_TYPE *G_outputFromGpu; A = (DATA_TYPE *)malloc(NI * NK * sizeof(DATA_TYPE)); B = (DATA_TYPE *)malloc(NK * NJ * sizeof(DATA_TYPE)); C = (DATA_TYPE *)malloc(NJ * NM * sizeof(DATA_TYPE)); D = (DATA_TYPE *)malloc(NM * NL * sizeof(DATA_TYPE)); E = (DATA_TYPE *)malloc(NI * NJ * sizeof(DATA_TYPE)); F = 
(DATA_TYPE *)malloc(NJ * NL * sizeof(DATA_TYPE)); G = (DATA_TYPE *)malloc(NI * NL * sizeof(DATA_TYPE)); E_outputFromGpu = (DATA_TYPE *)calloc(NI * NJ, sizeof(DATA_TYPE)); F_outputFromGpu = (DATA_TYPE *)calloc(NJ * NL, sizeof(DATA_TYPE)); G_outputFromGpu = (DATA_TYPE *)calloc(NI * NL, sizeof(DATA_TYPE)); fprintf( stdout, "<< Linear Algebra: 3 Matrix Multiplications (E=A.B; F=C.D; G=E.F) >>\n"); init_array(A, B, C, D); t_start = rtclock(); mm3_OMP(A, B, C, D, E_outputFromGpu, F_outputFromGpu, G_outputFromGpu); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #ifdef RUN_TEST t_start = rtclock(); mm3_cpu(A, B, C, D, E, F, G); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); fail = compareResults(G, G_outputFromGpu); #endif free(A); free(B); free(C); free(D); free(E); free(F); free(G); free(G_outputFromGpu); return fail; }
inplace_binary_operation.h
/* Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef __NUMPY_INPLACE_BINARY_OPERATION_H__ #define __NUMPY_INPLACE_BINARY_OPERATION_H__ #include "point_task.h" namespace legate { namespace numpy { #if defined(LEGATE_USE_CUDA) && defined(__CUDACC__) template <int DIM, typename BinaryFunction, typename Args> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) gpu_inplace_binary_op(const Args args, const bool dense) { const size_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= args.volume) return; BinaryFunction func; if (dense) { args.inoutptr[idx] = func(args.inoutptr[idx], args.inptr[idx]); } else { const Legion::Point<DIM> point = args.pitches.unflatten(idx, args.rect.lo); args.inout[point] = func(args.inout[point], args.in[point]); } } #endif // Base class for all Legate's inplace binary operation tasks template <typename Derived, typename BinaryFunction> class InplaceBinaryOperationTask : public PointTask<Derived> { private: using first_argument_type = typename BinaryFunction::first_argument_type; using second_argument_type = typename BinaryFunction::second_argument_type; using result_type = std::result_of_t<BinaryFunction(first_argument_type, second_argument_type)>; public: static const int TASK_ID = task_id<BinaryFunction::op_code, NUMPY_INPLACE_VARIANT_OFFSET, result_type, first_argument_type, second_argument_type>; // inout_region = op(inout_region, in_region) static const int REGIONS = 2; template <int 
N> struct DeserializedArgs { Legion::Rect<N> rect; AccessorRW<first_argument_type, N> inout; AccessorRO<second_argument_type, N> in; Pitches<N - 1> pitches; size_t volume; first_argument_type* inoutptr; const second_argument_type* inptr; bool deserialize(LegateDeserializer& derez, const Legion::Task* task, const std::vector<Legion::PhysicalRegion>& regions) { rect = NumPyProjectionFunctor::unpack_shape<N>(task, derez); inout = derez.unpack_accessor_RW<first_argument_type, N>(regions[0], rect); in = derez.unpack_accessor_RO<first_argument_type, N>(regions[1], rect); volume = pitches.flatten(rect); #ifndef LEGION_BOUNDS_CHECKS // Check to see if this is dense or not return inout.accessor.is_dense_row_major(rect) && in.accessor.is_dense_row_major(rect) && (inoutptr = inout.ptr(rect)) && (inptr = in.ptr(rect)); #else // No dense execution if we're doing bounds checks return false; #endif } }; template <int DIM> static void dispatch_cpu(const Legion::Task* task, const std::vector<Legion::PhysicalRegion>& regions, LegateDeserializer& derez) { DeserializedArgs<DIM> args; const bool dense = args.deserialize(derez, task, regions); if (args.volume == 0) return; BinaryFunction func; if (dense) { for (size_t idx = 0; idx < args.volume; ++idx) args.inoutptr[idx] = func(args.inoutptr[idx], args.inptr[idx]); } else { CPULoop<DIM>::binary_inplace(func, args.inout, args.in, args.rect); } } #ifdef LEGATE_USE_OPENMP template <int DIM> static void dispatch_omp(const Legion::Task* task, const std::vector<Legion::PhysicalRegion>& regions, LegateDeserializer& derez) { DeserializedArgs<DIM> args; const bool dense = args.deserialize(derez, task, regions); if (args.volume == 0) return; BinaryFunction func; if (dense) { #pragma omp parallel for schedule(static) for (size_t idx = 0; idx < args.volume; ++idx) args.inoutptr[idx] = func(args.inoutptr[idx], args.inptr[idx]); } else { OMPLoop<DIM>::binary_inplace(func, args.inout, args.in, args.rect); } } #endif #if defined(LEGATE_USE_CUDA) && 
defined(__CUDACC__) template <int DIM> static void dispatch_gpu(const Legion::Task* task, const std::vector<Legion::PhysicalRegion>& regions, LegateDeserializer& derez) { DeserializedArgs<DIM> args; const bool dense = args.deserialize(derez, task, regions); if (args.volume == 0) return; const size_t blocks = (args.volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; gpu_inplace_binary_op<DIM, BinaryFunction, DeserializedArgs<DIM>> <<<blocks, THREADS_PER_BLOCK>>>(args, dense); } #elif defined(LEGATE_USE_CUDA) template <int DIM> static void dispatch_gpu(const Legion::Task* task, const std::vector<Legion::PhysicalRegion>& regions, LegateDeserializer& derez); #endif }; } // namespace numpy } // namespace legate #endif // __NUMPY_INPLACE_BINARY_OPERATION_H__
GB_binop__band_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// To change this kernel, edit the Generator sources and regenerate.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__band_int16)
// A.*B function (eWiseMult):       GB (_AemultB_01__band_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__band_int16)
// A.*B function (eWiseMult):       GB (_AemultB_03__band_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__band_int16)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__band_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__band_int16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__band_int16)
// C=scalar+B                       GB (_bind1st__band_int16)
// C=scalar+B'                      GB (_bind1st_tran__band_int16)
// C=A+scalar                       GB (_bind2nd__band_int16)
// C=A'+scalar                      GB (_bind2nd_tran__band_int16)

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij) & (bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (bitwise AND for int16)
#define GB_BINOP(z,x,y,i,j) \
    z = (x) & (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BAND || GxB_NO_INT16 || GxB_NO_BAND_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BAND is none of these, so this variant is compiled out.)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__band_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__band_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__band_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0
// (disabled: colscale is not generated for this operator)

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0
// (disabled: rowscale is not generated for this operator)

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__band_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__band_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__band_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__band_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__band_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__band_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) & (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__band_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) & (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x) & (aij) ;                     \
}

GrB_Info GB (_bind1st_tran__band_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij) & (y) ;                     \
}

GrB_Info GB (_bind2nd_tran__band_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
neighbor.h
#pragma once #ifdef USE_SIMPLEMAP #include "simplemap.hpp" #endif class ExPair{ public: PS::S64 id_in; PS::S64 id_out; PS::S64 id_cluster; PS::S32 * rank_list; static PS::S32 size; static PS::S32 rem; static PS::S32 n_bit; static void initialize() { const PS::S32 n_proc = PS::Comm::getNumberOfProc(); n_bit = 8 * sizeof(PS::S32); size = (PS::S32)std::ceil((PS::F64)n_proc/n_bit); rem = n_bit*size - n_proc; } static PS::S32 getSize() { return size+3; } ExPair(){ //PS::S32 myrank = PS::Comm::getRank(); id_in = id_out = id_cluster = 0; rank_list = new PS::S32[size]; for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = 0; //setFlag(myrank); } ExPair(PS::S64 id_in0, PS::S64 id_out0, PS::S64 id_cluster0){ //PS::S32 myrank = PS::Comm::getRank(); id_in = id_in0; id_out = id_out0; id_cluster = id_cluster0; rank_list = new PS::S32[size]; for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = 0; //setFlag(myrank); } ExPair(const ExPair & ep){ id_in = ep.id_in; id_out = ep.id_out; id_cluster = ep.id_cluster; rank_list = new PS::S32[size]; for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = ep.rank_list[i]; } ExPair &operator=(const ExPair & ep){ if ( this != &ep ){ id_in = ep.id_in; id_out = ep.id_out; id_cluster = ep.id_cluster; for ( PS::S32 i=0; i<size; i++ ) this->rank_list[i] = ep.rank_list[i]; } return *this; } ~ExPair(){ delete [] rank_list; } PS::S64 getId() const { return id_in; } std::pair<PS::S64,PS::S64> getPair() const { return std::make_pair(id_in, id_out); } PS::S64 getIdCluster() const { return id_cluster; } PS::S64 setIdCluster(PS::S64 id_cluster0) { return id_cluster = id_cluster0; } PS::S32 input(PS::S32 * inp){ id_in = inp[1]; id_out = inp[0]; id_cluster = inp[2]; for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = inp[i+3]; return size+3; } PS::S32 output(PS::S32 * outp){ outp[0] = id_in; outp[1] = id_out; outp[2] = id_cluster; for ( PS::S32 i=0; i<size; i++ ) outp[i+3] = rank_list[i]; return size+3; } bool checkFlag(const PS::S32 i) const { PS::S32 n = i / n_bit; PS::S32 ii = 
i - n_bit * n; return rank_list[n] & (1<<ii); } void setFlag(const PS::S32 i) { PS::S32 n = i / n_bit; PS::S32 ii = i - n_bit * n; rank_list[n] |= (1<<ii); } void unsetFlag(const PS::S32 i) { PS::S32 n = i / n_bit; PS::S32 ii = i - n_bit * n; rank_list[n] &= ~(1<<ii); } void resetFlag() { for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = 0; } bool equalFlag(const ExPair & ep) const { bool check = true; for ( PS::S32 i=0; i<size; i++ ) check &= (rank_list[i]==ep.rank_list[i]); return check; } PS::S32 getMinFlag() const { const PS::S32 n_proc = PS::Comm::getNumberOfProc(); for (PS::S32 i=0; i<n_proc; i++) if ( checkFlag(i) ) return i; return n_proc; } void operator &= (const ExPair & ep) { for ( PS::S32 i=0; i<size; i++ ) this->rank_list[i] &= ep.rank_list[i]; } void operator |= (const ExPair & ep) { for ( PS::S32 i=0; i<size; i++ ) this->rank_list[i] |= ep.rank_list[i]; } bool exchange(const ExPair & ep) { bool check = (this->id_cluster != ep.id_cluster); this->id_cluster = std::min(this->id_cluster, ep.id_cluster); for ( PS::S32 i=0; i<size; i++ ) { check |= (this->rank_list[i] != ep.rank_list[i]); this->rank_list[i] |= ep.rank_list[i]; } return check; } void show(){ const PS::S32 n_proc = PS::Comm::getNumberOfProc(); std::cout << PS::Comm::getRank() << "\t" << id_in << "\t" << id_out << "\t" << id_cluster << "\t"; for ( PS::S32 i=0; i<n_proc; i++ ) std::cout << (checkFlag(i)); std::cout << std::endl; } }; PS::S32 ExPair::size; PS::S32 ExPair::rem; PS::S32 ExPair::n_bit; class NeighborList{ public: std::vector<std::vector<PS::S64> > n_list; #ifndef USE_SIMPLEMAP std::map<PS::S64, PS::S32> id_map; #else SimpleMapLib::Map<PS::S64, PS::S32> id_map; #endif std::vector<PS::S32> with_neighbor_list; std::vector<std::pair<PS::S32, PS::S32> > pair_list; std::vector<std::pair<PS::S64,PS::S64> > ex_list; std::vector<std::pair<PS::S32,PS::S32> > ex_adr_list; std::vector<PS::S32> connected_list; std::vector<std::vector<ExPair> > ex_data; std::map<std::pair<PS::S32,PS::S32>, 
std::pair<PS::S32, PS::S32> > ex_data_map; std::vector<std::vector<PS::S32> > recv_list; std::vector<std::vector<PS::S32> > send_list; std::vector<PS::S32> recv_rank_list; std::vector<PS::S32> send_rank_list; std::vector<std::vector<PS::S32> > ex_data_send; std::vector<std::vector<PS::S32> > ex_data_recv; std::vector<PS::S64> & operator[](PS::S32 i){ return n_list[i]; } NeighborList() { const PS::S32 n_proc = PS::Comm::getNumberOfProc(); n_list.clear(); id_map.clear(); with_neighbor_list.clear(); pair_list.clear(); ex_list.clear(); ex_adr_list.clear(); connected_list.clear(); ex_data_map.clear(); recv_rank_list.clear(); send_rank_list.clear(); ex_data_send.clear(); ex_data_recv.clear(); ex_data.resize(n_proc); recv_list.resize(n_proc); send_list.resize(n_proc); #pragma omp parallel for for (PS::S32 i=0; i<n_proc; i++){ ex_data[i].clear(); recv_list[i].clear(); send_list[i].clear(); } ExPair::initialize(); } template <class Tpsys> void initializeList(Tpsys & pp) { const PS::S32 n_proc = PS::Comm::getNumberOfProc(); const PS::S32 n_loc = pp.getNumberOfParticleLocal(); n_list.clear(); //id_map.clear(); with_neighbor_list.clear(); pair_list.clear(); ex_list.clear(); ex_adr_list.clear(); connected_list.clear(); ex_data_map.clear(); recv_rank_list.clear(); send_rank_list.clear(); ex_data_send.clear(); ex_data_recv.clear(); #pragma omp parallel for for ( PS::S32 i=0; i<n_proc; i++ ){ ex_data[i].clear(); recv_list[i].clear(); send_list[i].clear(); } n_list.resize(n_loc); #pragma omp parallel for for(PS::S32 i=0; i<n_loc; i++) n_list.at(i).clear(); } ExPair & getExData(std::pair<PS::S32, PS::S32> adr) { return ex_data[adr.first][adr.second]; } PS::S32 getNumberOfParticlesWithNeighbor() const { return with_neighbor_list.size(); } PS::S32 getNumberOfNeighborPairsLocal() const { return pair_list.size(); } PS::S32 getNumberOfRankSend() const { return send_rank_list.size(); } PS::S32 getNumberOfRankRecv() const { return recv_rank_list.size(); } PS::S32 getNumberOfRankConnected() 
const { return connected_list.size(); } PS::S32 getNumberOfPairConnected(const PS::S32 ii) const { return ex_data[connected_list.at(ii)].size(); } template <class Tpsys> void addNeighbor(Tpsys & pp, PS::S32 i, PS::S64 j_id, PS::S32 j_rank, PS::S32 j_id_local=-1) { n_list[i].push_back(j_id); pp[i].neighbor ++; pp[i].id_cluster = std::min(pp[i].id_cluster, j_id); if ( j_rank != pp[i].myrank ) { #pragma omp critical { ex_list.push_back(std::make_pair(pp[i].id, j_id)); ex_adr_list.push_back(std::make_pair(j_rank, ex_data.at(j_rank).size())); ex_data_map[std::make_pair(pp[i].id, j_id)] = std::make_pair(j_rank, ex_data.at(j_rank).size()); ExPair ex_pair(pp[i].id, j_id, pp[i].id_cluster); ex_pair.setFlag(pp[i].myrank); ex_pair.setFlag(j_rank); ex_data.at(j_rank).push_back(ex_pair); } pp[i].inDomain = false; } else { if ( j_id_local < 0 ) j_id_local = id_map.at(j_id); if ( i<j_id_local ) { #pragma omp critical { pair_list.push_back(std::make_pair(i, j_id_local)); } } } } template <class Tpsys> void checkNeighbor(Tpsys & pp) { const PS::S32 n_loc = n_list.size(); bool check = true; PS::S32 nei_tot = 0; for ( PS::S32 i=0; i<n_loc; i++ ) { if ( !pp[i].isDead ) assert ( id_map.at(pp[i].id) == i ); } for ( PS::S32 i=0; i<n_loc; i++ ) { PS::S32 n_ngb = n_list.at(i).size(); //if ( pp[i].neighbor ) // std::cout << pp[i].id << "\t"; nei_tot += n_ngb; for ( PS::S32 jj=0; jj<n_ngb; jj++ ) { PS::S32 j_id = n_list.at(i).at(jj); //if ( pp[i].neighbor ) // std::cout << j_id << " "; auto itr = id_map.find(j_id); if ( itr == id_map.end() ) continue; #ifndef USE_SIMPLEMAP PS::S32 j = itr->second; #else PS::S32 j = id_map.second(itr); #endif PS::S32 n_ngb_j = n_list.at(j).size(); PS::S32 n_p = 0; for ( PS::S32 k=0; k<n_ngb_j; k++ ) { PS::S32 k_id = n_list.at(j).at(k); auto itr1 = id_map.find(k_id); if ( itr1 == id_map.end() ) continue; #ifndef USE_SIMPLEMAP auto ss = itr1->second; #else auto ss = id_map.second(itr1); #endif if ( (ss) == i ) n_p ++ ; } if ( n_p != 1 ) { std::cout << i << "\t" 
<< pp[i].id << "\t" << j << "\t" << j_id << std::endl; std::cout << "Neighbor of " << pp[i].id << ": "; for (PS::S32 k=0; k<n_list.at(i).size(); k++) std::cout << n_list.at(i).at(k) << "\t"; std::cout << std::endl; std::cout << "Neighbor of " << j_id << ": "; for (PS::S32 k=0; k<n_list.at(j).size(); k++) std::cout << n_list.at(j).at(k) << "\t"; std::cout << std::endl; check = check && false; check = check && false; } } //if ( pp[i].neighbor ) // std::cout << std::endl; } PS::S32 nei_tot_glb = PS::Comm::getSum(nei_tot); assert ( nei_tot_glb%2 == 0 ); if ( false ) { PS::Abort(); } } void createConnectedRankList(){ const PS::S32 n_proc = PS::Comm::getNumberOfProc(); connected_list.clear(); for ( PS::S32 i=0; i<n_proc; i++ ) { if ( ex_data[i].size() ) { connected_list.push_back(i); assert( i != PS::Comm::getRank() ); } } } void resizeExDataBuffer() { PS::S32 n_send = connected_list.size(); ex_data_send.resize(n_send); ex_data_recv.resize(n_send); for ( PS::S32 i=0; i<n_send; i++ ) { PS::S32 n_size = ex_data[connected_list.at(i)].size() * ExPair::getSize(); ex_data_send.at(i).resize(n_size); ex_data_recv.at(i).resize(n_size); } } template <class Tpsys> void makeIdMap(Tpsys & pp){ const PS::S32 n_loc = pp.getNumberOfParticleLocal(); id_map.clear(); //assert( (PS::S32)(n_list.size()) == n_loc ); #ifdef USE_SIMPLEMAP id_map.resize(n_loc); #pragma omp parallel for schedule(static) #endif for(PS::S32 i=0; i<n_loc; i++){ //assert( pp[i].neighbor == (PS::S32)(n_list[i].size()) ); if ( !pp[i].isDead ) { #ifndef USE_SIMPLEMAP id_map[pp[i].id] = i; #else id_map.set(pp[i].id, i); #endif }else{ #ifdef USE_SIMPLEMAP id_map.set(-1, i); #endif } } #ifdef USE_SIMPLEMAP id_map.makemap(); #endif } #if 1 template <class Tpsys> void createNeighborCluster(Tpsys & pp){ //const PS::S32 n_loc = pp.getNumberOfParticleLocal(); const PS::S32 n_wngb = with_neighbor_list.size(); const PS::S32 n_pair = pair_list.size(); bool check = true; while( check ){ check = false; #pragma omp parallel for 
reduction (||:check) for(PS::S32 ii=0; ii<n_pair; ii++){ PS::S32 i = pair_list.at(ii).first; PS::S32 j = pair_list.at(ii).second; if ( pp[i].id_cluster != pp[j].id_cluster ) { #pragma omp critical { pp[i].id_cluster = pp[j].id_cluster = std::min(pp[i].id_cluster, pp[j].id_cluster); } check = check || true; } } } if( ex_list.size() != 0 ){ PS::S32 n_out = ex_list.size(); #pragma omp parallel for for(PS::S32 ii=0; ii<n_wngb; ii++){ PS::S32 i = with_neighbor_list.at(ii); for(PS::S32 j=0; j<n_out; j++){ PS::S32 i_out = id_map.at(ex_list.at(j).first); PS::S32 id_cluster_out = pp[i_out].id_cluster; if( pp[i].id_cluster == id_cluster_out ) pp[i].inDomain = false; } } } } #else template <class Tpsys> void createNeighborCluster(Tpsys & pp){ const PS::S32 n_loc = pp.getNumberOfParticleLocal(); PS::S64 j_id_cluster = 0; PS::S64 id_cluster[n_loc]; bool check = true; while( check ){ check = false; #pragma omp parallel for for(PS::S32 i=0; i<n_loc; i++){ PS::S64 j_id = 0; PS::S32 nei = 0; nei = pp[i].neighbor; id_cluster[i] = pp[i].id_cluster; if(nei == 0) continue; for(PS::S32 j=0; j<nei; j++){ auto itr = id_map.find(n_list[i].at(j)); if ( itr == id_map.end() ) continue; #ifndef USE_SIMPLEMAP j_id = itr->second; #else j_id = id_map.second(itr); #endif j_id_cluster = pp[j_id].id_cluster; if( id_cluster[i] > j_id_cluster ) id_cluster[i] = j_id_cluster; } } #pragma omp parallel for reduction (||:check) for(PS::S32 i=0; i<n_loc; i++){ if ( pp[i].id_cluster != id_cluster[i] ) { check = check || true; pp[i].id_cluster = id_cluster[i]; } assert( pp[i].id >= id_cluster[i] ); } } if( ex_list.size() != 0 ){ PS::S32 n_out = ex_list.size(); #pragma omp parallel for for(PS::S32 i=0; i<n_loc; i++){ for(PS::S32 j=0; j<n_out; j++){ PS::S32 i_out = id_map.at(ex_list.at(j).first); PS::S32 id_cluster_out = pp[i_out].id_cluster; if( pp[i].id_cluster == id_cluster_out ) pp[i].inDomain = false; } } } } #endif template <class Tpsys> void inputExData(Tpsys & pp){ const PS::S32 n_out = ex_list.size(); 
#pragma omp parallel for for ( PS::S32 j=0; j<n_out; j++ ){ std::pair<PS::S64,PS::S64> pair = ex_list.at(j); std::pair<PS::S32,PS::S32> ex_adr = ex_adr_list.at(j); assert( getExData(ex_adr).getId() == pair.first ); getExData(ex_adr).setIdCluster(pp[id_map.at(pair.first)].id_cluster); } for ( PS::S32 j=0; j<n_out; j++ ){ //std::pair<PS::S32,PS::S32> pair = ex_list.at(j); std::pair<PS::S32,PS::S32> ex_adr = ex_adr_list.at(j); //assert( getExData(ex_adr).getId() == pair.first ); //getExData(ex_adr).setIdCluster(pp[id_map.at(pair.first)].id_cluster); for ( PS::S32 k=0; k<n_out; k++ ){ if ( k == j ) continue; //std::pair<PS::S32,PS::S32> pair2 = ex_list.at(k); std::pair<PS::S32,PS::S32> ex_adr2 = ex_adr_list.at(k); if ( getExData(ex_adr2).getIdCluster() == getExData(ex_adr).getIdCluster() ) { getExData(ex_adr).exchange(getExData(ex_adr2)); } } } } template <class Tpsys> bool exchangeExData(Tpsys & pp, PS::S32 TAG){ //PS::S32** & ex_data_send, //PS::S32** & ex_data_recv){ //const PS::S32 n_proc = PS::Comm::getNumberOfProc(); const PS::S32 n_send = connected_list.size(); //PS::S32 ** ex_data_send = new PS::S32*[n_send]; //PS::S32 ** ex_data_recv = new PS::S32*[n_send]; //for ( PS::S32 ii=0; ii<n_send; ii++ ) { // PS::S32 i = connected_list.at(ii); // PS::S32 n_size = ex_data[i].size() * ExPair::getSize(); // ex_data_send[ii] = new PS::S32[n_size]; // ex_data_recv[ii] = new PS::S32[n_size]; //} #pragma omp parallel for for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 i = connected_list.at(ii); PS::S32 n_data = ex_data[i].size(); PS::S32 jj = 0; for ( PS::S32 j=0; j<n_data; j++ ) { jj += ex_data[i][j].output(&ex_data_send[ii][jj]); } } #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL MPI_Request req0[n_send], req1[n_send]; MPI_Status stat0[n_send], stat1[n_send]; for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 i = connected_list.at(ii); PS::S32 n_size = ex_data[i].size() * ExPair::getSize(); MPI_Isend(&ex_data_send[ii][0], n_size, PS::GetDataType(ex_data_send[ii][0]), i, TAG, 
MPI_COMM_WORLD, &req0[ii]); MPI_Irecv(&ex_data_recv[ii][0], n_size, PS::GetDataType(ex_data_recv[ii][0]), i, TAG, MPI_COMM_WORLD, &req1[ii]); } MPI_Waitall(n_send, req0, stat0); MPI_Waitall(n_send, req1, stat1); #else assert ( n_send == 0 ); #endif bool check = false; #pragma omp parallel for reduction (||:check) for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 i = connected_list.at(ii); PS::S32 n_data = ex_data[i].size(); PS::S32 jj = 0; for ( PS::S32 j=0; j<n_data; j++ ) { ExPair recv_pair; jj += recv_pair.input(&ex_data_recv[ii][jj]); std::pair<PS::S32,PS::S32> adr = ex_data_map.at(recv_pair.getPair()); assert ( adr.first == i ); assert ( recv_pair.getPair() == getExData(adr).getPair() ); bool check_1 = getExData(adr).exchange(recv_pair); check = check || check_1; //getExData(adr).show(); #pragma omp critical { PS::S32 i_loc = id_map.at(getExData(adr).getId()); pp[i_loc].id_cluster = std::min(pp[i_loc].id_cluster, getExData(adr).getIdCluster()); } } //delete [] ex_data_send[ii]; //delete [] ex_data_recv[ii]; } //delete [] ex_data_send; //delete [] ex_data_recv; //PS::Comm::barrier(); //bool check_glb = PS::Comm::synchronizeConditionalBranchOR(check); return check; } template <class Tpsys> void selectSendRecvParticle(Tpsys & pp){ const PS::S32 myrank = PS::Comm::getRank(); const PS::S32 n_proc = PS::Comm::getNumberOfProc(); const PS::S32 n_ptcl = ex_list.size(); std::vector<PS::S64> ex_cluster; std::vector<std::pair<PS::S32,PS::S32> > ex_cluster_adr; ex_cluster.clear(); ex_cluster_adr.clear(); for ( PS::S32 ii=0; ii<n_ptcl; ii++ ) { //std::pair<PS::S32,PS::S32> pair = ex_list.at(ii); std::pair<PS::S32,PS::S32> adr = ex_adr_list.at(ii); PS::S64 id_cluster = getExData(adr).id_cluster; PS::S32 n_l = ex_cluster.size(); std::pair<PS::S32,PS::S32> adr2 = std::make_pair(-1,-1); for (PS::S32 j=0; j<n_l; j++){ if ( id_cluster == ex_cluster.at(j) ){ adr2 = ex_cluster_adr.at(j); assert( getExData(adr).equalFlag(getExData(adr2)) ); } } if ( adr2 == std::make_pair(-1,-1) ){ 
ex_cluster.push_back(id_cluster); ex_cluster_adr.push_back(adr); PS::S32 min_rank = getExData(adr).getMinFlag(); if ( min_rank == myrank ) { for ( PS::S32 j=0; j<n_proc; j++ ) { if ( getExData(adr).checkFlag(j) ) { if ( j == myrank ) continue; recv_list[j].push_back(id_cluster); assert ( j > myrank ); } } } else { assert ( min_rank < myrank ); send_list[min_rank].push_back(id_cluster); } } } for ( PS::S32 i=0; i<n_proc; i++ ) { if ( recv_list[i].size() ) recv_rank_list.push_back(i); if ( send_list[i].size() ) send_rank_list.push_back(i); } } private: void operator =(const NeighborList& NL){} NeighborList(const NeighborList& NL) {} }; template <class Tp> class ExParticleSystem { public : PS::S32 n_send; PS::S32 n_recv; PS::S32 n_ex_ptcl_send_tot; PS::S32 n_ex_nei_send_tot; PS::S32 n_ex_ptcl_recv_tot; PS::S32 n_ex_nei_recv_tot; std::vector<Tp> ex_ptcl_send; std::vector<PS::S64> ex_nei_send; std::vector<Tp> ex_ptcl_recv; std::vector<PS::S64> ex_nei_recv; std::vector<std::vector<PS::S32> > ex_ptcl_send_list; std::vector<PS::S64*> n_list; std::vector<PS::S32> n_ex_ptcl_send; std::vector<PS::S32> n_ex_nei_send; std::vector<PS::S32> n_ex_ptcl_recv; std::vector<PS::S32> n_ex_nei_recv; std::vector<PS::S32> adr_ex_ptcl_send; std::vector<PS::S32> adr_ex_nei_send; std::vector<PS::S32> adr_ex_ptcl_recv; std::vector<PS::S32> adr_ex_nei_recv; Tp & operator[](PS::S32 i){ return ex_ptcl_recv[i]; } PS::S32 getNumberOfParticleLocal() const { return n_ex_ptcl_recv_tot; } void initialize() { n_send = n_recv = 0; n_ex_ptcl_send_tot = n_ex_ptcl_recv_tot = 0; n_ex_nei_send_tot = n_ex_nei_recv_tot = 0; ex_ptcl_send.clear(); ex_nei_send.clear(); ex_ptcl_recv.clear(); ex_nei_recv.clear(); ex_ptcl_send_list.clear(); n_ex_ptcl_send.clear(); n_ex_nei_send.clear(); n_ex_ptcl_recv.clear(); n_ex_nei_recv.clear(); adr_ex_ptcl_send.clear(); adr_ex_nei_send.clear(); adr_ex_ptcl_recv.clear(); adr_ex_nei_recv.clear(); } void resize(PS::S32 n_send0, PS::S32 n_recv0){ n_send = n_send0; 
n_ex_ptcl_send.resize(n_send); n_ex_nei_send.resize(n_send); adr_ex_ptcl_send.resize(n_send); adr_ex_nei_send.resize(n_send); ex_ptcl_send_list.resize(n_send); #pragma omp parallel for for ( PS::S32 i=0; i<n_send; i++ ) ex_ptcl_send_list[i].clear(); n_recv = n_recv0; n_ex_ptcl_recv.resize(n_recv); n_ex_nei_recv.resize(n_recv); adr_ex_ptcl_recv.resize(n_recv); adr_ex_nei_recv.resize(n_recv); } PS::S32 getNumberOfParticleSend() const { return n_ex_ptcl_send_tot; } PS::S32 getNumberOfParticleRecv() const { return n_ex_ptcl_recv_tot; } PS::S32 getNumberOfNeighborSend() const { return n_ex_nei_send_tot; } PS::S32 getNumberOfNeighborRecv() const { return n_ex_nei_recv_tot; } template <class Tpsys> void inputNumberOfExParticleSend(Tpsys & pp, NeighborList & NList){ const PS::S32 n_loc = pp.getNumberOfParticleLocal(); #pragma omp parallel for for ( PS::S32 ii=0; ii<n_send; ii++ ) n_ex_ptcl_send[ii] = n_ex_nei_send[ii] = 0; if ( n_send ) { #pragma omp parallel for for ( PS::S32 i=0; i<n_loc; i++) { if ( !pp[i].inDomain ) { for ( PS::S32 jj=0; jj<n_send; jj++ ){ PS::S32 j = NList.send_rank_list[jj]; PS::S32 n_data = NList.send_list[j].size(); for ( PS::S32 k=0; k<n_data; k++ ) { if ( NList.send_list[j][k] == pp[i].id_cluster ) { #pragma omp critical { n_ex_ptcl_send[jj] ++; n_ex_nei_send[jj] += pp[i].neighbor; assert ( pp[i].neighbor == (PS::S32)(NList.n_list[i].size()) ); ex_ptcl_send_list[jj].push_back(i); } } } } } } } #pragma omp parallel for for ( PS::S32 ii=0; ii<n_send; ii++ ) assert( ex_ptcl_send_list[ii].size() ); } void sendRecvNumberOfExParticle(NeighborList & NList, PS::S32 TAG = 0){ #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL MPI_Request req0[n_send], req1[n_send]; MPI_Status stat0[n_send], stat1[n_send]; for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 i = NList.send_rank_list[ii]; MPI_Isend(&n_ex_ptcl_send[ii], 1, PS::GetDataType(n_ex_ptcl_send[0]), i, TAG, MPI_COMM_WORLD, &req0[ii]); MPI_Isend(&n_ex_nei_send[ii], 1, PS::GetDataType(n_ex_nei_send[0]), i, TAG+1, 
MPI_COMM_WORLD, &req1[ii]); } MPI_Request req2[n_recv], req3[n_recv]; MPI_Status stat2[n_recv], stat3[n_recv]; for ( PS::S32 ii=0; ii<n_recv; ii++ ) { PS::S32 i = NList.recv_rank_list[ii]; MPI_Irecv(&n_ex_ptcl_recv[ii], 1, PS::GetDataType(n_ex_ptcl_recv[0]), i, TAG, MPI_COMM_WORLD, &req2[ii]); MPI_Irecv(&n_ex_nei_recv[ii], 1, PS::GetDataType(n_ex_nei_recv[0]), i, TAG+1, MPI_COMM_WORLD, &req3[ii]); } MPI_Waitall(n_send, req0, stat0); MPI_Waitall(n_send, req1, stat1); MPI_Waitall(n_recv, req2, stat2); MPI_Waitall(n_recv, req3, stat3); #endif } void inputAdress(){ n_ex_ptcl_send_tot = n_ex_nei_send_tot = 0; for (PS::S32 i=0; i<n_send; i++){ adr_ex_ptcl_send.at(i) = n_ex_ptcl_send_tot; adr_ex_nei_send.at(i) = n_ex_nei_send_tot; n_ex_ptcl_send_tot += n_ex_ptcl_send.at(i); n_ex_nei_send_tot += n_ex_nei_send.at(i); } n_ex_ptcl_recv_tot = n_ex_nei_recv_tot = 0; for (PS::S32 i=0; i<n_recv; i++){ adr_ex_ptcl_recv.at(i) = n_ex_ptcl_recv_tot; adr_ex_nei_recv.at(i) = n_ex_nei_recv_tot; n_ex_ptcl_recv_tot += n_ex_ptcl_recv.at(i); n_ex_nei_recv_tot += n_ex_nei_recv.at(i); } ex_ptcl_send.resize(n_ex_ptcl_send_tot); ex_nei_send.resize(n_ex_nei_send_tot); ex_ptcl_recv.resize(n_ex_ptcl_recv_tot); ex_nei_recv.resize(n_ex_nei_recv_tot); n_list.resize(n_ex_ptcl_recv_tot); } template <class Tpsys> void inputExParticleSend(Tpsys & pp, NeighborList & NList){ #pragma omp parallel for for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 n_data = n_ex_ptcl_send.at(ii); PS::S32 adr_ptcl = adr_ex_ptcl_send.at(ii); PS::S32 adr_nei = adr_ex_nei_send.at(ii); PS::S32 n_nei = 0; for ( PS::S32 jj=0; jj<n_data; jj++ ) { PS::S32 j = ex_ptcl_send_list[ii].at(jj); pp[j].isSent = true; ex_ptcl_send.at(adr_ptcl + jj) = pp[j]; assert( !pp[j].inDomain ); for ( PS::S32 k=0; k<pp[j].neighbor; k++ ) { ex_nei_send.at(adr_nei + n_nei) = NList.n_list[j].at(k); n_nei ++; } } assert ( n_ex_nei_send.at(ii) == n_nei ); } } void sendRecvExParticle(NeighborList & NList, PS::S32 TAG = 0){ #ifdef 
PARTICLE_SIMULATOR_MPI_PARALLEL MPI_Request req0[n_send], req1[n_send]; MPI_Status stat0[n_send], stat1[n_send]; for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 i = NList.send_rank_list[ii]; MPI_Isend(&ex_ptcl_send[adr_ex_ptcl_send[ii]], n_ex_ptcl_send[ii], PS::GetDataType(ex_ptcl_send[0]), i, TAG+2, MPI_COMM_WORLD, &req0[ii]); MPI_Isend(&ex_nei_send[adr_ex_nei_send[ii]], n_ex_nei_send[ii], PS::GetDataType(ex_nei_send[0]), i, TAG+3, MPI_COMM_WORLD, &req1[ii]); } MPI_Request req2[n_recv], req3[n_recv]; MPI_Status stat2[n_recv], stat3[n_recv]; for ( PS::S32 ii=0; ii<n_recv; ii++ ) { PS::S32 i = NList.recv_rank_list[ii]; MPI_Irecv(&ex_ptcl_recv[adr_ex_ptcl_recv[ii]], n_ex_ptcl_recv[ii], PS::GetDataType(ex_ptcl_recv[0]), i, TAG+2, MPI_COMM_WORLD, &req2[ii]); MPI_Irecv(&ex_nei_recv[adr_ex_nei_recv[ii]], n_ex_nei_recv[ii], PS::GetDataType(ex_nei_recv[0]), i, TAG+3, MPI_COMM_WORLD, &req3[ii]); } MPI_Waitall(n_send, req0, stat0); MPI_Waitall(n_send, req1, stat1); MPI_Waitall(n_recv, req2, stat2); MPI_Waitall(n_recv, req3, stat3); #endif } void inputNeighborListOfExParticleRecv() { #pragma omp parallel for for ( PS::S32 ii=0; ii<n_recv; ii++ ) { PS::S32 n_data = n_ex_ptcl_recv.at(ii); PS::S32 adr_ptcl = adr_ex_ptcl_recv.at(ii); PS::S32 n_nei = adr_ex_nei_recv.at(ii); for ( PS::S32 jj=0; jj<n_data; jj++ ) { n_list.at(adr_ptcl + jj) = &(ex_nei_recv.at(n_nei)); n_nei += ex_ptcl_recv.at(adr_ptcl + jj).neighbor; assert ( ex_ptcl_recv.at(adr_ptcl + jj).isSent ); } if ( ii+1<n_recv ) assert ( adr_ex_nei_recv.at(ii+1) == n_nei ); } } void returnExParticle(NeighborList & NList, PS::S32 TAG = 0){ #ifdef PARTICLE_SIMULATOR_MPI_PARALLEL MPI_Request req0[n_send], req1[n_send]; MPI_Status stat0[n_send], stat1[n_send]; for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 i = NList.send_rank_list[ii]; MPI_Irecv(&ex_ptcl_send[adr_ex_ptcl_send[ii]], n_ex_ptcl_send[ii], PS::GetDataType(ex_ptcl_send[0]), i, TAG+4, MPI_COMM_WORLD, &req0[ii]); MPI_Irecv(&ex_nei_send[adr_ex_nei_send[ii]], 
n_ex_nei_send[ii], PS::GetDataType(ex_nei_send[0]), i, TAG+5, MPI_COMM_WORLD, &req1[ii]); } MPI_Request req2[n_recv], req3[n_recv]; MPI_Status stat2[n_recv], stat3[n_recv]; for ( PS::S32 ii=0; ii<n_recv; ii++ ) { PS::S32 i = NList.recv_rank_list[ii]; MPI_Isend(&ex_ptcl_recv[adr_ex_ptcl_recv[ii]], n_ex_ptcl_recv[ii], PS::GetDataType(ex_ptcl_recv[0]), i, TAG+4, MPI_COMM_WORLD, &req2[ii]); MPI_Isend(&ex_nei_recv[adr_ex_nei_recv[ii]], n_ex_nei_recv[ii], PS::GetDataType(ex_nei_recv[0]), i, TAG+5, MPI_COMM_WORLD, &req3[ii]); } MPI_Waitall(n_send, req0, stat0); MPI_Waitall(n_send, req1, stat1); MPI_Waitall(n_recv, req2, stat2); MPI_Waitall(n_recv, req3, stat3); #endif } template <class Tpsys> void outputExParticleSend(Tpsys & pp, NeighborList & NList){ #pragma omp parallel for for ( PS::S32 ii=0; ii<n_send; ii++ ) { PS::S32 n_data = n_ex_ptcl_send.at(ii); PS::S32 adr_ptcl = adr_ex_ptcl_send.at(ii); for ( PS::S32 jj=0; jj<n_data; jj++ ) { PS::S32 j = ex_ptcl_send_list[ii].at(jj); PS::S32 id_pre = pp[j].id; pp[j] = ex_ptcl_send.at(adr_ptcl + jj); if (!pp[j].isDead) assert( pp[j].id == id_pre ); } } } };
BaggingEnsemble.h
#ifndef GENIF_BAGGINGENSEMBLE_H #define GENIF_BAGGINGENSEMBLE_H #include <chrono> #include <genif/Learner.h> #include <random> namespace genif { /** * Provides a generic Bagging ensemble, that randomly subsamples given datasets and outputs a vector of learned models and predictions. * * @tparam ModelType The type of model, that the learner yields as result from calling `fit`. * @tparam PredictionType The type of prediction, that the learner yields as result from calling either `fitPredict` and `predict`. */ template<typename ModelType, typename PredictionType> class BaggingEnsemble : public Learner<std::vector<ModelType>, std::vector<PredictionType>> { public: /** * Constructs a new instance of BaggingEnsemble. * * @param baseLearner The base learner, which serves as a prototype for subsequent learning efforts. * @param nModels The number of models to fit. * @param sampleSize The number of observations to draw to fit each model. * @param workerCount The number of workers, which should fit models in parallel. * @param seed Seed to use for random number generation (-1 defaults to sysclock seed). Pass an integer for constant result across multiple runs. */ explicit BaggingEnsemble(const Learner<ModelType, PredictionType>& baseLearner, unsigned int nModels = 100, unsigned int sampleSize = 256, unsigned int workerCount = 1, int seed = -1) : _baseLearner(baseLearner), _seed(seed) { // Check property validity. if (nModels <= 0) throw std::runtime_error("BaggingEnsemble::BaggingEnsemble: nModels needs to be greater than zero."); if (sampleSize <= 0) throw std::runtime_error("BaggingEnsemble::BaggingEnsemble: sampleSize needs to be greater than zero."); if (workerCount <= 0) throw std::runtime_error("BaggingEnsemble::BaggingEnsemble: workerCount needs to be greater than zero."); // Assign properties. _nModels = nModels; _sampleSize = sampleSize; _workerCount = workerCount; } /** * Fit `nModels` using the supplied dataset. 
* * For fitting, this method takes a copy of the baseLearner property and calls fit using a previously drawn sample of dataset. * * @param dataset The dataset used to fit models. * @return A reference to the current BaggingEnsemble instance. The fitted models may be retrieved by calling the `getModels()` function. */ Learner<std::vector<ModelType>, std::vector<PredictionType>>& fit(const MatrixX& dataset) override { // Create PRNG. std::default_random_engine generator(_seed >= 0 ? _seed : std::chrono::system_clock::now().time_since_epoch().count()); std::uniform_int_distribution<int> distribution(0, dataset.rows() - 1); // Remove all existing models. _models.clear(); // Estimate new models. #pragma omp parallel for ordered num_threads(_workerCount) for (unsigned int i = 0; i < _nModels; i++) { // Take a copy of the base learner. auto learnerCopy = _baseLearner.copy(); // Sample dataset with replacement. MatrixX sampledDataset(_sampleSize, dataset.cols()); for (unsigned int j = 0; j < _sampleSize; j++) #pragma omp ordered sampledDataset.row(j) = dataset.row(distribution(generator)); // Fit base learner with sampled dataset. learnerCopy->fit(sampledDataset); // Add estimated model to the models vector. #pragma omp critical _models.push_back(std::move(learnerCopy->getModel())); } // Return self. return *this; } /** * Make predictions by using the set of models, which were previously learned with the `fit` method. * * Internally, this method calls the predict method of the given base learner and calls predict with every learned model and the supplied dataset. * * @param dataset The dataset to use for prediction. * @return A vector of predictions. */ std::vector<PredictionType> predict(const MatrixX& dataset) const override { // Create vector of predictions. std::vector<PredictionType> predictions; predictions.reserve(_models.size()); // Make predictions from models. 
for (auto& model : _models) predictions.push_back(_baseLearner.predict(dataset, model)); return predictions; } /** * Gathers the list of learned models, which were previously learned with the fit method. * @return A list of models. */ std::vector<ModelType> getModel() const { return _models; } /** * Returns the number of models, which should be fitted in this ensemble. * @return As stated above. */ unsigned int getNumberOfModels() const { return _nModels; } /** * Returns the number of models, which have been fitted in this ensemble. * @return As stated above. */ unsigned int getActualNumberOfModels() const { return _models.size(); } private: const Learner<ModelType, PredictionType>& _baseLearner; unsigned int _nModels; unsigned int _sampleSize; unsigned int _workerCount; int _seed; std::vector<ModelType> _models; }; } #endif // GENIF_BAGGINGENSEMBLE_H
main.c
/*======================================*/ /*= Autor: Tiago Serique Valadares =*/ /*= GRR: 20195138 =*/ /*= Disciplina: Aprendizado de Maquina =*/ /*======================================*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "knn.h" #include "read_data.h" int main(int argc, char *argv[]){ char train_base_file_name[LINESIZE]; char test_base_file_name[LINESIZE]; int k = 0; int n_lines_test = 0; int n_features = 0; int n_lines_train = 0; int n_classes = 0; int **confusion_matrix = NULL; Data *train_data_array = NULL; Data *test_data_array = NULL; FILE* train_base_file = NULL; FILE* test_base_file = NULL; if ( argc < 4 ){ printf("Formato de entrada:\n"); printf("knn <base de treinamento> <base de teste> <valor de k>\n"); return EXIT_FAILURE; } strcpy(train_base_file_name, argv[1]); strcpy(test_base_file_name, argv[2]); k = atoi(argv[3]); // open the train base file train_base_file = fopen(train_base_file_name, "r"); if ( train_base_file == NULL ){ printf("Not able to open the train base file\n"); return EXIT_FAILURE; } train_data_array = readData(train_base_file, &n_lines_train, &n_features, &n_classes); fclose(train_base_file); // open the test base file test_base_file = fopen(test_base_file_name, "r"); if ( test_base_file == NULL ){ printf("Not able to open the test base file\n"); return EXIT_FAILURE; } test_data_array = readData(test_base_file, &n_lines_test, &n_features, &n_classes); fclose(test_base_file); confusion_matrix = (int **)malloc(sizeof(int *) * n_classes + n_classes * n_classes * sizeof(int)); confusion_matrix[0] = (int *)(confusion_matrix + n_classes); #pragma omp parallel for for (int i = 1; i < n_classes; i++) confusion_matrix[i] = confusion_matrix[0] + (i * n_classes); #pragma omp parallel for for (int i = 0; i < n_classes; i++) for (int j = 0; j < n_classes; j++) confusion_matrix[i][j] = 0; knn(confusion_matrix, train_data_array, test_data_array, k, n_lines_train, n_lines_test, n_features, 
n_classes); printConfusionMatrix(confusion_matrix, n_classes); calculateAccuracy(confusion_matrix, n_classes); free(train_data_array); free(test_data_array); free(confusion_matrix); return EXIT_SUCCESS; }
rar5_fmt_plug.c
/* RAR 5.0 cracker patch for JtR. Hacked together during May of 2013 by Dhiru
 * Kholia.
 *
 * http://www.rarlab.com/technote.htm
 *
 * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
 * it is hereby released to the general public under the
 * following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * $rar5$<salt_len>$<salt>$<iter_log2>$<iv>$<pswcheck_len>$<pswcheck>
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_rar5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rar5);
#else

#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1 // tuned on core i7
#endif
#endif

#include "arch.h"
#include "johnswap.h"
#include "sha2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "rar5_common.h"
//#define PBKDF2_HMAC_SHA256_ALSO_INCLUDE_CTX
#include "pbkdf2_hmac_sha256.h"
#include "memdbg.h"

#define FORMAT_LABEL "RAR5"
#define FORMAT_NAME ""
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "PBKDF2-SHA256 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "PBKDF2-SHA256 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

/* Candidate plaintexts, one NUL-terminated slot per key index.
 * crypt_out / cur_salt come from rar5_common.h. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];

/* Allocate per-key buffers; when OpenMP is enabled, scale the key batch size
 * by the thread count so each thread gets work per crypt_all() call. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}

/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Install the salt for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Derive the RAR5 password-check value for each queued candidate:
 * PBKDF2-HMAC-SHA256(key, salt, iterations+32), then XOR-fold the 32-byte
 * digest down to SIZE_PSWCHECK bytes, as RAR5 does.  SIMD path processes
 * SSE_GROUP_SZ_SHA256 candidates per step; scalar path one at a time. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
#ifdef SSE_GROUP_SZ_SHA256
		int lens[SSE_GROUP_SZ_SHA256], i, j;
		unsigned char PswCheck[SIZE_PSWCHECK], PswCheckValue[SSE_GROUP_SZ_SHA256][SHA256_DIGEST_SIZE];
		unsigned char *pin[SSE_GROUP_SZ_SHA256];
		union {
			uint32_t *pout[SSE_GROUP_SZ_SHA256];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_SHA256; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = (uint32_t*)PswCheckValue[i];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, SIZE_SALT50, cur_salt->iterations+32, &(x.poutc), SHA256_DIGEST_SIZE, 0);
		// special wtf processing
		for (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) {
			memset(PswCheck, 0, sizeof(PswCheck));
			for (i = 0; i < SHA256_DIGEST_SIZE; i++)
				PswCheck[i % SIZE_PSWCHECK] ^= PswCheckValue[j][i];
			memcpy((void*)crypt_out[index+j], PswCheck, SIZE_PSWCHECK);
		}
#else
		unsigned char PswCheckValue[SHA256_DIGEST_SIZE];
		unsigned char PswCheck[SIZE_PSWCHECK];
		int i;
		pbkdf2_sha256((unsigned char*)saved_key[index], strlen(saved_key[index]), cur_salt->salt, SIZE_SALT50, cur_salt->iterations+32, PswCheckValue, SHA256_DIGEST_SIZE, 0);
		// special wtf processing
		memset(PswCheck, 0, sizeof(PswCheck));
		for (i = 0; i < SHA256_DIGEST_SIZE; i++)
			PswCheck[i % SIZE_PSWCHECK] ^= PswCheckValue[i];
		memcpy((void*)crypt_out[index], PswCheck, SIZE_PSWCHECK);
#endif
	}
	return count;
}

/* Store a candidate password (truncated to PLAINTEXT_LENGTH) at slot index. */
static void rar5_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

/* Return the candidate stored at slot index. */
static char *get_key(int index)
{
	return saved_key[index];
}

/* Format registration: parameters block, then the method table.  Most
 * callbacks are shared defaults or come from rar5_common.h. */
struct fmt_main fmt_rar5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		rar5_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
linop.h
#pragma once #include <Eigen/Dense> #include <Eigen/Sparse> #include <Eigen/IterativeLinearSolvers> #include <SmurffCpp/Utils/MatrixUtils.h> #include <SmurffCpp/Utils/Error.h> #include <SmurffCpp/Utils/counters.h> #include <SmurffCpp/SideInfo/SparseSideInfo.h> namespace smurff { namespace linop { class AtA; } } using Eigen::SparseMatrix; namespace Eigen { namespace internal { // AtA looks-like a SparseMatrix, so let's inherits its traits: template<> struct traits<smurff::linop::AtA> : public Eigen::internal::traits<Eigen::SparseMatrix<double> > {}; } } namespace smurff { namespace linop { // Example of a matrix-free wrapper from a user type to Eigen's compatible type // For the sake of simplicity, this example simply wrap a Eigen::SparseMatrix. class AtA : public Eigen::EigenBase<AtA> { public: // Required typedefs, constants, and method: typedef double Scalar; typedef double RealScalar; typedef int StorageIndex; enum { ColsAtCompileTime = Eigen::Dynamic, MaxColsAtCompileTime = Eigen::Dynamic, IsRowMajor = false }; Index rows() const { return m_A.cols(); } Index cols() const { return m_A.cols(); } template <typename Rhs> Eigen::Product<AtA, Rhs, Eigen::AliasFreeProduct> operator*(const Eigen::MatrixBase<Rhs> &x) const { return Eigen::Product<AtA, Rhs, Eigen::AliasFreeProduct>(*this, x.derived()); } // Custom API: AtA(const Eigen::SparseMatrix<double> &A, double reg) : m_A(A), m_reg(reg) {} const SparseMatrix<double> &m_A; double m_reg; }; } // namespace linop } // namespace smurff // Implementation of AtA * Eigen::DenseVector though a specialization of internal::generic_product_impl: namespace Eigen { namespace internal { template<typename Rhs> struct generic_product_impl<smurff::linop::AtA, Rhs, SparseShape, DenseShape, GemvProduct> // GEMV stands for matrix-vector : generic_product_impl_base<smurff::linop::AtA,Rhs,generic_product_impl<smurff::linop::AtA,Rhs> > { typedef typename Product<smurff::linop::AtA,Rhs>::Scalar Scalar; template<typename Dest> static void 
scaleAndAddTo(Dest& dst, const smurff::linop::AtA& lhs, const Rhs& rhs, const Scalar& alpha) { // This method should implement "dst += alpha * lhs * rhs" inplace, dst += alpha * ((lhs.m_A.transpose() * (lhs.m_A * rhs)) + lhs.m_reg * rhs); } }; } } namespace smurff { namespace linop { template<typename T> int solve_blockcg(Eigen::MatrixXd & X, T & t, double reg, Eigen::MatrixXd & B, double tol, const int blocksize, const int excess, bool throw_on_cholesky_error = false); template<typename T> int solve_blockcg(Eigen::MatrixXd & X, T & t, double reg, Eigen::MatrixXd & B, double tol, bool throw_on_cholesky_error = false); inline void AtA_mul_B(Eigen::MatrixXd & out, SparseSideInfo & A, double reg, Eigen::MatrixXd & B); inline void AtA_mul_B(Eigen::MatrixXd & out, Eigen::MatrixXd & A, double reg, Eigen::MatrixXd & B); inline void makeSymmetric(Eigen::MatrixXd &A) { A = A.selfadjointView<Eigen::Lower>(); } /** good values for solve_blockcg are blocksize=32 an excess=8 */ template<typename T> inline int solve_blockcg(Eigen::MatrixXd & X, T & K, double reg, Eigen::MatrixXd & B, double tol, const int blocksize, const int excess, bool throw_on_cholesky_error) { if (B.rows() <= excess + blocksize) { return solve_blockcg(X, K, reg, B, tol, throw_on_cholesky_error); } // split B into blocks of size <blocksize> (+ excess if needed) Eigen::MatrixXd Xblock, Bblock; int max_iter = 0; for (int i = 0; i < B.rows(); i += blocksize) { int nrows = blocksize; if (i + blocksize + excess >= B.rows()) { nrows = B.rows() - i; } Bblock.resize(nrows, B.cols()); Xblock.resize(nrows, X.cols()); Bblock = B.block(i, 0, nrows, B.cols()); int niter = solve_blockcg(Xblock, K, reg, Bblock, tol, throw_on_cholesky_error); max_iter = std::max(niter, max_iter); X.block(i, 0, nrows, X.cols()) = Xblock; } return max_iter; } // //-- Solves the system (K' * K + reg * I) * X = B for X for m right-hand sides // K = d x n matrix // I = n x n identity // X = n x m matrix // B = n x m matrix // template<typename 
T> inline int solve_blockcg(Eigen::MatrixXd & X, T & K, double reg, Eigen::MatrixXd & B, double tol, bool throw_on_cholesky_error) { // initialize const int nfeat = B.cols(); const int nrhs = B.rows(); double tolsq = tol*tol; if (nfeat != K.cols()) {THROWERROR("B.cols() must equal K.cols()");} Eigen::VectorXd norms(nrhs), inorms(nrhs); norms.setZero(); inorms.setZero(); #pragma omp parallel for schedule(static) for (int rhs = 0; rhs < nrhs; rhs++) { double sumsq = 0.0; for (int feat = 0; feat < nfeat; feat++) { sumsq += B(rhs, feat) * B(rhs, feat); } norms(rhs) = std::sqrt(sumsq); inorms(rhs) = 1.0 / norms(rhs); } Eigen::MatrixXd R(nrhs, nfeat); Eigen::MatrixXd P(nrhs, nfeat); Eigen::MatrixXd Ptmp(nrhs, nfeat); X.setZero(); // normalize R and P: #pragma omp parallel for schedule(static) collapse(2) for (int feat = 0; feat < nfeat; feat++) { for (int rhs = 0; rhs < nrhs; rhs++) { R(rhs, feat) = B(rhs, feat) * inorms(rhs); P(rhs, feat) = R(rhs, feat); } } Eigen::MatrixXd* RtR = new Eigen::MatrixXd(nrhs, nrhs); Eigen::MatrixXd* RtR2 = new Eigen::MatrixXd(nrhs, nrhs); Eigen::MatrixXd KP(nrhs, nfeat); Eigen::MatrixXd PtKP(nrhs, nrhs); //Eigen::Matrix<double, N, N> A; //Eigen::Matrix<double, N, N> Psi; Eigen::MatrixXd A; Eigen::MatrixXd Psi; //A_mul_At_combo(*RtR, R); *RtR = R * R.transpose(); makeSymmetric(*RtR); const int nblocks = (int)ceil(nfeat / 64.0); // CG iteration: int iter = 0; for (iter = 0; iter < 1000; iter++) { // KP = K * P ////double t1 = tick(); AtA_mul_B(KP, K, reg, P); ////double t2 = tick(); PtKP = P * KP.transpose(); auto chol_PtKP = PtKP.llt(); THROWERROR_ASSERT_MSG(!throw_on_cholesky_error || chol_PtKP.info() != Eigen::NumericalIssue, "Cholesky Decomposition failed! (Numerical Issue)"); THROWERROR_ASSERT_MSG(!throw_on_cholesky_error || chol_PtKP.info() != Eigen::InvalidInput, "Cholesky Decomposition failed! 
(Invalid Input)"); A = chol_PtKP.solve(*RtR); A.transposeInPlace(); ////double t3 = tick(); #pragma omp parallel for schedule(guided) for (int block = 0; block < nblocks; block++) { int col = block * 64; int bcols = std::min(64, nfeat - col); // X += A' * P X.block(0, col, nrhs, bcols).noalias() += A * P.block(0, col, nrhs, bcols); // R -= A' * KP R.block(0, col, nrhs, bcols).noalias() -= A * KP.block(0, col, nrhs, bcols); } ////double t4 = tick(); // convergence check: //A_mul_At_combo(*RtR2, R); *RtR2 = R * R.transpose(); makeSymmetric(*RtR2); Eigen::VectorXd d = RtR2->diagonal(); //std::cout << "[ iter " << iter << "] " << std::scientific << d.transpose() << " (max: " << d.maxCoeff() << " > " << tolsq << ")" << std::endl; //std::cout << iter << ":" << std::scientific << d.transpose() << std::endl; if ( (d.array() < tolsq).all()) { break; } // Psi = (R R') \ R2 R2' auto chol_RtR = RtR->llt(); THROWERROR_ASSERT_MSG(!throw_on_cholesky_error || chol_RtR.info() != Eigen::NumericalIssue, "Cholesky Decomposition failed! (Numerical Issue)"); THROWERROR_ASSERT_MSG(!throw_on_cholesky_error || chol_RtR.info() != Eigen::InvalidInput, "Cholesky Decomposition failed! 
(Invalid Input)"); Psi = chol_RtR.solve(*RtR2); Psi.transposeInPlace(); ////double t5 = tick(); // P = R + Psi' * P (P and R are already transposed) #pragma omp parallel for schedule(guided) for (int block = 0; block < nblocks; block++) { int col = block * 64; int bcols = std::min(64, nfeat - col); Eigen::MatrixXd xtmp(nrhs, bcols); xtmp = Psi * P.block(0, col, nrhs, bcols); P.block(0, col, nrhs, bcols) = R.block(0, col, nrhs, bcols) + xtmp; } // R R' = R2 R2' std::swap(RtR, RtR2); ////double t6 = tick(); ////printf("t2-t1 = %.3f, t3-t2 = %.3f, t4-t3 = %.3f, t5-t4 = %.3f, t6-t5 = %.3f\n", t2-t1, t3-t2, t4-t3, t5-t4, t6-t5); } if (iter == 1000) { Eigen::VectorXd d = RtR2->diagonal().cwiseSqrt(); std::cerr << "warning: block_cg: could not find a solution in 1000 iterations; residual: [" << d.transpose() << " ].all() > " << tol << std::endl; } // unnormalizing X: #pragma omp parallel for schedule(static) collapse(2) for (int feat = 0; feat < nfeat; feat++) { for (int rhs = 0; rhs < nrhs; rhs++) { X(rhs, feat) *= norms(rhs); } } delete RtR; delete RtR2; return iter; } inline void AtA_mul_B(Eigen::MatrixXd & out, Eigen::MatrixXd & A, double reg, Eigen::MatrixXd & B) { out.noalias() = (A.transpose() * (A * B.transpose())).transpose() + reg * B; } inline void AtA_mul_B(Eigen::MatrixXd& out, SparseSideInfo& A, double reg, Eigen::MatrixXd& B) { out.noalias() = (A.Ft * (A.F * B.transpose())).transpose() + reg * B; } }}
constant_density_acoustic_time_scalar_1D_4.h
#ifndef __CDA_TIME_SCALAR_1D_4__
#define __CDA_TIME_SCALAR_1D_4__

#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/* Advance the 1D constant-density acoustic wave equation by one time step.
 *
 * Space is discretized with 4th-order finite differences (one-sided stencils
 * one node away from each edge, with out-of-domain samples taken as zero)
 * and time with 2nd-order differences.  A split-field PML (profile sigmaz,
 * auxiliary field Phiz) absorbs outgoing waves in the left/right layers.
 *
 * km1_u            : wavefield at step k-1                  (length >= nz)
 * k_Phiz           : PML auxiliary field at step k          (length >= nz)
 * k_u              : wavefield at step k                    (length >= nz)
 * C                : velocity model                         (length >= nz)
 * rhs              : source term at step k                  (length >= nz)
 * zlpml, n_zlpml   : PML profile of the left layer; n_zlpml == 0 disables it
 * zrpml, n_zrpml   : PML profile of the right layer; n_zrpml == 0 disables it
 * dt, dz           : time and space steps
 * nz               : number of grid nodes
 * kp1_Phiz, kp1_u  : outputs at step k+1 (fully overwritten)
 *
 * The nr_* / nc_* shape arguments are kept for interface compatibility with
 * the generated wrappers; they are not used here.
 *
 * NOTE(review): the one-sided stencils at k==1 / k==nz-2 read idx+2 / idx-2,
 * so the code assumes nz >= 5 (or padded arrays) -- confirm against callers.
 * NOTE(review): the right-layer index n_zrpml-((nz-1)-k) never touches
 * zrpml[0]; this mirrors the original indexing -- verify against the profile
 * builder before changing it.
 */
template< typename T, int ACCURACY >
void cda_time_scalar_1D_4( T* km1_u,  int nr_km1_u,  int nc_km1_u,
                           T* k_Phiz, int nr_k_Phiz, int nc_k_Phiz,
                           T* k_u,    int nr_k_u,    int nc_k_u,
                           T* C,      int nr_C,      int nc_C,
                           T* rhs,    int nr_rhs,    int nc_rhs,
                           T* zlpml,  int n_zlpml,
                           T* zrpml,  int n_zrpml,
                           double const& dt,
                           double const& dz,
                           int const& nz,
                           T* kp1_Phiz, int nr_kp1_Phiz, int nc_kp1_Phiz,
                           T* kp1_u,    int nr_kp1_u,    int nc_kp1_u )
{
    enum {MAX_FD_SHIFT = ACCURACY/2};

    const T dt2 = dt*dt;     /* time step squared */
    const int s = 1;         /* z stride */
    const T dv  = dz;
    const T dv2 = dz*dz;

    /* Respect OMP_NUM_THREADS when it is set and valid; otherwise fall back
     * to the runtime default.  The original code passed getenv()'s result
     * straight to atoi(), which dereferences NULL when the variable is
     * unset. */
    int Num_Th = 1;
#ifdef _OPENMP
    Num_Th = omp_get_max_threads();
#endif
    {
        const char* NUM = getenv("OMP_NUM_THREADS");
        if (NUM != NULL) {
            int requested = atoi(NUM);
            if (requested > 0)
                Num_Th = requested;
        }
    }

#pragma omp parallel num_threads(Num_Th)
    {
#pragma omp for
        for (int k = 0; k < nz; k++) {
            const int idx = k;

            kp1_Phiz[idx] = 0.0;
            kp1_u[idx] = 0.0;

            /* Homogeneous Dirichlet boundary: edge nodes stay zero.  (The
             * original also carried k==0 / k==nz-1 stencil branches after
             * this continue; they were unreachable and have been removed.) */
            if ((k == 0) || (k == nz-1))
                continue;

            T dU, dPhi, lapU;
            if (k == 1) {
                /* one-sided: the missing left neighbour is treated as zero */
                dU   = ((1./12.)*0.0 +(-2./3.)*k_u[idx-s]+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dv;
                dPhi = ((1./12.)*0.0+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*k_Phiz[idx+2*s])/ dv;
                lapU = ((-1./12.)*0.0+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dv2;
            } else if (k == nz-2) {
                /* one-sided: the missing right neighbour is treated as zero */
                dU   = ((1./12.)*k_u[idx-2*s]+(-2./3.)*k_u[idx-s]+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*0.0)/ dv;
                dPhi = ((1./12.)*k_Phiz[idx-2*s]+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*0.0)/ dv;
                lapU = ((-1./12.)*k_u[idx-2*s]+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*0.0)/ dv2;
            } else {
                /* classic centered 4th-order stencil */
                dU   = ((1./12.)*k_u[idx-2*s]+(-2./3.)*k_u[idx-s]+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dv;
                dPhi = ((1./12.)*k_Phiz[idx-2*s]+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*k_Phiz[idx+2*s])/ dv;
                lapU = ((-1./12.)*k_u[idx-2*s]+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dv2;
            }

            /* PML damping coefficient at this node (0 outside the layers). */
            T sigmaz = 0.0;
            if ((n_zlpml > 0) && (k < n_zlpml)) {
                sigmaz = zlpml[k];
            } else if ((n_zrpml > 0) && (k >= nz-n_zrpml)) {
                sigmaz = zrpml[n_zrpml-((nz-1)-k)];
            }

            if (sigmaz != 0.0) {
                /* inside the absorbing layer: damped update of both fields */
                kp1_Phiz[idx] = k_Phiz[idx] - dt * sigmaz*(k_Phiz[idx] + dU);
                T fac1 = (2.0*dt2 / (2.0 + dt*sigmaz));
                T fac2 = (C[idx]*C[idx])*(rhs[idx]+lapU+dPhi)
                         - (km1_u[idx]-2.0*k_u[idx])/dt2
                         + sigmaz*km1_u[idx]/(2.0*dt);
                kp1_u[idx] = fac1 * fac2;
            } else {
                /* interior: plain leap-frog update */
                kp1_Phiz[idx] = k_Phiz[idx];
                kp1_u[idx] = dt2*(C[idx]*C[idx])*(rhs[idx]+lapU+dPhi)
                             - (km1_u[idx]-2.0*k_u[idx]);
            }
        }
    }
}

/* Convenience wrapper fixing the spatial accuracy to order 4. */
template< typename T >
void cda_time_scalar_1D_OMP_4( T* km1_u,  int nr_km1_u,  int nc_km1_u,
                               T* k_Phiz, int nr_k_Phiz, int nc_k_Phiz,
                               T* k_u,    int nr_k_u,    int nc_k_u,
                               T* C,      int nr_C,      int nc_C,
                               T* rhs,    int nr_rhs,    int nc_rhs,
                               T* zlpml,  int n_zlpml,
                               T* zrpml,  int n_zrpml,
                               double const& dt,
                               double const& dz,
                               int const& nz,
                               T* kp1_Phiz, int nr_kp1_Phiz, int nc_kp1_Phiz,
                               T* kp1_u,    int nr_kp1_u,    int nc_kp1_u )
{
    cda_time_scalar_1D_4<T,4>( km1_u, nr_km1_u, nc_km1_u,
                               k_Phiz, nr_k_Phiz, nc_k_Phiz,
                               k_u, nr_k_u, nc_k_u,
                               C, nr_C, nc_C,
                               rhs, nr_rhs, nc_rhs,
                               zlpml, n_zlpml,
                               zrpml, n_zrpml,
                               dt,
                               dz,
                               nz,
                               kp1_Phiz, nr_kp1_Phiz, nc_kp1_Phiz,
                               kp1_u, nr_kp1_u, nc_kp1_u );
}

#endif
atomic-updates-2.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Uniform random value in [0, n).  Arguments are parenthesized so the macro
 * also works for compound expressions such as irand(a + b); the original
 * expansion used the argument bare.  Note that rand() keeps hidden global
 * state and is not thread-safe -- acceptable for this demo, not for real
 * parallel code. */
#define irand(n) ((n) * (double)rand() / (RAND_MAX + 1.0))

/* Ten money buckets shared by all threads. */
int bucket[10];

int main() {
    int i;

    for (i = 0; i < 10; i++)
        bucket[i] = 1000;

    omp_set_num_threads(3);

    /* Every thread runs the whole loop (no worksharing 'for' directive):
     * 3 threads x 10000 random operations on the shared buckets. */
#pragma omp parallel private(i)
    for (i = 0; i < 10000; i++) {
        int from, to, mode, diff = 0, sum;

        from = irand(10);
        do {
            to = irand(10);
        } while (from == to);
        mode = irand(10);

        if (mode == 3) { /* report */
            sum = 0;
            /* Read under the lock so we see a consistent snapshot; the
             * original read the buckets unsynchronized, racing with the
             * concurrent updates below. */
#pragma omp critical
            for (int j = 0; j < 10; j++) {
                printf("%d ", bucket[j]);
                sum += bucket[j];
            }
            printf(" Sum: %d\n", sum);
            continue;
        }

#pragma omp critical
        {
            /* Compute the transfer amount inside the critical section: the
             * original read bucket[from] / bucket[to] outside the lock,
             * which is a data race against other threads' updates. */
            if (mode <= 2) /* equalize */
                diff = (bucket[from] - bucket[to]) / 2;
            else           /* random transfer */
                diff = irand(bucket[from]);
            bucket[from] -= diff;
            bucket[to] += diff;
        }
    }
    return 0;
}
1.norace6.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N]; #pragma omp parallel for schedule(runtime) for (int i = 1; i < N; i++) for (int j = 1; j < N; j++) A[i][j] = A[i][j - 1]; } // CHECK: Region is Data Race Free. // END
7601.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { #pragma omp target teams distribute for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp target teams distribute for (i = 0; i < _PB_N; i++) { #pragma omp target teams distribute for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ #pragma omp target teams distribute for (j1 = 0; j1 < _PB_M; j1++) { #pragma omp target teams distribute for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
template_cpu_02.h
/* Copyright 2015 The math21 Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #pragma once #include "inner.h" #define MATH21_IS_FROM_CPU #include "../kernels/generic_02.kl" #undef MATH21_IS_FROM_CPU namespace math21 { template<typename T> void math21_template_tensor_f_shrink_cpu(NumN fname, NumN n, const T *x, T *y, NumN dims_x, const NumN *dx, NumN dims_y, const NumN *dy, NumN nb, const NumN *b, NumN nv, NumN dims_v, const NumN *dv) { x -= 1; y -= 1; dx -= 1; dy -= 1; b -= 1; dv -= 1; NumN id; math21_type_f_min_like f_min_like = NULL; math21_type_f_argmin_like f_argmin_like = NULL; if (fname == m21_fname_sum) { f_min_like = math21_device_f_sum; } else if (fname == m21_fname_norm1) { f_min_like = math21_device_f_norm1; } else if (fname == m21_fname_norm2_square) { f_min_like = math21_device_f_norm2_square; } else if (fname == m21_fname_mean) { f_min_like = math21_device_f_mean; } else if (fname == m21_fname_max) { f_min_like = math21_device_f_max; } else if (fname == m21_fname_min) { f_min_like = math21_device_f_min; } else if (fname == m21_fname_argmax) { f_argmin_like = math21_device_f_argmax; } else if (fname == m21_fname_argmin) { f_argmin_like = math21_device_f_argmin; } else { MATH21_ASSERT(0, "not support calling function with name " << fname); } if (f_min_like) { #pragma omp parallel for for (id = 1; id <= n; ++id) { math21_template_tensor_f_shrink_cpu_kernel(f_min_like, n, x, y, dims_x, 
dx, dims_y, dy, nb, b, nv, dims_v, dv, id); } } else { #pragma omp parallel for for (id = 1; id <= n; ++id) { math21_template_tensor_f_shrink_cpu_kernel(f_argmin_like, n, x, y, dims_x, dx, dims_y, dy, nb, b, nv, dims_v, dv, id); } } } template<typename T> void math21_template_tensor_f_inner_product_like_shrink_cpu(NumN fname, NumN n, const T *x1, const T *x2, T *y, NumN dims_x, const NumN *dx, NumN dims_y, const NumN *dy, NumN nb, const NumN *b, NumN nv, NumN dims_v, const NumN *dv) { x1 -= 1; x2 -= 1; y -= 1; dx -= 1; dy -= 1; b -= 1; dv -= 1; NumN id; math21_type_f_inner_product_like f = NULL; if (fname == m21_fname_inner_product) { f = math21_device_f_inner_product; } else if (fname == m21_fname_distance_1) { f = math21_device_f_distance_1; } else if (fname == m21_fname_distance_2_square) { f = math21_device_f_distance_2_square; } else { MATH21_ASSERT(0, "not support calling function with name " << fname); } #pragma omp parallel for for (id = 1; id <= n; ++id) { math21_template_tensor_f_inner_product_like_shrink_cpu_kernel(f, n, x1, x2, y, dims_x, dx, dims_y, dy, nb, b, nv, dims_v, dv, id); } } template<typename T> void math21_template_tensor_f_inner_product_like_bcshrink_cpu(NumN fname, NumN n, const T *x1, const T *x2, T *y, NumN dims_x1, const NumN *dx1, NumN dims_x2, const NumN *dx2, NumN dims_x, const NumN *dx, NumN dims_y, const NumN *dy, NumN nb, const NumN *b, NumN nv, NumN dims_v, const NumN *dv) { x1 -= 1; x2 -= 1; y -= 1; dx1 -= 1; dx2 -= 1; dx -= 1; dy -= 1; b -= 1; dv -= 1; NumN id; math21_type_f_inner_product_like f = NULL; if (fname == m21_fname_inner_product) { f = math21_device_f_inner_product; } else if (fname == m21_fname_distance_1) { f = math21_device_f_distance_1; } else if (fname == m21_fname_distance_2_square) { f = math21_device_f_distance_2_square; } else { MATH21_ASSERT(0, "not support calling function with name " << fname); } #pragma omp parallel for for (id = 1; id <= n; ++id) { 
math21_template_tensor_f_inner_product_like_bcshrink_cpu_kernel(f, n, x1, x2, y, dims_x1, dx1, dims_x2, dx2, dims_x, dx, dims_y, dy, nb, b, nv, dims_v, dv, id); } } // todo: use index 1 for x, y // a special kind of sub // x is sub-tensor of y template<typename T> void math21_template_tensor_f_with_broadcast_in_dn_cpu(NumN fname, NumN n, const T *x1, const T *x2, T *y, NumN dims_x1, const NumN *dx1, NumN dims_x2, const NumN *dx2, NumN dims_y, const NumN *dy) { x1 -= 1; x2 -= 1; y -= 1; dx1 -= 1; dx2 -= 1; dy -= 1; NumN id; math21_type_f_add_like f_add_like = NULL; if (fname == m21_fname_add) { f_add_like = math21_device_f_add; } else if (fname == m21_fname_subtract) { f_add_like = math21_device_f_subtract; } else if (fname == m21_fname_multiply) { f_add_like = math21_device_f_multiply; } else if (fname == m21_fname_divide) { f_add_like = math21_device_f_divide; } else if (fname == m21_fname_ele_is_equal) { f_add_like = math21_device_f_is_equal; } else if (fname == m21_fname_ele_is_less_than) { f_add_like = math21_device_f_is_less_than; } else if (fname == m21_fname_ele_is_not_less_than) { f_add_like = math21_device_f_is_not_less_than; } else if (fname == m21_fname_set_using_mask) { } else { MATH21_ASSERT(0, "not support calling function with name " << fname); } if (fname == m21_fname_set_using_mask) { #pragma omp parallel for for (id = 1; id <= n; ++id) { math21_template_tensor_set_using_mask_in_dn_cpu_kernel(n, x1, x2, y, dims_x1, dx1, dims_x2, dx2, dims_y, dy, id); } } else { #pragma omp parallel for for (id = 1; id <= n; ++id) { math21_template_tensor_f_with_broadcast_in_dn_cpu_kernel(f_add_like, n, x1, x2, y, dims_x1, dx1, dims_x2, dx2, dims_y, dy, id); } } } // todo: use index 1 for x, y template<typename T> void math21_template_vector_f_add_like_cpu(NumN fname, NumN n, const T *x1, const T *x2, T *y) { x1 -= 1; x2 -= 1; y -= 1; NumN id; math21_type_f_add_like f_add_like = NULL; if (fname == m21_fname_add) { f_add_like = math21_device_f_add; } else if (fname 
== m21_fname_subtract) { f_add_like = math21_device_f_subtract; } else if (fname == m21_fname_multiply) { f_add_like = math21_device_f_multiply; } else if (fname == m21_fname_divide) { f_add_like = math21_device_f_divide; } else if (fname == m21_fname_ele_is_equal) { f_add_like = math21_device_f_is_equal; } else if (fname == m21_fname_ele_is_less_than) { f_add_like = math21_device_f_is_less_than; } else if (fname == m21_fname_ele_is_not_less_than) { f_add_like = math21_device_f_is_not_less_than; }else if (fname == m21_fname_set_using_mask) { } else { MATH21_ASSERT(0, "not support calling function with name " << fname); } if (fname == m21_fname_set_using_mask) { #pragma omp parallel for for (id = 1; id <= n; ++id) { math21_template_vector_set_using_mask_cpu_kernel(n, x1, x2, y, id); } } else { #pragma omp parallel for for (id = 1; id <= n; ++id) { math21_template_vector_f_add_like_cpu_kernel(f_add_like, n, x1, x2, y, id); } } } template<typename T> void math21_template_vector_f_sin_like_cpu(NumN fname, NumN n, const T *x, T *y) { x -= 1; y -= 1; NumN id; math21_type_f_sin_like f = NULL; if (fname == m21_fname_sin) { f = math21_device_f_sin; } else if (fname == m21_fname_cos) { f = math21_device_f_cos; } else if (fname == m21_fname_tan) { f = math21_device_f_tan; } else if (fname == m21_fname_exp) { f = math21_device_f_exp; } else if (fname == m21_fname_log) { f = math21_device_f_log; } else if (fname == m21_fname_abs) { f = math21_device_f_abs; } else { MATH21_ASSERT(0, "not support calling function with name " << fname); } #pragma omp parallel for for (id = 1; id <= n; ++id) { math21_template_vector_f_sin_like_cpu_kernel(f, n, x, y, id); } } template<typename T> void math21_template_vector_f_kx_like_cpu(NumN fname, NumN n, T k, const T *x, T *y) { x -= 1; y -= 1; NumN id; math21_type_f_kx_like f = NULL; if (fname == m21_fname_kx_add) { f = math21_device_f_add; } else if (fname == m21_fname_kx_subtract) { f = math21_device_f_subtract; } else if (fname == 
m21_fname_xk_subtract) { f = math21_device_f_xk_subtract; } else if (fname == m21_fname_kx_mul) { f = math21_device_f_multiply; } else if (fname == m21_fname_kx_divide) { f = math21_device_f_divide; } else if (fname == m21_fname_xk_divide) { f = math21_device_f_xk_divide; } else if (fname == m21_fname_kx_pow) { f = math21_device_f_kx_pow; } else if (fname == m21_fname_xk_pow) { f = math21_device_f_xk_pow; } else { MATH21_ASSERT(0, "not support calling function with name " << fname); } #pragma omp parallel for for (id = 1; id <= n; ++id) { math21_template_vector_f_kx_like_cpu_kernel(f, n, k, x, y, id); } } }
tree-ssa-loop-ivcanon.c
/* Induction variable canonicalization and loop peeling. Copyright (C) 2004-2020 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* This pass detects the loops that iterate a constant number of times, adds a canonical induction variable (step -1, tested against 0) and replaces the exit test. This enables the less powerful rtl level analysis to use this information. This might spoil the code in some cases (by increasing register pressure). Note that in the case the new variable is not needed, ivopts will get rid of it, so it might only be a problem when there are no other linear induction variables. In that case the created optimization possibilities are likely to pay up. We also perform - complete unrolling (or peeling) when the loops is rolling few enough times - simple peeling (i.e. copying few initial iterations prior the loop) when number of iteration estimate is known (typically by the profile info). 
*/ #include "config.h" #include "system.h" #include "coretypes.h" #include "backend.h" #include "tree.h" #include "gimple.h" #include "cfghooks.h" #include "tree-pass.h" #include "ssa.h" #include "cgraph.h" #include "gimple-pretty-print.h" #include "fold-const.h" #include "profile.h" #include "gimple-fold.h" #include "tree-eh.h" #include "gimple-iterator.h" #include "tree-cfg.h" #include "tree-ssa-loop-manip.h" #include "tree-ssa-loop-niter.h" #include "tree-ssa-loop.h" #include "tree-into-ssa.h" #include "cfgloop.h" #include "tree-chrec.h" #include "tree-scalar-evolution.h" #include "tree-inline.h" #include "tree-cfgcleanup.h" #include "builtins.h" #include "tree-ssa-sccvn.h" #include "dbgcnt.h" /* Specifies types of loops that may be unrolled. */ enum unroll_level { UL_SINGLE_ITER, /* Only loops that exit immediately in the first iteration. */ UL_NO_GROWTH, /* Only loops whose unrolling will not cause increase of code size. */ UL_ALL /* All suitable loops. */ }; /* Adds a canonical induction variable to LOOP iterating NITER times. EXIT is the exit edge whose condition is replaced. The ssa versions of the new IV before and after increment will be stored in VAR_BEFORE and VAR_AFTER if they are not NULL. */ void create_canonical_iv (class loop *loop, edge exit, tree niter, tree *var_before = NULL, tree *var_after = NULL) { edge in; tree type, var; gcond *cond; gimple_stmt_iterator incr_at; enum tree_code cmp; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num); print_generic_expr (dump_file, niter, TDF_SLIM); fprintf (dump_file, " iterations.\n"); } cond = as_a <gcond *> (last_stmt (exit->src)); in = EDGE_SUCC (exit->src, 0); if (in == exit) in = EDGE_SUCC (exit->src, 1); /* Note that we do not need to worry about overflows, since type of niter is always unsigned and all comparisons are just for equality/nonequality -- i.e. everything works with a modulo arithmetics. 
*/ type = TREE_TYPE (niter); niter = fold_build2 (PLUS_EXPR, type, niter, build_int_cst (type, 1)); incr_at = gsi_last_bb (in->src); create_iv (niter, build_int_cst (type, -1), NULL_TREE, loop, &incr_at, false, var_before, &var); if (var_after) *var_after = var; cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR; gimple_cond_set_code (cond, cmp); gimple_cond_set_lhs (cond, var); gimple_cond_set_rhs (cond, build_int_cst (type, 0)); update_stmt (cond); } /* Describe size of loop as detected by tree_estimate_loop_size. */ struct loop_size { /* Number of instructions in the loop. */ int overall; /* Number of instructions that will be likely optimized out in peeled iterations of loop (i.e. computation based on induction variable where induction variable starts at known constant.) */ int eliminated_by_peeling; /* Same statistics for last iteration of loop: it is smaller because instructions after exit are not executed. */ int last_iteration; int last_iteration_eliminated_by_peeling; /* If some IV computation will become constant. */ bool constant_iv; /* Number of call stmts that are not a builtin and are pure or const present on the hot path. */ int num_pure_calls_on_hot_path; /* Number of call stmts that are not a builtin and are not pure nor const present on the hot path. */ int num_non_pure_calls_on_hot_path; /* Number of statements other than calls in the loop. */ int non_call_stmts_on_hot_path; /* Number of branches seen on the hot path. */ int num_branches_on_hot_path; }; /* Return true if OP in STMT will be constant after peeling LOOP. */ static bool constant_after_peeling (tree op, gimple *stmt, class loop *loop) { if (CONSTANT_CLASS_P (op)) return true; /* We can still fold accesses to constant arrays when index is known. */ if (TREE_CODE (op) != SSA_NAME) { tree base = op; /* First make fast look if we see constant array inside. 
*/ while (handled_component_p (base)) base = TREE_OPERAND (base, 0); if ((DECL_P (base) && ctor_for_folding (base) != error_mark_node) || CONSTANT_CLASS_P (base)) { /* If so, see if we understand all the indices. */ base = op; while (handled_component_p (base)) { if (TREE_CODE (base) == ARRAY_REF && !constant_after_peeling (TREE_OPERAND (base, 1), stmt, loop)) return false; base = TREE_OPERAND (base, 0); } return true; } return false; } /* Induction variables are constants when defined in loop. */ if (loop_containing_stmt (stmt) != loop) return false; tree ev = analyze_scalar_evolution (loop, op); if (chrec_contains_undetermined (ev) || chrec_contains_symbols (ev)) return false; return true; } /* Computes an estimated number of insns in LOOP. EXIT (if non-NULL) is an exite edge that will be eliminated in all but last iteration of the loop. EDGE_TO_CANCEL (if non-NULL) is an non-exit edge eliminated in the last iteration of loop. Return results in SIZE, estimate benefits for complete unrolling exiting by EXIT. Stop estimating after UPPER_BOUND is met. Return true in this case. 
*/ static bool tree_estimate_loop_size (class loop *loop, edge exit, edge edge_to_cancel, struct loop_size *size, int upper_bound) { basic_block *body = get_loop_body (loop); gimple_stmt_iterator gsi; unsigned int i; bool after_exit; vec<basic_block> path = get_loop_hot_path (loop); size->overall = 0; size->eliminated_by_peeling = 0; size->last_iteration = 0; size->last_iteration_eliminated_by_peeling = 0; size->num_pure_calls_on_hot_path = 0; size->num_non_pure_calls_on_hot_path = 0; size->non_call_stmts_on_hot_path = 0; size->num_branches_on_hot_path = 0; size->constant_iv = 0; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num); for (i = 0; i < loop->num_nodes; i++) { if (edge_to_cancel && body[i] != edge_to_cancel->src && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src)) after_exit = true; else after_exit = false; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, after_exit); for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); int num = estimate_num_insns (stmt, &eni_size_weights); bool likely_eliminated = false; bool likely_eliminated_last = false; bool likely_eliminated_peeled = false; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " size: %3i ", num); print_gimple_stmt (dump_file, gsi_stmt (gsi), 0); } /* Look for reasons why we might optimize this stmt away. */ if (!gimple_has_side_effects (stmt)) { /* Exit conditional. 
*/ if (exit && body[i] == exit->src && stmt == last_stmt (exit->src)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Exit condition will be eliminated " "in peeled copies.\n"); likely_eliminated_peeled = true; } if (edge_to_cancel && body[i] == edge_to_cancel->src && stmt == last_stmt (edge_to_cancel->src)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Exit condition will be eliminated " "in last copy.\n"); likely_eliminated_last = true; } /* Sets of IV variables */ if (gimple_code (stmt) == GIMPLE_ASSIGN && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Induction variable computation will" " be folded away.\n"); likely_eliminated = true; } /* Assignments of IV variables. */ else if (gimple_code (stmt) == GIMPLE_ASSIGN && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop) && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS || constant_after_peeling (gimple_assign_rhs2 (stmt), stmt, loop)) && gimple_assign_rhs_class (stmt) != GIMPLE_TERNARY_RHS) { size->constant_iv = true; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Constant expression will be folded away.\n"); likely_eliminated = true; } /* Conditionals. */ else if ((gimple_code (stmt) == GIMPLE_COND && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop) && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop) /* We don't simplify all constant compares so make sure they are not both constant already. See PR70288. */ && (! is_gimple_min_invariant (gimple_cond_lhs (stmt)) || ! is_gimple_min_invariant (gimple_cond_rhs (stmt)))) || (gimple_code (stmt) == GIMPLE_SWITCH && constant_after_peeling (gimple_switch_index ( as_a <gswitch *> (stmt)), stmt, loop) && ! 
is_gimple_min_invariant (gimple_switch_index (as_a <gswitch *> (stmt))))) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Constant conditional.\n"); likely_eliminated = true; } } size->overall += num; if (likely_eliminated || likely_eliminated_peeled) size->eliminated_by_peeling += num; if (!after_exit) { size->last_iteration += num; if (likely_eliminated || likely_eliminated_last) size->last_iteration_eliminated_by_peeling += num; } if ((size->overall * 3 / 2 - size->eliminated_by_peeling - size->last_iteration_eliminated_by_peeling) > upper_bound) { free (body); path.release (); return true; } } } while (path.length ()) { basic_block bb = path.pop (); for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); if (gimple_code (stmt) == GIMPLE_CALL && !gimple_inexpensive_call_p (as_a <gcall *> (stmt))) { int flags = gimple_call_flags (stmt); if (flags & (ECF_PURE | ECF_CONST)) size->num_pure_calls_on_hot_path++; else size->num_non_pure_calls_on_hot_path++; size->num_branches_on_hot_path ++; } /* Count inexpensive calls as non-calls, because they will likely expand inline. */ else if (gimple_code (stmt) != GIMPLE_DEBUG) size->non_call_stmts_on_hot_path++; if (((gimple_code (stmt) == GIMPLE_COND && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop) || !constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))) || (gimple_code (stmt) == GIMPLE_SWITCH && !constant_after_peeling (gimple_switch_index ( as_a <gswitch *> (stmt)), stmt, loop))) && (!exit || bb != exit->src)) size->num_branches_on_hot_path++; } } path.release (); if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall, size->eliminated_by_peeling, size->last_iteration, size->last_iteration_eliminated_by_peeling); free (body); return false; } /* Estimate number of insns of completely unrolled loop. 
   It is (NUNROLL + 1) * size of loop body with taking into account
   the fact that in last copy everything after exit conditional
   is dead and that some instructions will be eliminated after
   peeling.  Loop body is likely going to simplify further, this
   is difficult to guess, we just decrease the result by 1/3.  */

static unsigned HOST_WIDE_INT
estimated_unrolled_size (struct loop_size *size,
			 unsigned HOST_WIDE_INT nunroll)
{
  HOST_WIDE_INT unr_insns = ((nunroll)
			     * (HOST_WIDE_INT) (size->overall
						- size->eliminated_by_peeling));
  if (!nunroll)
    unr_insns = 0;
  /* The last copy keeps only the code live after the exit test.  */
  unr_insns += size->last_iteration - size->last_iteration_eliminated_by_peeling;

  /* Account for expected post-unroll simplification (about 1/3).  */
  unr_insns = unr_insns * 2 / 3;
  if (unr_insns <= 0)
    unr_insns = 1;

  return unr_insns;
}

/* Loop LOOP is known to not loop.  See if there is an edge in the loop
   body that can be removed to make the loop to always exit and at
   the same time it does not make any code potentially executed
   during the last iteration dead.

   After complete unrolling we still may get rid of the conditional
   on the exit in the last copy even if we have no idea what it does.
   This is quite common case for loops of form

     int a[5];
     for (i=0;i<b;i++)
       a[i]=0;

   Here we prove the loop to iterate 5 times but we do not know
   it from induction variable.

   For now we handle only simple case where there is exit condition
   just before the latch block and the latch block contains no statements
   with side effect that may otherwise terminate the execution of loop
   (such as by EH or by terminating the program or longjmp).

   In the general case we may want to cancel the paths leading to statements
   loop-niter identified as having undefined effect in the last iteration.
   The other cases are hopefully rare and will be cleaned up later.  */

static edge
loop_edge_to_cancel (class loop *loop)
{
  unsigned i;
  edge edge_to_cancel;
  gimple_stmt_iterator gsi;

  /* We want only one predecessor of the loop.  */
  if (EDGE_COUNT (loop->latch->preds) > 1)
    return NULL;

  auto_vec<edge> exits = get_loop_exit_edges (loop);

  FOR_EACH_VEC_ELT (exits, i, edge_to_cancel)
    {
      /* Find the other edge than the loop exit
         leaving the conditional.  */
      if (EDGE_COUNT (edge_to_cancel->src->succs) != 2)
	continue;
      if (EDGE_SUCC (edge_to_cancel->src, 0) == edge_to_cancel)
	edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 1);
      else
	edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 0);

      /* We only can handle conditionals.  */
      if (!(edge_to_cancel->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      /* We should never have conditionals in the loop latch.  */
      gcc_assert (edge_to_cancel->dest != loop->header);

      /* Check that it leads to loop latch.  */
      if (edge_to_cancel->dest != loop->latch)
	continue;

      /* Verify that the code in loop latch does nothing that may end program
	 execution without really reaching the exit.  This may include
	 non-pure/const function calls, EH statements, volatile ASMs etc.  */
      for (gsi = gsi_start_bb (loop->latch); !gsi_end_p (gsi); gsi_next (&gsi))
	if (gimple_has_side_effects (gsi_stmt (gsi)))
	  return NULL;
      return edge_to_cancel;
    }
  return NULL;
}

/* Remove all tests for exits that are known to be taken after LOOP was
   peeled NPEELED times.  Put gcc_unreachable before every statement
   known to not be executed.  */

static bool
remove_exits_and_undefined_stmts (class loop *loop, unsigned int npeeled)
{
  class nb_iter_bound *elt;
  bool changed = false;

  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* If statement is known to be undefined after peeling, turn it
	 into unreachable (or trap when debugging experience is supposed
	 to be good).  */
      if (!elt->is_exit
	  && wi::ltu_p (elt->bound, npeeled))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
	  gcall *stmt = gimple_build_call
	      (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
	  gimple_set_location (stmt, gimple_location (elt->stmt));
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	  split_block (gimple_bb (stmt), stmt);
	  changed = true;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced statement unreachable: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0);
	    }
	}
      /* If we know the exit will be taken after peeling, update.  */
      else if (elt->is_exit
	       && wi::leu_p (elt->bound, npeeled))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced exit to be taken: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0);
	    }
	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);
	  exit_edge->probability = profile_probability::always ();
	  gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
	  gcond *cond_stmt = as_a <gcond *> (elt->stmt);
	  /* Fold the exit condition to a constant so the exit is
	     unconditionally taken.  */
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_true (cond_stmt);
	  else
	    gimple_cond_make_false (cond_stmt);
	  update_stmt (cond_stmt);
	  changed = true;
	}
    }
  return changed;
}

/* Remove all exits that are known to be never taken because of the loop
   bound discovered.  */

static bool
remove_redundant_iv_tests (class loop *loop)
{
  class nb_iter_bound *elt;
  bool changed = false;

  if (!loop->any_upper_bound)
    return false;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* Exit is pointless if it won't be taken before loop reaches
	 upper bound.  */
      if (elt->is_exit && loop->any_upper_bound
	  && wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);
	  class tree_niter_desc niter;

	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);

	  /* Only when we know the actual number of iterations, not
	     just a bound, we can remove the exit.  */
	  if (!number_of_iterations_exit (loop, exit_edge,
					  &niter, false, false)
	      || !integer_onep (niter.assumptions)
	      || !integer_zerop (niter.may_be_zero)
	      || !niter.niter
	      || TREE_CODE (niter.niter) != INTEGER_CST
	      || !wi::ltu_p (loop->nb_iterations_upper_bound,
			     wi::to_widest (niter.niter)))
	    continue;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Removed pointless exit: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0);
	    }
	  gcond *cond_stmt = as_a <gcond *> (elt->stmt);
	  /* Fold the condition so the exit edge is never taken.  */
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_false (cond_stmt);
	  else
	    gimple_cond_make_true (cond_stmt);
	  update_stmt (cond_stmt);
	  changed = true;
	}
    }
  return changed;
}

/* Stores loops that will be unlooped and edges that will be removed
   after we process whole loop tree.  */
static vec<loop_p> loops_to_unloop;
static vec<int> loops_to_unloop_nunroll;
static vec<edge> edges_to_remove;
/* Stores loops that have been peeled.  */
static bitmap peeled_loops;

/* Cancel all fully unrolled loops by putting __builtin_unreachable
   on the latch edge.
   We do it after all unrolling since unlooping moves basic blocks
   across loop boundaries trashing loop closed SSA form as well
   as SCEV info needed to be intact during unrolling.

   IRRED_INVALIDATED is used to bookkeep if information about
   irreducible regions may become invalid as a result
   of the transformation.
   LOOP_CLOSED_SSA_INVALIDATED is used to bookkeep the case
   when we need to go into loop closed SSA form.  */

static void
unloop_loops (bitmap loop_closed_ssa_invalidated,
	      bool *irred_invalidated)
{
  while (loops_to_unloop.length ())
    {
      class loop *loop = loops_to_unloop.pop ();
      int n_unroll = loops_to_unloop_nunroll.pop ();
      basic_block latch = loop->latch;
      edge latch_edge = loop_latch_edge (loop);
      int flags = latch_edge->flags;
      location_t locus = latch_edge->goto_locus;
      gcall *stmt;
      gimple_stmt_iterator gsi;

      remove_exits_and_undefined_stmts (loop, n_unroll);

      /* Unloop destroys the latch edge.  */
      unloop (loop, irred_invalidated, loop_closed_ssa_invalidated);

      /* Create new basic block for the latch edge destination and wire
	 it in.  */
      stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
      latch_edge = make_edge (latch, create_basic_block (NULL, NULL, latch),
			      flags);
      latch_edge->probability = profile_probability::never ();
      latch_edge->flags |= flags;
      latch_edge->goto_locus = locus;

      add_bb_to_loop (latch_edge->dest, current_loops->tree_root);
      latch_edge->dest->count = profile_count::zero ();
      set_immediate_dominator (CDI_DOMINATORS, latch_edge->dest, latch_edge->src);

      gsi = gsi_start_bb (latch_edge->dest);
      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
    }
  loops_to_unloop.release ();
  loops_to_unloop_nunroll.release ();

  /* Remove edges in peeled copies.  Given remove_path removes dominated
     regions we need to cope with removal of already removed paths.  */
  unsigned i;
  edge e;
  auto_vec<int, 20> src_bbs;
  src_bbs.reserve_exact (edges_to_remove.length ());
  FOR_EACH_VEC_ELT (edges_to_remove, i, e)
    src_bbs.quick_push (e->src->index);
  FOR_EACH_VEC_ELT (edges_to_remove, i, e)
    /* Skip edges whose source block was already removed by an earlier
       remove_path call.  */
    if (BASIC_BLOCK_FOR_FN (cfun, src_bbs[i]))
      {
	bool ok = remove_path (e, irred_invalidated,
			       loop_closed_ssa_invalidated);
	gcc_assert (ok);
      }
  edges_to_remove.release ();
}

/* Tries to unroll LOOP completely, i.e. NITER times.
   UL determines which loops we are allowed to unroll.
   EXIT is the exit of the loop that should be eliminated.
   MAXITER specifies bound on number of iterations, -1 if it is
   not known or too large for HOST_WIDE_INT.  The location
   LOCUS corresponding to the loop is used when emitting
   a summary of the unroll to the dump file.  */

static bool
try_unroll_loop_completely (class loop *loop,
			    edge exit, tree niter, bool may_be_zero,
			    enum unroll_level ul,
			    HOST_WIDE_INT maxiter,
			    dump_user_location_t locus, bool allow_peel)
{
  unsigned HOST_WIDE_INT n_unroll = 0;
  bool n_unroll_found = false;
  edge edge_to_cancel = NULL;

  /* See if we proved number of iterations to be low constant.

     EXIT is an edge that will be removed in all but last iteration of
     the loop.

     EDGE_TO_CANCEL is an edge that will be removed from the last iteration
     of the unrolled sequence and is expected to make the final loop not
     rolling.

     If the number of execution of loop is determined by standard induction
     variable test, then EXIT and EDGE_TO_CANCEL are the two edges leaving
     from the iv test.  */
  if (tree_fits_uhwi_p (niter))
    {
      n_unroll = tree_to_uhwi (niter);
      n_unroll_found = true;
      edge_to_cancel = EDGE_SUCC (exit->src, 0);
      if (edge_to_cancel == exit)
	edge_to_cancel = EDGE_SUCC (exit->src, 1);
    }
  /* We do not know the number of iterations and thus we cannot eliminate
     the EXIT edge.  */
  else
    exit = NULL;

  /* See if we can improve our estimate by using recorded loop bounds.  */
  if ((allow_peel || maxiter == 0 || ul == UL_NO_GROWTH)
      && maxiter >= 0
      && (!n_unroll_found || (unsigned HOST_WIDE_INT)maxiter < n_unroll))
    {
      n_unroll = maxiter;
      n_unroll_found = true;
      /* Loop terminates before the IV variable test, so we cannot
	 remove it in the last iteration.  */
      edge_to_cancel = NULL;
    }

  if (!n_unroll_found)
    return false;

  if (!loop->unroll
      && n_unroll > (unsigned) param_max_completely_peel_times)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Not unrolling loop %d "
		 "(--param max-completely-peel-times limit reached).\n",
		 loop->num);
      return false;
    }

  if (!edge_to_cancel)
    edge_to_cancel = loop_edge_to_cancel (loop);

  if (n_unroll)
    {
      if (ul == UL_SINGLE_ITER)
	return false;

      if (loop->unroll)
	{
	  /* If the unrolling factor is too large, bail out.  */
	  if (n_unroll > (unsigned)loop->unroll)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "Not unrolling loop %d: "
			 "user didn't want it unrolled completely.\n",
			 loop->num);
	      return false;
	    }
	}
      else
	{
	  struct loop_size size;
	  /* EXIT can be removed only if we are sure it passes first N_UNROLL
	     iterations.  */
	  bool remove_exit = (exit && niter
			      && TREE_CODE (niter) == INTEGER_CST
			      && wi::leu_p (n_unroll, wi::to_widest (niter)));
	  bool large
	    = tree_estimate_loop_size
		(loop, remove_exit ? exit : NULL, edge_to_cancel, &size,
		 param_max_completely_peeled_insns);
	  if (large)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Not unrolling loop %d: it is too large.\n",
			 loop->num);
	      return false;
	    }

	  unsigned HOST_WIDE_INT ninsns = size.overall;
	  unsigned HOST_WIDE_INT unr_insns
	    = estimated_unrolled_size (&size, n_unroll);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  Loop size: %d\n", (int) ninsns);
	      fprintf (dump_file, "  Estimated size after unrolling: %d\n",
		       (int) unr_insns);
	    }

	  /* If the code is going to shrink, we don't need to be extra
	     cautious on guessing if the unrolling is going to be
	     profitable.  */
	  if (unr_insns
	      /* If there is IV variable that will become constant, we
		 save one instruction in the loop prologue we do not
		 account otherwise.  */
	      <= ninsns + (size.constant_iv != false))
	    ;
	  /* We unroll only inner loops, because we do not consider it
	     profitable otherwise.  We still can cancel loopback edge
	     of not rolling loop; this is always a good idea.  */
	  else if (ul == UL_NO_GROWTH)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Not unrolling loop %d: size would grow.\n",
			 loop->num);
	      return false;
	    }
	  /* Outer loops tend to be less interesting candidates for
	     complete unrolling unless we can do a lot of propagation
	     into the inner loop body.  For now we disable outer loop
	     unrolling when the code would grow.  */
	  else if (loop->inner)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Not unrolling loop %d: "
			 "it is not innermost and code would grow.\n",
			 loop->num);
	      return false;
	    }
	  /* If there is call on a hot path through the loop, then
	     there is most probably not much to optimize.  */
	  else if (size.num_non_pure_calls_on_hot_path)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Not unrolling loop %d: "
			 "contains call and code would grow.\n",
			 loop->num);
	      return false;
	    }
	  /* If there is pure/const call in the function, then we can
	     still optimize the unrolled loop body if it contains some
	     other interesting code than the calls and code storing or
	     cumulating the return value.  */
	  else if (size.num_pure_calls_on_hot_path
		   /* One IV increment, one test, one ivtmp store and
		      one useful stmt.  That is about minimal loop
		      doing pure call.  */
		   && (size.non_call_stmts_on_hot_path
		       <= 3 + size.num_pure_calls_on_hot_path))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Not unrolling loop %d: "
			 "contains just pure calls and code would grow.\n",
			 loop->num);
	      return false;
	    }
	  /* Complete unrolling is major win when control flow is removed and
	     one big basic block is created.  If the loop contains control
	     flow the optimization may still be a win because of eliminating
	     the loop overhead but it also may blow the branch predictor
	     tables.  Limit number of branches on the hot path through the
	     peeled sequence.  */
	  else if (size.num_branches_on_hot_path * (int)n_unroll
		   > param_max_peel_branches)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Not unrolling loop %d: "
			 "number of branches on hot path in the unrolled "
			 "sequence reaches --param max-peel-branches limit.\n",
			 loop->num);
	      return false;
	    }
	  else if (unr_insns
		   > (unsigned) param_max_completely_peeled_insns)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Not unrolling loop %d: "
			 "number of insns in the unrolled sequence reaches "
			 "--param max-completely-peeled-insns limit.\n",
			 loop->num);
	      return false;
	    }
	}

      if (!dbg_cnt (gimple_unroll))
	return false;

      initialize_original_copy_tables ();
      auto_sbitmap wont_exit (n_unroll + 1);
      if (exit && niter
	  && TREE_CODE (niter) == INTEGER_CST
	  && wi::leu_p (n_unroll, wi::to_widest (niter)))
	{
	  bitmap_ones (wont_exit);
	  if (wi::eq_p (wi::to_widest (niter), n_unroll)
	      || edge_to_cancel)
	    bitmap_clear_bit (wont_exit, 0);
	}
      else
	{
	  exit = NULL;
	  bitmap_clear (wont_exit);
	}
      if (may_be_zero)
	bitmap_clear_bit (wont_exit, 1);

      if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
						 n_unroll, wont_exit,
						 exit, &edges_to_remove,
						 DLTHE_FLAG_UPDATE_FREQ
						 | DLTHE_FLAG_COMPLETTE_PEEL))
	{
          free_original_copy_tables ();
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Failed to duplicate the loop\n");
	  return false;
	}

      free_original_copy_tables ();
    }

  /* Remove the conditional from the last copy of the loop.  */
  if (edge_to_cancel)
    {
      gcond *cond = as_a <gcond *> (last_stmt (edge_to_cancel->src));
      force_edge_cold (edge_to_cancel, true);
      if (edge_to_cancel->flags & EDGE_TRUE_VALUE)
	gimple_cond_make_false (cond);
      else
	gimple_cond_make_true (cond);
      update_stmt (cond);
      /* Do not remove the path, as doing so may remove outer loop and
	 confuse bookkeeping code in tree_unroll_loops_completely.  */
    }

  /* Store the loop for later unlooping and exit removal.  */
  loops_to_unloop.safe_push (loop);
  loops_to_unloop_nunroll.safe_push (n_unroll);

  if (dump_enabled_p ())
    {
      if (!n_unroll)
        dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
                         "loop turned into non-loop; it never loops\n");
      else
        {
          dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
                           "loop with %d iterations completely unrolled",
			   (int) n_unroll);
          if (loop->header->count.initialized_p ())
            dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS,
                         " (header execution count %d)",
                         (int)loop->header->count.to_gcov_type ());
          dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, "\n");
        }
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (exit)
        fprintf (dump_file, "Exit condition of peeled iterations was "
		 "eliminated.\n");
      if (edge_to_cancel)
        fprintf (dump_file, "Last iteration exit edge was proved true.\n");
      else
        fprintf (dump_file, "Latch of last iteration was marked by "
		 "__builtin_unreachable ().\n");
    }

  return true;
}

/* Return number of instructions after peeling.  */
static unsigned HOST_WIDE_INT
estimated_peeled_sequence_size (struct loop_size *size,
			        unsigned HOST_WIDE_INT npeel)
{
  return MAX (npeel * (HOST_WIDE_INT) (size->overall
			     	       - size->eliminated_by_peeling), 1);
}

/* If the loop is expected to iterate N times and is
   small enough, duplicate the loop body N+1 times before
   the loop itself.  This way the hot path will never
   enter the loop.  Parameters are the same as for
   try_unroll_loops_completely */

static bool
try_peel_loop (class loop *loop,
	       edge exit, tree niter, bool may_be_zero,
	       HOST_WIDE_INT maxiter)
{
  HOST_WIDE_INT npeel;
  struct loop_size size;
  int peeled_size;

  if (!flag_peel_loops
      || param_max_peel_times <= 0
      || !peeled_loops)
    return false;

  if (bitmap_bit_p (peeled_loops, loop->num))
    {
      if (dump_file)
        fprintf (dump_file, "Not peeling: loop is already peeled\n");
      return false;
    }

  /* We don't peel loops that will be unrolled as this can duplicate a
     loop more times than the user requested.  */
  if (loop->unroll)
    {
      if (dump_file)
        fprintf (dump_file, "Not peeling: user didn't want it peeled.\n");
      return false;
    }

  /* Peel only innermost loops.
     While the code is perfectly capable of peeling non-innermost loops,
     the heuristics would probably need some improvements.  */
  if (loop->inner)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: outer loop\n");
      return false;
    }

  if (!optimize_loop_for_speed_p (loop))
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: cold loop\n");
      return false;
    }

  /* Check if there is an estimate on the number of iterations.  */
  npeel = estimated_loop_iterations_int (loop);
  if (npeel < 0)
    npeel = likely_max_loop_iterations_int (loop);
  if (npeel < 0)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: number of iterations is not "
	         "estimated\n");
      return false;
    }
  if (maxiter >= 0 && maxiter <= npeel)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: upper bound is known so can "
		 "unroll completely\n");
      return false;
    }

  /* We want to peel estimated number of iterations + 1 (so we never
     enter the loop on quick path).  Check against PARAM_MAX_PEEL_TIMES
     and be sure to avoid overflows.  */
  if (npeel > param_max_peel_times - 1)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: rolls too much "
		 "(%i + 1 > --param max-peel-times)\n", (int) npeel);
      return false;
    }
  npeel++;

  /* Check peeled loops size.  */
  tree_estimate_loop_size (loop, exit, NULL, &size,
			   param_max_peeled_insns);
  if ((peeled_size = estimated_peeled_sequence_size (&size, (int) npeel))
      > param_max_peeled_insns)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: peeled sequence size is too large "
		 "(%i insns > --param max-peel-insns)", peeled_size);
      return false;
    }

  if (!dbg_cnt (gimple_unroll))
    return false;

  /* Duplicate possibly eliminating the exits.  */
  initialize_original_copy_tables ();
  auto_sbitmap wont_exit (npeel + 1);
  if (exit && niter
      && TREE_CODE (niter) == INTEGER_CST
      && wi::leu_p (npeel, wi::to_widest (niter)))
    {
      bitmap_ones (wont_exit);
      bitmap_clear_bit (wont_exit, 0);
    }
  else
    {
      exit = NULL;
      bitmap_clear (wont_exit);
    }
  if (may_be_zero)
    bitmap_clear_bit (wont_exit, 1);
  if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
					     npeel, wont_exit,
					     exit, &edges_to_remove,
					     DLTHE_FLAG_UPDATE_FREQ))
    {
      free_original_copy_tables ();
      return false;
    }
  free_original_copy_tables ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Peeled loop %d, %i times.\n",
	       loop->num, (int) npeel);
    }
  /* Adjust the remaining loop's iteration bounds by the peeled count.  */
  if (loop->any_estimate)
    {
      if (wi::ltu_p (npeel, loop->nb_iterations_estimate))
        loop->nb_iterations_estimate -= npeel;
      else
	loop->nb_iterations_estimate = 0;
    }
  if (loop->any_upper_bound)
    {
      if (wi::ltu_p (npeel, loop->nb_iterations_upper_bound))
        loop->nb_iterations_upper_bound -= npeel;
      else
	loop->nb_iterations_upper_bound = 0;
    }
  if (loop->any_likely_upper_bound)
    {
      if (wi::ltu_p (npeel, loop->nb_iterations_likely_upper_bound))
	loop->nb_iterations_likely_upper_bound -= npeel;
      else
	{
	  loop->any_estimate = true;
	  loop->nb_iterations_estimate = 0;
	  loop->nb_iterations_likely_upper_bound = 0;
	}
    }
  profile_count entry_count = profile_count::zero ();

  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->src != loop->latch)
      {
	if (e->src->count.initialized_p ())
	  entry_count += e->src->count;
	gcc_assert (!flow_bb_inside_loop_p (loop, e->src));
      }
  profile_probability p;
  p = entry_count.probability_in (loop->header->count);
  scale_loop_profile (loop, p, 0);
  bitmap_set_bit (peeled_loops, loop->num);
  return true;
}

/* Adds a canonical induction variable to LOOP if suitable.
   CREATE_IV is true if we may create a new iv.  UL determines
   which loops we are allowed to completely unroll.  If TRY_EVAL is true, we try
   to determine the number of iterations of a loop by direct evaluation.
   Returns true if cfg is changed.   */

static bool
canonicalize_loop_induction_variables (class loop *loop,
				       bool create_iv, enum unroll_level ul,
				       bool try_eval, bool allow_peel)
{
  edge exit = NULL;
  tree niter;
  HOST_WIDE_INT maxiter;
  bool modified = false;
  dump_user_location_t locus;
  class tree_niter_desc niter_desc;
  bool may_be_zero = false;

  /* For unrolling allow conditional constant or zero iterations, thus
     perform loop-header copying on-the-fly.  */
  exit = single_exit (loop);
  niter = chrec_dont_know;
  if (exit && number_of_iterations_exit (loop, exit, &niter_desc, false))
    {
      niter = niter_desc.niter;
      may_be_zero
	= niter_desc.may_be_zero && !integer_zerop (niter_desc.may_be_zero);
    }
  if (TREE_CODE (niter) == INTEGER_CST)
    locus = last_stmt (exit->src);
  else
    {
      /* For non-constant niter fold may_be_zero into niter again.  */
      if (may_be_zero)
	{
	  if (COMPARISON_CLASS_P (niter_desc.may_be_zero))
	    niter = fold_build3 (COND_EXPR, TREE_TYPE (niter),
				 niter_desc.may_be_zero,
				 build_int_cst (TREE_TYPE (niter), 0), niter);
	  else
	    niter = chrec_dont_know;
	  may_be_zero = false;
	}

      /* If the loop has more than one exit, try checking all of them
	 for # of iterations determinable through scev.  */
      if (!exit)
	niter = find_loop_niter (loop, &exit);

      /* Finally if everything else fails, try brute force evaluation.  */
      if (try_eval
	  && (chrec_contains_undetermined (niter)
	      || TREE_CODE (niter) != INTEGER_CST))
	niter = find_loop_niter_by_eval (loop, &exit);

      if (exit)
	locus = last_stmt (exit->src);

      if (TREE_CODE (niter) != INTEGER_CST)
	exit = NULL;
    }

  /* We work exceptionally hard here to estimate the bound
     by find_loop_niter_by_eval.  Be sure to keep it for future.  */
  if (niter && TREE_CODE (niter) == INTEGER_CST)
    {
      auto_vec<edge> exits = get_loop_exit_edges (loop);
      record_niter_bound (loop, wi::to_widest (niter),
			  exit == single_likely_exit (loop, exits), true);
    }

  /* Force re-computation of loop bounds so we can remove redundant exits.  */
  maxiter = max_loop_iterations_int (loop);

  if (dump_file && (dump_flags & TDF_DETAILS)
      && TREE_CODE (niter) == INTEGER_CST)
    {
      fprintf (dump_file, "Loop %d iterates ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " times.\n");
    }
  if (dump_file && (dump_flags & TDF_DETAILS)
      && maxiter >= 0)
    {
      fprintf (dump_file, "Loop %d iterates at most %i times.\n", loop->num,
	       (int)maxiter);
    }
  if (dump_file && (dump_flags & TDF_DETAILS)
      && likely_max_loop_iterations_int (loop) >= 0)
    {
      fprintf (dump_file, "Loop %d likely iterates at most %i times.\n",
	       loop->num, (int)likely_max_loop_iterations_int (loop));
    }

  /* Remove exits that are known to be never taken based on loop bound.
     Needs to be called after compilation of max_loop_iterations_int that
     populates the loop bounds.  */
  modified |= remove_redundant_iv_tests (loop);

  if (try_unroll_loop_completely (loop, exit, niter, may_be_zero, ul,
				  maxiter, locus, allow_peel))
    return true;

  if (create_iv
      && niter && !chrec_contains_undetermined (niter)
      && exit && just_once_each_iteration_p (loop, exit->src))
    {
      tree iv_niter = niter;
      if (may_be_zero)
	{
	  if (COMPARISON_CLASS_P (niter_desc.may_be_zero))
	    iv_niter = fold_build3 (COND_EXPR, TREE_TYPE (iv_niter),
				    niter_desc.may_be_zero,
				    build_int_cst (TREE_TYPE (iv_niter), 0),
				    iv_niter);
	  else
	    iv_niter = NULL_TREE;
	}
      if (iv_niter)
	create_canonical_iv (loop, exit, iv_niter);
    }

  if (ul == UL_ALL)
    modified |= try_peel_loop (loop, exit, niter, may_be_zero, maxiter);

  return modified;
}

/* The main entry point of the pass.  Adds canonical induction variables
   to the suitable loops.
   */

unsigned int
canonicalize_induction_variables (void)
{
  class loop *loop;
  bool changed = false;
  bool irred_invalidated = false;
  bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

  estimate_numbers_of_iterations (cfun);

  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      changed |= canonicalize_loop_induction_variables (loop,
							true, UL_SINGLE_ITER,
							true, false);
    }
  gcc_assert (!need_ssa_update_p (cfun));

  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  /* Clean up the information about numbers of iterations, since brute force
     evaluation could reveal new information.  */
  free_numbers_of_iterations_estimates (cfun);
  scev_reset ();

  if (!bitmap_empty_p (loop_closed_ssa_invalidated))
    {
      gcc_checking_assert (loops_state_satisfies_p (LOOP_CLOSED_SSA));
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
    }
  BITMAP_FREE (loop_closed_ssa_invalidated);

  if (changed)
    return TODO_cleanup_cfg;
  return 0;
}

/* Process loops from innermost to outer, stopping at the innermost
   loop we unrolled.  */

static bool
tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
				bitmap father_bbs, class loop *loop)
{
  class loop *loop_father;
  bool changed = false;
  class loop *inner;
  enum unroll_level ul;
  unsigned num = number_of_loops (cfun);

  /* Process inner loops first.  Don't walk loops added by the recursive
     calls because SSA form is not up-to-date.  They can be handled in the
     next iteration.  */
  bitmap child_father_bbs = NULL;
  for (inner = loop->inner; inner != NULL; inner = inner->next)
    if ((unsigned) inner->num < num)
      {
	if (!child_father_bbs)
	  child_father_bbs = BITMAP_ALLOC (NULL);
	if (tree_unroll_loops_completely_1 (may_increase_size, unroll_outer,
					    child_father_bbs, inner))
	  {
	    bitmap_ior_into (father_bbs, child_father_bbs);
	    bitmap_clear (child_father_bbs);
	    changed = true;
	  }
      }
  if (child_father_bbs)
    BITMAP_FREE (child_father_bbs);

  /* If we changed an inner loop we cannot process outer loops in this
     iteration because SSA form is not up-to-date.  Continue with
     siblings of outer loops instead.  */
  if (changed)
    {
      /* If we are recorded as father clear all other fathers that
         are necessarily covered already to avoid redundant work.  */
      if (bitmap_bit_p (father_bbs, loop->header->index))
	{
	  bitmap_clear (father_bbs);
	  bitmap_set_bit (father_bbs, loop->header->index);
	}
      return true;
    }

  /* Don't unroll #pragma omp simd loops until the vectorizer
     attempts to vectorize those.  */
  if (loop->force_vectorize)
    return false;

  /* Try to unroll this loop.  */
  loop_father = loop_outer (loop);
  if (!loop_father)
    return false;

  if (loop->unroll > 1)
    ul = UL_ALL;
  else if (may_increase_size && optimize_loop_nest_for_speed_p (loop)
      /* Unroll outermost loops only if asked to do so or they do
	 not cause code growth.  */
      && (unroll_outer || loop_outer (loop_father)))
    ul = UL_ALL;
  else
    ul = UL_NO_GROWTH;

  if (canonicalize_loop_induction_variables
	(loop, false, ul, !flag_tree_loop_ivcanon, unroll_outer))
    {
      /* If we'll continue unrolling, we need to propagate constants
	 within the new basic blocks to fold away induction variable
	 computations; otherwise, the size might blow up before the
	 iteration is complete and the IR eventually cleaned up.  */
      if (loop_outer (loop_father))
	{
	  /* Once we process our father we will have processed
	     the fathers of our children as well, so avoid doing
	     redundant work and clear fathers we've gathered so far.  */
	  bitmap_clear (father_bbs);
	  bitmap_set_bit (father_bbs, loop_father->header->index);
	}

      return true;
    }

  return false;
}

/* Unroll LOOPS completely if they iterate just few times.  Unless
   MAY_INCREASE_SIZE is true, perform the unrolling only if the
   size of the code does not increase.  */

static unsigned int
tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
{
  bitmap father_bbs = BITMAP_ALLOC (NULL);
  bool changed;
  int iteration = 0;
  bool irred_invalidated = false;

  estimate_numbers_of_iterations (cfun);

  do
    {
      changed = false;
      bitmap loop_closed_ssa_invalidated = NULL;

      if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
	loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

      free_numbers_of_iterations_estimates (cfun);
      estimate_numbers_of_iterations (cfun);

      changed = tree_unroll_loops_completely_1 (may_increase_size,
						unroll_outer, father_bbs,
						current_loops->tree_root);
      if (changed)
	{
	  unsigned i;

	  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);

	  /* We cannot use TODO_update_ssa_no_phi because VOPS gets confused.  */
	  if (loop_closed_ssa_invalidated
	      && !bitmap_empty_p (loop_closed_ssa_invalidated))
	    rewrite_into_loop_closed_ssa (loop_closed_ssa_invalidated,
					  TODO_update_ssa);
	  else
	    update_ssa (TODO_update_ssa);

	  /* father_bbs is a bitmap of loop father header BB indices.
	     Translate that to what non-root loops these BBs belong to now.  */
	  bitmap_iterator bi;
	  bitmap fathers = BITMAP_ALLOC (NULL);
	  EXECUTE_IF_SET_IN_BITMAP (father_bbs, 0, i, bi)
	    {
	      basic_block unrolled_loop_bb = BASIC_BLOCK_FOR_FN (cfun, i);
	      if (! unrolled_loop_bb)
		continue;
	      if (loop_outer (unrolled_loop_bb->loop_father))
		bitmap_set_bit (fathers,
				unrolled_loop_bb->loop_father->num);
	    }
	  bitmap_clear (father_bbs);
	  /* Propagate the constants within the new basic blocks.  */
	  EXECUTE_IF_SET_IN_BITMAP (fathers, 0, i, bi)
	    {
	      loop_p father = get_loop (cfun, i);
	      bitmap exit_bbs = BITMAP_ALLOC (NULL);
	      loop_exit *exit = father->exits->next;
	      while (exit->e)
		{
		  bitmap_set_bit (exit_bbs, exit->e->dest->index);
		  exit = exit->next;
		}
	      do_rpo_vn (cfun, loop_preheader_edge (father), exit_bbs);
	    }
	  BITMAP_FREE (fathers);

	  /* This will take care of removing completely unrolled loops
	     from the loop structures so we can continue unrolling now
	     innermost loops.  */
	  if (cleanup_tree_cfg ())
	    update_ssa (TODO_update_ssa_only_virtuals);

	  /* Clean up the information about numbers of iterations, since
	     complete unrolling might have invalidated it.  */
	  scev_reset ();
	  if (flag_checking && loops_state_satisfies_p (LOOP_CLOSED_SSA))
	    verify_loop_closed_ssa (true);
	}
      if (loop_closed_ssa_invalidated)
        BITMAP_FREE (loop_closed_ssa_invalidated);
    }
  while (changed
	 && ++iteration <= param_max_unroll_iterations);

  BITMAP_FREE (father_bbs);

  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  return 0;
}

/* Canonical induction variable creation pass.
   */

namespace {

const pass_data pass_data_iv_canon =
{
  GIMPLE_PASS, /* type */
  "ivcanon", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_TREE_LOOP_IVCANON, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_iv_canon : public gimple_opt_pass
{
public:
  pass_iv_canon (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_iv_canon, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_loop_ivcanon != 0; }
  virtual unsigned int execute (function *fun);

}; // class pass_iv_canon

unsigned int
pass_iv_canon::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  return canonicalize_induction_variables ();
}

} // anon namespace

gimple_opt_pass *
make_pass_iv_canon (gcc::context *ctxt)
{
  return new pass_iv_canon (ctxt);
}

/* Complete unrolling of loops.  */

namespace {

const pass_data pass_data_complete_unroll =
{
  GIMPLE_PASS, /* type */
  "cunroll", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unroll : public gimple_opt_pass
{
public:
  pass_complete_unroll (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unroll, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_complete_unroll

unsigned int
pass_complete_unroll::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  /* If we ever decide to run loop peeling more than once, we will need to
     track loops already peeled in loop structures themselves to avoid
     re-peeling the same loop multiple times.  */
  if (flag_peel_loops)
    peeled_loops = BITMAP_ALLOC (NULL);

  unsigned int val = tree_unroll_loops_completely (flag_cunroll_grow_size,
						   true);

  if (peeled_loops)
    {
      BITMAP_FREE (peeled_loops);
      peeled_loops = NULL;
    }

  return val;
}

} // anon namespace

gimple_opt_pass *
make_pass_complete_unroll (gcc::context *ctxt)
{
  return new pass_complete_unroll (ctxt);
}

/* Complete unrolling of inner loops.  */

namespace {

const pass_data pass_data_complete_unrolli =
{
  GIMPLE_PASS, /* type */
  "cunrolli", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unrolli : public gimple_opt_pass
{
public:
  pass_complete_unrolli (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unrolli, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize >= 2; }
  virtual unsigned int execute (function *);

}; // class pass_complete_unrolli

unsigned int
pass_complete_unrolli::execute (function *fun)
{
  unsigned ret = 0;

  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  if (number_of_loops (fun) > 1)
    {
      scev_initialize ();
      ret = tree_unroll_loops_completely (optimize >= 3, false);
      scev_finalize ();
    }
  loop_optimizer_finalize ();

  return ret;
}

} // anon namespace

gimple_opt_pass *
make_pass_complete_unrolli (gcc::context *ctxt)
{
  return new pass_complete_unrolli (ctxt);
}
sampler.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_sort.h>
#include <gsl/gsl_math.h> // gsl_finite
#ifdef _OPENMP
#include <omp.h>
#endif

/* Sequential Monte Carlo sampler for player strengths in pairwise games:
 * N particles over a K-simplex, T iterations, L players per game. */
const size_t N=10000;
const size_t T=1000;
const size_t K=20;
const size_t L=2;
const double alpha_k = 0.5;

/*
 * 0: thompson sampling
 * 1: uniform subset
 *
 */
const unsigned int strategy = 0;

#include "helpers.h"
#include "model.h"
#include "set_counter.h"

/* Print the first k entries of arr, comma separated, no trailing newline. */
#define to_string(arr, k) \
    for (size_t i = 0; i < k-1; i++) { \
        printf("%.16lf,", arr[i]); \
    } \
    printf("%.16lf", arr[k-1]);

/* One sweep of Metropolis-within-Gibbs over components 0..K-2 of the
 * log-simplex point logth.  Component K-1 absorbs the complement so the
 * point stays on the simplex.  Consumes 2*(K-1) pre-generated uniforms
 * from random_numbers (one for the proposal, one for the accept test).
 * Returns 1 if any proposal was accepted during the sweep, else 0. */
unsigned int move_gibbs(double *restrict random_numbers, double logth[K], size_t ngames, const size_t games[ngames][L], const size_t game_counts[ngames], const size_t win_counts[K]) {
    double ll, ll_p;
    double alpha;
    double logth_comp_old;
    double logth_Km1_old;
    unsigned int accepted = 0;
    for (size_t comp=0; comp<K-1; comp++) {
        /* invariant: the components still sum to 1 (0 in log space) */
        assert(gsl_fcmp(log_sum_exp(logth, K), 1.0, 1e-15) == 0);
        ll = fullcond(comp, logth, ngames, games, game_counts, win_counts);
        /* sample a suitable value for the current component */
        logth_comp_old = logth[comp];
        logth_Km1_old = logth[K-1];
        /* the two branches order the log1p/exp arguments so the
         * exponentials never overflow (largest term factored out) */
        if (logth_comp_old > logth[K-1]) {
            logth[comp] = log(*random_numbers++) + logth_comp_old + log1p(exp(logth[K-1] - logth_comp_old));
            logth[K-1] = logth_comp_old + log1p(exp(logth_Km1_old - logth_comp_old) - exp(logth[comp] - logth_comp_old));
        } else {
            logth[comp] = log(*random_numbers++) + logth_Km1_old + log1p(exp(logth_comp_old - logth_Km1_old));
            logth[K-1] = logth_Km1_old + log1p(exp(logth_comp_old - logth_Km1_old) - exp(logth[comp] - logth_Km1_old));
        }
        /* compute full conditional density at th_p */
        ll_p = fullcond(comp, logth, ngames, games, game_counts, win_counts);
        alpha = *random_numbers++;
        if (log(alpha) < ll_p - ll) {
            /* accept */
            accepted = 1;
        } else {
            /* reject */
            /* reset the proposed component back to its original value */
            /* th[K-1] += th[comp] - th_comp_old; */
            /* th[comp] = th_comp_old; */
            /* assert(th[K-1] >= 0); */
            logth[comp] = logth_comp_old;
            logth[K-1] = logth_Km1_old;
        }
    }
    return accepted;
}

/* Multinomially resample the N particles according to weights w, then
 * apply one move_gibbs sweep to every resampled particle in parallel.
 * logtheta is overwritten with the moved particle set; acceptance
 * statistics are printed to stdout. */
void resample_move(const gsl_rng *r, double logtheta[N][K], const double w[N], const struct set_counter *games_counter, const size_t wins[K]) {
    unsigned int cnt[N];
    size_t accepted = 0;
    gsl_ran_multinomial(r, N, N, w, cnt);

    /* populate particles */
    double (*logtheta_new)[K] = malloc(N * sizeof *logtheta_new);
    size_t n_new = 0;
    for (size_t n=0; n<N; n++) {
        for (size_t i=0; i < cnt[n]; i++) {
            memcpy(logtheta_new[n_new++], logtheta[n], sizeof *logtheta_new);
        }
    }

    /* pre-generate random numbers to avoid thread synchronization */
    double (*random_numbers)[2*(K-1)] = malloc(N * sizeof *random_numbers);
    for (size_t n=0; n<N; n++) {
        for (size_t k=0; k<2*(K-1); k++) {
            random_numbers[n][k] = gsl_rng_uniform_pos(r);
        }
    }

    /* extract game counts from the counter */
    size_t ngames = games_counter->size;
    size_t (*games)[L] = malloc(ngames * sizeof *games);
    size_t *game_counts = malloc(ngames * sizeof *game_counts);
    set_counter_keys(games_counter, games);
    set_counter_values(games_counter, game_counts);

#pragma omp parallel for reduction(+:accepted)
    for (size_t n=0; n<N; n++) {
        accepted += move_gibbs(random_numbers[n], logtheta_new[n], ngames, games, game_counts, wins);
    }

    printf("# to_move = %zu\n", N);
    printf("# accepted = %zu\n", accepted);
    printf("# acceptance ratio = %lf\n", (double) accepted / N);

    memcpy(logtheta, logtheta_new, N*K*sizeof(double));

    free(logtheta_new);
    free(random_numbers);
    free(games);
    free(game_counts);
}

/* Draw theta_star from a symmetric Dirichlet(alpha_k) prior. */
void sample_theta_star(const gsl_rng *r, double theta_star[K]) {
    double a[K];
    for (size_t k=0; k<K; k++) {
        a[k] = alpha_k;
    }
    gsl_ran_dirichlet(r, K, a, theta_star);
}

/* Read K values of theta_star, one per line, from file_name.
 * Exits on open/read failure.  NOTE(review): atof silently returns 0
 * on malformed input -- verify input files are well formed. */
void read_theta_star(const char *file_name, double theta_star[K]) {
    char buf[80];
    FILE *ts = fopen(file_name, "r");
    if (!ts) {
        fprintf(stderr, "error reading %s\n", file_name);
        exit(EXIT_FAILURE);
    }
    for (size_t k = 0; k<K; k++) {
        if (!fgets(buf, 80, ts)) {
            fprintf(stderr, "error reading %s\n", file_name);
            exit(EXIT_FAILURE);
        }
        theta_star[k] = atof(buf);
    };
    fclose(ts);
}

/* Run the full SMC simulation: initialise particles from a flat
 * Dirichlet prior, then for T iterations pick L players (Thompson or
 * uniform strategy), play a game with outcome governed by theta_star,
 * reweight the particles, and resample-move when the effective sample
 * size drops below N/2.  Final particles are printed to stdout. */
void sim(const gsl_rng *r, const double theta_star[K]) {
    double (*logtheta)[K] = malloc(N * sizeof *logtheta);
    double *w = malloc(N * sizeof(double));
    double *logw = malloc(N * sizeof(double));
    ones(w, N);
    zeros(logw, N);
    size_t *wins = calloc(K, sizeof *wins);
    struct set_counter *games_counter = set_counter_alloc();

    /* general info */
    printf("# generator type: %s\n", gsl_rng_name(r));
    printf("# seed = %lu\n", gsl_rng_default_seed);
    printf("\n");

    /* sample N particles from the `uniform` prior */
    {
        double alpha[K];
        ones(alpha, K);
        double theta[K];
        for (size_t n = 0; n < N; n++) {
            gsl_ran_dirichlet(r, K, alpha, theta);
#pragma omp simd
            for (size_t k=0; k<K; k++) logtheta[n][k] = log(theta[k]);
        }
    }

    for(size_t t = 0; t < T; t++) {
        fprintf(stderr, "t = %zu\r", t); /* for progress monitoring */
        printf("# iteration = %zu\n", t);
        size_t players[L];
        if (strategy == 0) {
            /* presentation strategy: thompson sampling */
            printf("# strategy: thompson\n");
            /* sample a theta from the current posterior */
            gsl_ran_discrete_t *g = gsl_ran_discrete_preproc(N, w);
            size_t theta_sample_idx = gsl_ran_discrete(r, g);
            gsl_ran_discrete_free(g);
            printf("# sampled theta: ");
            to_string(logtheta[theta_sample_idx], K);
            printf("\n");
            /* pick L elements from current sample */
            gsl_sort_largest_index(players, L, logtheta[theta_sample_idx], 1, K);
        } else if (strategy == 1) {
            /* presentation strategy: uniform subset */
            printf("# strategy: uniform subset\n");
            size_t idx[K];
            for (size_t k=0; k<K; k++) idx[k] = k;
            gsl_ran_choose(r, players, L, idx, K, sizeof(size_t));
        }
        set_counter_add(games_counter, players);
        printf("# number of unique subsets so far: %zu\n", games_counter->size);
        double player_w[L];
        printf("# game: ");
        for (size_t l=0; l<L-1; l++) {
            printf("%zu,", players[l]);
            player_w[l] = theta_star[players[l]];
        }
        printf("%zu\n", players[L-1]);
        player_w[L-1] = theta_star[players[L-1]];
        printf("# player weights = ");
        to_string(player_w, L);
        printf("\n");
        /* determine outcome using theta_star */
        size_t winner;
        {
            gsl_ran_discrete_t *g = gsl_ran_discrete_preproc(L, player_w);
            size_t wn = gsl_ran_discrete(r, g);
            winner = players[wn];
            gsl_ran_discrete_free(g);
        }
        printf("# winner: %zu\n", winner);
        wins[winner]++;
        /* update weights: multiply each particle's weight by the
         * Bradley-Terry-style likelihood of the observed winner */
#pragma omp parallel for
        for(size_t n = 0; n < N; n++) {
            double logtheta_winner = logtheta[n][winner];
            double lth_game[L];
            for (size_t l=0; l<L; l++) {
                lth_game[l] = logtheta[n][players[l]];
            }
            logw[n] += logtheta_winner - log_sum_exp(lth_game, L);
        }
        /* compute w from logw */
        {
            double lse = log_sum_exp(logw, N);
            for (size_t n=0; n<N; n++) {
                w[n] = exp(logw[n] - lse);
                assert(gsl_finite(w[n]) == 1);
            }
        }
        /* compute ess and perform resampling if necessary */
        {
            double two_logw[N];
            for (size_t n=0; n<N; n++) two_logw[n] = 2*logw[n];
            double ess = exp(2*log_sum_exp(logw, N) - log_sum_exp(two_logw, N));
            printf("# ess = %lf\n", ess);
            if (ess < .5*N) {
                printf("# resampling at iteration %zu\n", t);
                resample_move(r, logtheta, w, games_counter, wins);
                ones(w, N);
                zeros(logw, N);
            }
        }
        printf("\n");
    }
    fprintf(stderr, "\n");

    /* resample at the end */
    printf("# resampling at iteration %zu\n", T);
    resample_move(r, logtheta, w, games_counter, wins);
    /* no need to reset the weights at this point but just to be safe... */
    ones(w, N);
    zeros(logw, N);

    for(size_t n = 0; n < N; n++) {
        to_string(logtheta[n], K);
        printf("\n");
    }

    /* cleanup */
    free(logtheta);
    free(w);
    free(logw);
    free(wins);
    set_counter_free(games_counter);
}

/* Entry point: theta_star is read from argv[1] if given, otherwise
 * sampled from the prior; then the simulation is run. */
int main(int argc, char *argv[]) {
    const gsl_rng_type *t;
    gsl_rng *r;
    gsl_rng_env_setup();
    t = gsl_rng_default;
    r = gsl_rng_alloc(t);
    gsl_set_error_handler_off();

    double *theta_star = malloc(K * sizeof(double));
    /* read theta_star from a file */
    if (argc==2) read_theta_star(argv[1], theta_star);
    else sample_theta_star(r, theta_star);

    printf("# K = %zu\n", K);
    printf("# N = %zu\n", N);
    printf("# T = %zu\n", T);
    printf("# L = %zu\n", L);
    printf("# theta_star = ");
    to_string(theta_star, K);
    printf("\n");

    // perform simulation
    sim(r, theta_star);

    // cleanup
    free(theta_star);
    gsl_rng_free(r);
    exit(EXIT_SUCCESS);
}
/* ===== file: GB_unop__round_fp64_fp64.c ===== */
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__round_fp64_fp64)
// op(A') function: GB (_unop_tran__round_fp64_fp64)

// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = round (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = round (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = round (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ROUND || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__round_fp64_fp64)
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full case: every entry of A is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = round (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = round (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__round_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template expands using the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== file: pq_filter.h ===== */
/** * Skyline filter based on a priority queue */ #ifndef PQ_FILTER_H_ #define PQ_FILTER_H_ #include <queue> #include <vector> #include <math.h> #if defined(_OPENMP) #include <omp.h> //#include <parallel/algorithm> #else #include <algorithm> #define omp_get_thread_num() 0 #define omp_set_num_threads( t ) 0 #endif #include "common2.h" #include "SubTuple.h" #define DEFAULT_QP_SIZE 128 using namespace std; typedef std::pair<uint32_t, float> mn_w_idx; struct PQComparator { bool operator()( const mn_w_idx &a, const mn_w_idx &b ) { return a.second < b.second; } }; typedef priority_queue<mn_w_idx, vector<mn_w_idx>, PQComparator> PQ; class PQFilter { public: /** * Computes Manhattan norm in STuple.score, maintaining the pq_size * lowest Manhattan norm scores with a parallel reduction. Then conducts * explicit dominance tests between every point and the pq_size chosen * "lowest" points to see if any points can be trivially pruned this way. */ template<typename T, int DIMS> static uint32_t Execute( T* data, const uint32_t n, const uint32_t pq_size, const uint32_t num_threads ) { /* Create one priority queue for each thread */ PQ * const PQs_ = new PQ[num_threads]; /* Populate each priority queue with the same first pq_size * points and score them. */ for (uint32_t i = 0; i < pq_size; ++i) { data[i].score = 0; for (uint32_t j = 0; j < DIMS; ++j) { data[i].score += data[i].elems[j]; } for (uint32_t j = 0; j < num_threads; ++j) { PQs_[j].push( mn_w_idx( i, data[i].score ) ); } } /* Compute lowest Manhattan norm scores and remember best q_size ones. */ #pragma omp parallel num_threads(num_threads) { const uint32_t th_id = omp_get_thread_num(); mn_w_idx worst_of_bests = PQs_[th_id].top(); #pragma omp for nowait for (uint32_t i = 0; i < n; ++i) { /* Compute Manhattan norm. 
*/ float sum = 0; for (uint32_t j = 0; j < DIMS; j++) { sum += data[i].elems[j]; } data[i].score = sum; /* Update priority queue if this score is good enough */ if ( worst_of_bests.second > sum ) { PQs_[th_id].pop(); PQs_[th_id].push( mn_w_idx( i, sum ) ); worst_of_bests = PQs_[th_id].top(); } } } // END PARALLEL BLOCK /* Reduce the priority queues into a global best. */ mn_w_idx worst_of_bests = PQs_[ 0 ].top(); for ( uint32_t i = 1; i < num_threads; ++i ) { while ( !PQs_[i].empty() ) { mn_w_idx top = PQs_[i].top(); if ( worst_of_bests.second > top.second ) { PQs_[ 0 ].pop(); PQs_[ 0 ].push( mn_w_idx( top.first, top.second ) ); worst_of_bests = PQs_[ 0 ].top(); } PQs_[i].pop(); } } /* Copy the priority queue into an iteratable vector */ vector<uint32_t> pruners( pq_size, 0 ); for( uint32_t i = 0; i < pq_size && !PQs_[ 0 ].empty(); ++i ) { pruners[ pq_size - i - 1 ] = PQs_[ 0 ].top().first; PQs_[ 0 ].pop(); } // UPD_PROFILER( "01 calc mns" ); /* Pre-filter dataset using top pruners. */ #pragma omp parallel for num_threads(num_threads) for (uint32_t i = 0; i < n; ++i) { for ( auto it = pruners.begin(); it != pruners.end(); ++it ) { if ( DominateLeft( data[*it], data[i] ) ) { data[i].markPruned(); break; } } } // END PARALLEL FOR /* Determine how many points were pruned. */ uint32_t new_n = n; for (uint32_t i = 0; i < new_n; ++i) { if ( data[i].isPruned() ) { data[i--] = data[--new_n]; } } #ifdef NVERBOSE printf( " pq_filter: %0.2f %% pruned\n", (n - new_n) / (double) n * 100.0 ); #endif return new_n; } template<typename T, int DIMS> static uint32_t ExecuteExtended( T* data, const uint32_t n, const uint32_t pq_size, const uint32_t num_threads ) { /* Create one priority queue for each thread */ PQ * const PQs_ = new PQ[num_threads]; /* Populate each priority queue with the same first pq_size * points and score them. 
*/ for (uint32_t i = 0; i < pq_size; ++i) { data[i].score = 0; for (uint32_t j = 0; j < DIMS; ++j) { data[i].score += data[i].elems[j]; } for (uint32_t j = 0; j < num_threads; ++j) { PQs_[j].push( mn_w_idx( i, data[i].score ) ); } } /* Compute lowest Manhattan norm scores and remember best q_size ones. */ #pragma omp parallel num_threads(num_threads) { const uint32_t th_id = omp_get_thread_num(); mn_w_idx worst_of_bests = PQs_[th_id].top(); #pragma omp for nowait for (uint32_t i = 0; i < n; ++i) { /* Compute Manhattan norm. */ float sum = 0; for (uint32_t j = 0; j < DIMS; j++) { sum += data[i].elems[j]; } data[i].score = sum; /* Update priority queue if this score is good enough */ if ( worst_of_bests.second > sum ) { PQs_[th_id].pop(); PQs_[th_id].push( mn_w_idx( i, sum ) ); worst_of_bests = PQs_[th_id].top(); } } } // END PARALLEL BLOCK /* Reduce the priority queues into a global best. */ mn_w_idx worst_of_bests = PQs_[ 0 ].top(); for ( uint32_t i = 1; i < num_threads; ++i ) { while ( !PQs_[i].empty() ) { mn_w_idx top = PQs_[i].top(); if ( worst_of_bests.second > top.second ) { PQs_[ 0 ].pop(); PQs_[ 0 ].push( mn_w_idx( top.first, top.second ) ); worst_of_bests = PQs_[ 0 ].top(); } PQs_[i].pop(); } } /* Copy the priority queue into an iteratable vector */ vector<uint32_t> pruners( pq_size, 0 ); for( uint32_t i = 0; i < pq_size && !PQs_[ 0 ].empty(); ++i ) { pruners[ pq_size - i - 1 ] = PQs_[ 0 ].top().first; PQs_[ 0 ].pop(); } // UPD_PROFILER( "01 calc mns" ); /* Pre-filter dataset using top pruners. */ #pragma omp parallel for num_threads(num_threads) for (uint32_t i = 0; i < n; ++i) { for ( auto it = pruners.begin(); it != pruners.end(); ++it ) { if ( StrictDominateLeft( data[*it], data[i] ) ) { data[i].markPruned(); break; } } } // END PARALLEL FOR /* Determine how many points were pruned. 
*/ uint32_t new_n = n; for (uint32_t i = 0; i < new_n; ++i) { if ( data[i].isPruned() ) { data[i--] = data[--new_n]; } } #ifdef NVERBOSE printf( " pq_filter: %0.2f %% pruned\n", (n - new_n) / (double) n * 100.0 ); #endif return new_n; } template<typename T, int DIMS> static uint32_t ExecuteExtended_array( T* data, const uint32_t n, const uint32_t pq_size, const uint32_t num_threads ) { /* Create one priority queue for each thread */ PQ * const PQs_ = new PQ[num_threads]; /* Populate each priority queue with the same first pq_size * points and score them. */ for (uint32_t i = 0; i < pq_size; ++i) { data[i].score = 0; for (uint32_t j = 0; j < DIMS; ++j) { data[i].score += data[i].elems[j]; } for (uint32_t j = 0; j < num_threads; ++j) { PQs_[j].push( mn_w_idx( i, data[i].score ) ); } } /* Compute lowest Manhattan norm scores and remember best q_size ones. */ #pragma omp parallel num_threads(num_threads) { const uint32_t th_id = omp_get_thread_num(); mn_w_idx worst_of_bests = PQs_[th_id].top(); #pragma omp for nowait for (uint32_t i = 0; i < n; ++i) { /* Compute Manhattan norm. */ float sum = 0; for (uint32_t j = 0; j < DIMS; j++) { sum += data[i].elems[j]; } data[i].score = sum; /* Update priority queue if this score is good enough */ if ( worst_of_bests.second > sum ) { PQs_[th_id].pop(); PQs_[th_id].push( mn_w_idx( i, sum ) ); worst_of_bests = PQs_[th_id].top(); } } } // END PARALLEL BLOCK /* Reduce the priority queues into a global best. 
*/ mn_w_idx worst_of_bests = PQs_[ 0 ].top(); for ( uint32_t i = 1; i < num_threads; ++i ) { while ( !PQs_[i].empty() ) { mn_w_idx top = PQs_[i].top(); if ( worst_of_bests.second > top.second ) { PQs_[ 0 ].pop(); PQs_[ 0 ].push( mn_w_idx( top.first, top.second ) ); worst_of_bests = PQs_[ 0 ].top(); } PQs_[i].pop(); } } /* Copy the priority queue into an iteratable vector */ vector<uint32_t> pruners( pq_size, 0 ); for( uint32_t i = 0; i < pq_size && !PQs_[ 0 ].empty(); ++i ) { pruners[ pq_size - i - 1 ] = PQs_[ 0 ].top().first; PQs_[ 0 ].pop(); } // UPD_PROFILER( "01 calc mns" ); /* Pre-filter dataset using top pruners. */ #pragma omp parallel for num_threads(num_threads) for (uint32_t i = 0; i < n; ++i) { for ( auto it = pruners.begin(); it != pruners.end(); ++it ) { //if ( StrictDominateLeft_array<DIMS>( data[*it].elems, data[i].elems ) ) { if ( DominateLeft_array<DIMS>( data[*it].elems, data[i].elems ) ) { data[i].markPruned(); break; } } } // END PARALLEL FOR /* Determine how many points were pruned. */ uint32_t new_n = n; for (uint32_t i = 0; i < new_n; ++i) { if ( data[i].isPruned() ) { data[i--] = data[--new_n]; } } #ifdef NVERBOSE printf( " pq_filter: %0.2f %% pruned\n", (n - new_n) / (double) n * 100.0 ); #endif return new_n; } template<typename T, int DIMS> static uint32_t ExecuteSubspace( T* data, const uint32_t n, const uint32_t pq_size, const uint32_t num_threads, const std::vector<int>* dimensions ) { /* Create one priority queue for each thread */ PQ * const PQs_ = new PQ[num_threads]; /* Populate each priority queue with the same first pq_size * points and score them. */ for (uint32_t i = 0; i < pq_size; ++i) { data[i].score = 0; for (uint32_t j = 0; j < DIMS; ++j) { data[i].score += data[i].elems[dimensions->at(j)]; } for (uint32_t j = 0; j < num_threads; ++j) { PQs_[j].push( mn_w_idx( i, data[i].score ) ); } } /* Compute lowest Manhattan norm scores and remember best q_size ones. 
*/ #pragma omp parallel num_threads(num_threads) { const uint32_t th_id = omp_get_thread_num(); mn_w_idx worst_of_bests = PQs_[th_id].top(); #pragma omp for nowait for (uint32_t i = 0; i < n; ++i) { /* Compute Manhattan norm. */ float sum = 0; for (uint32_t j = 0; j < DIMS; j++) { sum += data[i].elems[dimensions->at(j)]; } data[i].score = sum; /* Update priority queue if this score is good enough */ if ( worst_of_bests.second > sum ) { PQs_[th_id].pop(); PQs_[th_id].push( mn_w_idx( i, sum ) ); worst_of_bests = PQs_[th_id].top(); } } } // END PARALLEL BLOCK /* Reduce the priority queues into a global best. */ mn_w_idx worst_of_bests = PQs_[ 0 ].top(); for ( uint32_t i = 1; i < num_threads; ++i ) { while ( !PQs_[i].empty() ) { mn_w_idx top = PQs_[i].top(); if ( worst_of_bests.second > top.second ) { PQs_[ 0 ].pop(); PQs_[ 0 ].push( mn_w_idx( top.first, top.second ) ); worst_of_bests = PQs_[ 0 ].top(); } PQs_[i].pop(); } } /* Copy the priority queue into an iteratable vector */ vector<uint32_t> pruners( pq_size, 0 ); for( uint32_t i = 0; i < pq_size && !PQs_[ 0 ].empty(); ++i ) { pruners[ pq_size - i - 1 ] = PQs_[ 0 ].top().first; PQs_[ 0 ].pop(); } // UPD_PROFILER( "01 calc mns" ); /* Pre-filter dataset using top pruners. */ #pragma omp parallel for num_threads(num_threads) for (uint32_t i = 0; i < n; ++i) { for ( auto it = pruners.begin(); it != pruners.end(); ++it ) { if ( StrictDominateLeftSubspace<DIMS>( data[*it], data[i] , dimensions) ) { data[i].markPruned(); break; } } } // END PARALLEL FOR /* Determine how many points were pruned. 
*/ uint32_t new_n = n; for (uint32_t i = 0; i < new_n; ++i) { if ( data[i].isPruned() ) { data[i--] = data[--new_n]; } } delete[] PQs_; return new_n; } template<typename T, int DIMS> static uint32_t ExecuteSubspace_array( T* data, const uint32_t n, const uint32_t pq_size, const uint32_t active_dimensions, const uint32_t num_threads, const std::vector<int>* dimensions ) { /* Create one priority queue for each thread */ PQ * const PQs_ = new PQ[num_threads]; /* Populate each priority queue with the same first pq_size * points and score them. */ for (uint32_t i = 0; i < pq_size; ++i) { data[i].score = 0; for (uint32_t j = 0; j < dimensions->size(); ++j) { data[i].score += data[i].elems[dimensions->at(j)]; } for (uint32_t j = 0; j < num_threads; ++j) { PQs_[j].push( mn_w_idx( i, data[i].score ) ); } } /* Compute lowest Manhattan norm scores and remember best q_size ones. */ #pragma omp parallel num_threads(num_threads) { const uint32_t th_id = omp_get_thread_num(); mn_w_idx worst_of_bests = PQs_[th_id].top(); #pragma omp for nowait for (uint32_t i = 0; i < n; ++i) { /* Compute Manhattan norm. */ float sum = 0; for (uint32_t j = 0; j < dimensions->size(); j++) { sum += data[i].elems[dimensions->at(j)]; } data[i].score = sum; /* Update priority queue if this score is good enough */ if ( worst_of_bests.second > sum ) { PQs_[th_id].pop(); PQs_[th_id].push( mn_w_idx( i, sum ) ); worst_of_bests = PQs_[th_id].top(); } } } // END PARALLEL BLOCK /* Reduce the priority queues into a global best. 
*/ mn_w_idx worst_of_bests = PQs_[ 0 ].top(); for ( uint32_t i = 1; i < num_threads; ++i ) { while ( !PQs_[i].empty() ) { mn_w_idx top = PQs_[i].top(); if ( worst_of_bests.second > top.second ) { PQs_[ 0 ].pop(); PQs_[ 0 ].push( mn_w_idx( top.first, top.second ) ); worst_of_bests = PQs_[ 0 ].top(); } PQs_[i].pop(); } } /* Copy the priority queue into an iteratable vector */ vector<uint32_t> pruners( pq_size, 0 ); for( uint32_t i = 0; i < pq_size && !PQs_[ 0 ].empty(); ++i ) { pruners[ pq_size - i - 1 ] = PQs_[ 0 ].top().first; PQs_[ 0 ].pop(); } // UPD_PROFILER( "01 calc mns" ); /* Pre-filter dataset using top pruners. */ #pragma omp parallel for num_threads(num_threads) for (uint32_t i = 0; i < n; ++i) { for ( auto it = pruners.begin(); it != pruners.end(); ++it ) { if ( active_dimensions == (active_dimensions & DT_bitmap_array<DIMS>( data[i].elems, data[*it].elems) ) ) { data[i].markPruned(); break; } } } // END PARALLEL FOR /* Determine how many points were pruned. */ uint32_t new_n = n; for (uint32_t i = 0; i < new_n; ++i) { if ( data[i].isPruned() ) { data[i--] = data[--new_n]; } } delete[] PQs_; return new_n; } }; #endif /* PQ_FILTER_H_ */
/* ===== file: openmp_exemplo.c ===== */
/******************************************************************************
* FILE: mm.c
* DESCRIPTION:
*   Matrix Multiply - C Version
*   Modified from Blaise Barney OpenMP code.
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define NRA 62 /* number of rows in matrix A */
#define NCA 15 /* number of columns in matrix A */
#define NCB 7  /* number of columns in matrix B */

/* Multiply A (NRAxNCA) by B (NCAxNCB) into C (NRAxNCB) and print C. */
int main (int argc, char *argv[])
{
  int i, j, k;
  double a[NRA][NCA], /* matrix A to be multiplied */
         b[NCA][NCB], /* matrix B to be multiplied */
         c[NRA][NCB]; /* result matrix C */

  /*** Initialize matrices ***/
  /* BUG FIX: only the parallelized loop index (i) is private by default;
   * j is declared at function scope and was SHARED in the original code,
   * so every thread raced on the same j and the initialization was
   * undefined.  Each loop must privatize its inner index. */
  #pragma omp parallel for private(j)
  for (i=0; i<NRA; i++)
    for (j=0; j<NCA; j++)
      a[i][j]= i+j;
  #pragma omp parallel for private(j)
  for (i=0; i<NCA; i++)
    for (j=0; j<NCB; j++)
      b[i][j]= i*j;
  #pragma omp parallel for private(j)
  for (i=0; i<NRA; i++)
    for (j=0; j<NCB; j++)
      c[i][j]= 0;

  /*** Do matrix multiply ***/
  /* Rows of C are independent, so the outer loop parallelizes safely;
   * j and k are inner indices and must be private.  The per-element
   * accumulation order is unchanged, so results are identical to the
   * serial version. */
  #pragma omp parallel for private(j, k)
  for (i=0; i<NRA; i++)
    for(j=0; j<NCB; j++)
      for (k=0; k<NCA; k++)
        c[i][j] += a[i][k] * b[k][j];

  /*** Print results ***/
  printf("******************************************************\n");
  printf("Result Matrix:\n");
  for (i=0; i<NRA; i++)
    {
    for (j=0; j<NCB; j++)
      printf("%6.2f ", c[i][j]);
    printf("\n");
    }
  printf("******************************************************\n");
  printf ("Done.\n");

  return 0; /* explicit success status */
}
/* ===== file: task_dep-5.c ===== */
/* { dg-do run } */

#define N 128
#define BS 16
#define EPS 0.000001

#include <stdlib.h>

/* Blocked matrix multiply: each BSxBS block product is an OpenMP task;
   depend clauses on the array sections serialize tasks that update the
   same block of C while letting independent blocks run concurrently. */
void matmul_depend (float A[N][N], float B[N][N], float C[N][N])
{
  int i, j, k, ii, jj, kk;
  for (i = 0; i < N; i+=BS)
    for (j = 0; j < N; j+=BS)
      for (k = 0; k < N; k+=BS)
// Note 1: i, j, k, A, B, C are firstprivate by default
// Note 2: A, B and C are just pointers
#pragma omp task private(ii, jj, kk) \
  depend ( in: A[i:BS][k:BS], B[k:BS][j:BS] ) \
  depend ( inout: C[i:BS][j:BS] )
        for (ii = i; ii < i+BS; ii++ )
          for (jj = j; jj < j+BS; jj++ )
            for (kk = k; kk < k+BS; kk++ )
              C[ii][jj] = C[ii][jj] + A[ii][kk] * B[kk][jj];
}

/* Straightforward serial multiply used as the reference result. */
void matmul_ref (float A[N][N], float B[N][N], float C[N][N])
{
  int i, j, k;
  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++)
      for (k = 0; k < N; k++)
        C[i][j] += A[i][k] * B[k][j];
}

/* Fill A with alternating-sign products and B with index sums. */
void init (float A[N][N], float B[N][N])
{
  int i, j, s = -1;
  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++)
      {
        A[i][j] = i * j * s;
        B[i][j] = i + j;
        s = -s;
      }
}

/* Zero both matrices.  (Fix: removed the unused sign variable `s`
   that was copy-pasted from init.) */
void init_zero (float A[N][N], float B[N][N])
{
  int i, j;
  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++)
      {
        A[i][j] = 0;
        B[i][j] = 0;
      }
}

/* Abort the test if any pair of corresponding elements differs by
   more than EPS. */
void check (float A[N][N], float B[N][N])
{
  int i, j;
  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++)
      if (A[i][j] - B[i][j] > EPS || B[i][j] - A[i][j] > EPS)
        abort ();
}

int main ()
{
  float A[N][N], B[N][N], C[N][N], C_ref[N][N];

  init (A, B);
  init_zero (C, C_ref);
  /* Compute the same product twice and require matching results. */
  matmul_depend (A, B, C);
  matmul_ref (A, B, C_ref);
  check (C, C_ref);

  return 0;
}
/* ===== file: graphProcessing.h ===== */
/*
FINISH TEMPFLATPATH CODE
AS WRITTEN, THESE FUNCTIONS WILL ONLY WORK WITH GRAPHS THAT ARE IMPLEMENTED
IN THE boost NAMESPACE.
*/

#define LP 1
#define PERFDEBUG 0
//#define FULLDEBUG 1

#ifdef _OPENMP
#include <omp.h>
#endif

#include <boost/regex.hpp>
#include <iostream>
#include <fstream>
#include <string>
#include <assert.h>
#include <staticCFG.h>

/**
*@file graphProcessing.h

*Brief Overview of Algorithm:

***********************
*Current Implementation
***********************

*This implementation uses BOOSTs graph structure to analyze the paths of the graph

*The path analyzer sends the user paths to be evaluated by the "analyzePath" function that is user defined

**************************
*Further Improvements: TODO
**************************

@todo utilize BOOST visitors to take advantage of the BOOST graph structures abilities

***************
*Contact Info
***************

*Finally, blame can be assigned to and questions can be forwarded to the author, though response is not guaranteed
*if I'm still at Lawrence
*hoffman34 AT llnl DOT gov
*@author Michael Hoffman
*/

#include <boost/graph/adjacency_list.hpp>
#include <boost/bind.hpp>
#include <boost/foreach.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/graph/graphviz.hpp>
#include <boost/graph/dominator_tree.hpp>
#include <boost/graph/reverse_graph.hpp>
#include <boost/graph/transpose_graph.hpp>
#include <boost/algorithm/string.hpp>

#include <vector>
#include <algorithm>
#include <utility>
#include <iostream>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/time.h>

/* Generic path analyzer over a Boost graph CFG: enumerates paths and
 * hands each one to the user-supplied pure virtual analyzePath(). */
template <class CFG>
class SgGraphTraversal
{
public:
    typedef typename boost::graph_traits<CFG>::vertex_descriptor Vertex;
    typedef typename boost::graph_traits<CFG>:: edge_descriptor Edge;

    void constructPathAnalyzer(CFG* g, bool unbounded=false, Vertex end=0, Vertex begin=0, bool ns = true);
    virtual void analyzePath(std::vector<Vertex>& pth) = 0;
    std::vector<int> getInEdges(int& node, CFG*& g);
    std::vector<int> getOutEdges(int& node, CFG*& g);
    int getTarget(int& n, CFG*& g);
    int getSource(int& n, CFG*& g);
    /* bidirectional maps between Boost descriptors and the integer ids
     * used internally by the traversal */
    std::map<Vertex, int> vertintmap;
    std::map<Edge, int> edgeintmap;
    std::map<int, Vertex> intvertmap;
    std::map<int, Edge> intedgemap;
    SgGraphTraversal();
    virtual ~SgGraphTraversal();
    SgGraphTraversal( SgGraphTraversal &);
    SgGraphTraversal &operator=( SgGraphTraversal &);
    int pathnum;
    void firstPrepGraph(CFG*& g);

private:
    int normals;
    int abnormals;
    bool needssafety;
    int recursed;
    int checkedfound;
//    typedef typename boost::graph_traits<CFG>::vertex_descriptor Vertex;
//    typedef typename boost::graph_traits<CFG>:: edge_descriptor Edge;
//    std::vector<int> getInEdges(int& node, CFG*& g);
//    std::vector<int> getOutEdges(int& node, CFG*& g);
    void prepareGraph(CFG*& g);
    void findClosuresAndMarkersAndEnumerate(CFG*& g);
//    void constructPathAnalyzer(CFG* g, bool unbounded=false, Vertex end=0, Vertex begin=0, bool ns = true);
//    virtual void analyzePath(std::vector<Vertex>& pth) = 0;
//    void firstPrepGraph(CFG*& g);
    int stoppedpaths;
    std::set<std::vector<int> > traversePath(int begin, int end, CFG*& g, bool loop=false);
    std::set<std::vector<int> > uTraversePath(int begin, int end, CFG*& g, bool loop, std::map<int, std::vector<std::vector<int> > >& localLoops);
    std::vector<std::vector<int> > bfsTraversePath(int begin, int end, CFG*& g, bool loop=false);
    std::vector<int> unzipPath(std::vector<int>& path, CFG*& g, int start, int end);
    std::vector<int> zipPath(std::vector<int>& path, CFG*& g, int start, int end);
    std::vector<int> zipPath2(std::vector<int>& path, CFG*& g);
    void printCFGNode(int& cf, std::ofstream& o);
    void printCFGNodeGeneric(int& cf, std::string prop, std::ofstream& o);
    void printCFGEdge(int& cf, CFG*& cfg, std::ofstream& o);
    void printHotness(CFG*& g);
    void printPathDot(CFG*& g);
    void computeOrder(CFG*& g, const int& begin);
    void computeSubGraphs(const int& begin, const int &end, CFG*& g, int depthDifferential);
//int getTarget(int& n, CFG*& g);
//int getSource(int& n, CFG*& g);
    /* bookkeeping state for the traversal */
    std::vector<int> sources;
    std::vector<int> sinks;
    std::vector<int> recursiveLoops;
    std::vector<int> recurses;
    std::map<int, int> ptsNum;
    bool borrowed;
    std::set<int> badloop;
    std::map<int, std::vector<std::vector<int> > > totalLoops;
//    int pathnum;
    std::map<int, std::string> nodeStrings;
    int sourcenum;
    unsigned long long evaledpaths;
    int badpaths;
    int workingthreadnum;
    bool workingthread;
    std::map<int, std::set<std::vector<int> > > loopStore;
    std::vector<std::vector<int> > pathStore;
    std::map<int, std::vector<int> > subpathglobal;
    std::map<std::vector<int>, int> subpathglobalinv;
    int nextsubpath;
    std::vector<int> orderOfNodes;
//    std::map<Vertex, int> vertintmap;
//    std::map<Edge, int> edgeintmap;
//    std::map<int, Vertex> intvertmap;
//    std::map<int, Edge> intedgemap;
    std::vector<std::map<Vertex, Vertex> > SubGraphGraphMap;
    std::vector<std::map<Vertex, Vertex> > GraphSubGraphMap;
    std::vector<CFG*> subGraphVector;
    void getVertexPath(std::vector<int> path, CFG*& g, std::vector<Vertex>& vertexPath );
    void storeCompact(std::vector<int> path);
    int nextNode;
    int nextEdge;
    std::vector<int> markers;
    std::vector<int> closures;
    std::map<int, int> markerIndex;
    std::map<int, std::vector<int> > pathsAtMarkers;
    typedef typename boost::graph_traits<CFG>::vertex_iterator vertex_iterator;
    typedef typename boost::graph_traits<CFG>::out_edge_iterator out_edge_iterator;
    typedef typename boost::graph_traits<CFG>::in_edge_iterator in_edge_iterator;
    typedef typename boost::graph_traits<CFG>::edge_iterator edge_iterator;
    bool bound;
//    SgGraphTraversal();
//    virtual ~SgGraphTraversal();
//    SgGraphTraversal( SgGraphTraversal &);
//    SgGraphTraversal &operator=( SgGraphTraversal &);
};

template<class CFG>
SgGraphTraversal<CFG>::
SgGraphTraversal()
{
}

/* NOTE(review): copy assignment ignores `other` and copies nothing. */
template<class CFG>
SgGraphTraversal<CFG> &
SgGraphTraversal<CFG>::
operator=( SgGraphTraversal &other)
{
    return *this;
}

#ifndef SWIG

template<class CFG>
SgGraphTraversal<CFG>::
~SgGraphTraversal()
{
}

#endif

/**
Gets the source of an edge

SgGraphTraversal::getSource
Input:
@param[edge] int& integer representation of edge in question
@param[g] CFG*& the CFG used
*/

template<class CFG>
inline int
SgGraphTraversal<CFG>::
getSource(int& edge, CFG*& g)
{
    Edge e = intedgemap[edge];
    Vertex v = boost::source(e, *g);
    return(vertintmap[v]);
}

/**
Gets the target of an edge

SgGraphTraversal::getTarget
Input:
@param[edge] int& integer representation of edge in quesution
@param[g] the CFG*& CFG used
*/

template<class CFG>
inline int
SgGraphTraversal<CFG>::
getTarget(int& edge, CFG*& g)
{
    Edge e = intedgemap[edge];
    Vertex v = boost::target(e, *g);
    return(vertintmap[v]);
}

/**
Gets out edges with integer inputs, internal use only

SgGraphTraversal::getInEdges
Input:
@param[node] int, integer representation of the node to get the in edges from
@param[g] CFG* g, CFG
*/

template<class CFG>
std::vector<int>
SgGraphTraversal<CFG>::
getInEdges(int& node, CFG*& g)
{
    Vertex getIns = intvertmap[node];
    std::vector<int> inedges;
    in_edge_iterator i, j;
    for (boost::tie(i, j) = boost::in_edges(getIns, *g); i != j; ++i)
    {
        inedges.push_back(edgeintmap[*i]);
    }
    return inedges;
}

/**
Gets out edges with integer inputs, internal use only

SgGraphTraversal::getOutEdges
Input:
@param[node] int, integer representation of the node to get the out edges from
@param[g] CFG* g, CFG
*/

template<class CFG>
std::vector<int>
SgGraphTraversal<CFG>::
getOutEdges(int &node, CFG*& g)
{
    Vertex getOuts = intvertmap[node];
    std::vector<int> outedges;
    out_edge_iterator i, j;
    for (boost::tie(i, j) = boost::out_edges(getOuts, *g); i != j; ++i)
    {
        outedges.push_back(edgeintmap[*i]);
    }
    return outedges;
}

/**
Condenses paths, currently deprecated...

Input:
@param[pth] std::vector<int> the original path
@param[g] CFG*, the ambient graph
Output: zipped path
*/

template<class CFG>
inline std::vector<int>
SgGraphTraversal<CFG>::
zipPath2(std::vector<int>& pth, CFG*& g)
{
    std::vector<int> npth;
    npth.push_back(pth[0]);
    /* keep only the closure nodes between the endpoints */
    for (int i = 1; i < pth.size()-1; i++)
    {
        if (find(closures.begin(), closures.end(), pth[i]) != closures.end())
        {
            npth.push_back(pth[i]);
        }
    }
    npth.push_back(pth.back());
    return npth;
}

/**
Condenses paths to simply the first and last node and the ordered set of edges taken at nodes with more than 1 outedge

Input:
@param[pth] std::vector<int>, the original path
@param[g] CFG*, the ambient graph
@param[start] integer representation of the first node
@param[end] integer representation of the last node
*/

template<class CFG>
std::vector<int>
SgGraphTraversal<CFG>::
zipPath(std::vector<int>& pth, CFG*& g, int start, int end)
{
    std::vector<int> subpath;
    std::vector<int> movepath;
    movepath.push_back(pth.front());
    movepath.push_back(pth.back());
    /* at every marker (branch) node record which out edge the path took */
    for (unsigned int qw = 0; qw < pth.size()-1; qw++)
    {
        if (find(markers.begin(), markers.end(), pth[qw]) != markers.end())
        {
            std::vector<int> oeds = getOutEdges(pth[qw], g);
            for (unsigned int i = 0; i < oeds.size(); i++)
            {
                if (getTarget(oeds[i], g) == pth[qw+1])
                {
                    movepath.push_back(oeds[i]);
                }
            }
        }
    }
    return movepath;
}

/**
unzips the paths zipped by zipPath

Input:
@param[pzipped] the zipped path
@param[CFG] the ambient graph
@param[start] the integer representation of the first node (used to check that zipPath is working correctly)
@param[end] the integer representation of the end node
*/

/* NOTE(review): this definition is truncated at the end of the visible
   chunk; the remainder of unzipPath continues beyond this excerpt. */
template<class CFG>
std::vector<int>
SgGraphTraversal<CFG>::
unzipPath(std::vector<int>& pzipped, CFG*& g, int start, int end)
{
    ROSE_ASSERT(pzipped[0] == start && (pzipped[1] == end || end == -1));
    std::vector<int> zipped;
    for (unsigned int i = 2; i < pzipped.size(); i++)
    {
        zipped.push_back(pzipped[i]);
    }
    std::vector<int> unzipped;
    unzipped.push_back(start);
    std::vector<int> oeds = 
getOutEdges(start, g);
// Degenerate case: start node has no successors at all.
if (oeds.size() == 0) {
    return unzipped;
}
// Each entry of "zipped" is the edge taken at the next branch point;
// between branch points the path is reconstructed by following the unique
// out-edge chain.
for (unsigned int i = 0; i < zipped.size(); i++) {
    oeds = getOutEdges(unzipped.back(), g);
    while (oeds.size() == 1) {
        if (getTarget(oeds[0], g) == end && unzipped.size() != 1) {
            unzipped.push_back(end);
            return unzipped;
        }
        unzipped.push_back(getTarget(oeds[0], g));
        oeds = getOutEdges(unzipped.back(), g);
    }
    if (oeds.size() == 0) {
        return unzipped;
    }
    if (oeds.size() > 1 && (unzipped.back() != end || (unzipped.size() == 1 && unzipped.back() == end))) {
        // Sanity check: the recorded edge really leaves the node we are on.
        ROSE_ASSERT(getSource(zipped[i], g) == unzipped.back());
        unzipped.push_back(getTarget(zipped[i], g));
    }
}
// Walk any trailing single-out-edge chain after the last recorded branch.
std::vector<int> oeds2 = getOutEdges(unzipped.back(), g);
if (unzipped.back() != end && oeds2.size() != 0) {
    while (oeds2.size() == 1 && unzipped.back() != end) {
        unzipped.push_back(getTarget(oeds2[0], g));
        oeds2 = getOutEdges(unzipped.back(), g);
    }
}
return unzipped;
}

/* Example Time
Example:
timeval tim;
gettimeofday(&tim, NULL);
double t1=tim.tv_sec+(tim.tv_usec/1000000.0);
do_something_long();
gettimeofday(&tim, NULL);
double t2=tim.tv_sec+(tim.tv_usec/1000000.0);
printf("%.6lf seconds elapsed\n", t2-t1);
*/

/** The function responsible for collecting all paths without loops, and all
paths within loops that do not include other loops, then sending those to
uTraverse to assemble them into all paths with any combination of loops
Input:
@param[begin] integer representation of the first node
@param[end] integer representation of the last node (or -1 if its not bounded)
@param[g] CFG*, the ambient CFG
@param[loop] boolean expressing whether or not we are calculating paths contained within a loop
*/

template<class CFG>
std::vector<std::vector<int> >
SgGraphTraversal<CFG>::
bfsTraversePath(int begin, int end, CFG*& g, bool loop)
{
    //perfdebug allows for examining the speed of traversal
    #ifdef PERFDEBUG
    //timeval tim;
    //gettimeofday(&tim, NULL);
    //double tim1 = tim.tv_sec+(tim.tv_usec/1000000.0);
    #endif
    bool recursedloop = loop;
    std::map<int,
std::vector<std::vector<int> > > PtP;
// Nodes that have already been reached by some other path; used to split
// paths at shared join points so path fragments are not duplicated.
std::set<int> nodes;
std::vector<std::vector<int> > pathContainer;
//std::vector<std::vector<int> > oldPaths;
std::vector<int> completedLoops;
std::vector<std::vector<int> > npc;
// Seed the BFS frontier with the single-node path [begin].
std::vector<int> bgpath;
bgpath.push_back(begin);
pathContainer.push_back(bgpath);
std::vector<std::vector<int> > newPathContainer;
std::vector<std::vector<int> > paths;
std::vector<int> localLoops;
std::map<int, std::vector<std::vector<int> > > globalLoopPaths;
//std::cout << "at the while" << std::endl;
//To keep
while (pathContainer.size() != 0 /*|| oldPaths.size() != 0*/) {
/*
unsigned int mpc = 50000;
if (pathContainer.size() == 0) {
unsigned int mxl = 0;
if (oldPaths.size() > mpc) {
mxl = mpc/2;
}
else {
mxl = oldPaths.size();
}
for (unsigned int k = 0; k < mxl; k++) {
pathContainer.push_back(oldPaths.back());
oldPaths.pop_back();
}
}
if (pathContainer.size() > mpc) {
unsigned int j = 0;
while (j < mpc) {
npc.push_back(pathContainer.back());
pathContainer.pop_back();
j++;
}
oldPaths.insert(oldPaths.end(), pathContainer.begin(), pathContainer.end());
pathContainer = npc;
npc.clear();
}
*/
//iterating through the currently discovered subpaths to build them up
for (unsigned int i = 0; i < pathContainer.size(); i++) {
std::vector<int> npth = pathContainer[i];
std::vector<int> oeds = getOutEdges(npth.back(), g);
std::vector<int> ieds = getInEdges(npth.back(), g);
npth = pathContainer[i];
oeds = getOutEdges(npth.back(), g);
// A path is complete when it hits "end" (bounded / loop mode) or a sink
// node with no out-edges (unbounded mode).
if ((!recursedloop && ((bound && npth.back() == end && npth.size() != 1) || (!bound && oeds.size() == 0))) || (recursedloop && npth.back() == end && npth.size() != 1)) {
std::vector<int> newpth;
newpth = (pathContainer[i]);
std::vector<int> movepath = newpth;//zipPath(newpth, g);
if (recursedloop && newpth.back() == end && newpth.size() != 1) {
paths.push_back(movepath);
}
else if (!recursedloop) {
if (bound && newpth.size() != 1 && newpth.back() == end) {
paths.push_back(movepath);
}
else if (!bound) {
paths.push_back(movepath);
}
}
}
else {
// Path is not complete: extend it along every out-edge.
std::vector<int> oeds = getOutEdges(pathContainer[i].back(), g);
for (unsigned int j = 0; j < oeds.size(); j++) {
int tg = getTarget(oeds[j], g);
std::vector<int> newpath = (pathContainer[i]);
//we split up paths into pieces so that they don't take up a lot of memory, basically this is when we run into a path
//more than once, so we attach all paths that go to that path to that particular node via PtP
if (nodes.find(tg) != nodes.end() && find(newpath.begin(), newpath.end(), tg) == newpath.end() && tg != end) {
if (PtP.find(tg) == PtP.end()) {
std::vector<int> nv;
nv.push_back(tg);
newPathContainer.push_back(nv);
PtP[tg].push_back(/*zipPath(*(*/newpath);//, g, newpath.front(), newpath.back()));
}
else {
PtP[tg].push_back(/*zipPath(*/newpath);//, g, newpath.front(), newpath.back()));
}
}
else if (find(newpath.begin(), newpath.end(), getTarget(oeds[j], g)) == newpath.end() || getTarget(oeds[j], g) == end) {
newpath.push_back(tg);
std::vector<int> ieds = getInEdges(tg, g);
if (ieds.size() > 1) {//find(closures.begin(), closures.end(), tg) != closures.end()) {
nodes.insert(tg);
}
newPathContainer.push_back(newpath);
}
else if (tg == end && recursedloop) {
newpath.push_back(tg);
newPathContainer.push_back(newpath);
}
else {//if (find(newpath.begin(), newpath.end(), tg) != newpath.end() && tg != end) {
// Target is already on this path: a loop header. Record it for
// recursive loop expansion unless it is already being processed.
std::vector<int> ieds = getInEdges(tg, g);
if (ieds.size() > 1/*find(closures.begin(), closures.end(), tg) != closures.end()*/ && find(completedLoops.begin(), completedLoops.end(), tg) == completedLoops.end() /*&& find(localLoops.begin(), localLoops.end(), tg) == localLoops.end()*/ && find(recurses.begin(), recurses.end(), tg) == recurses.end()) {
localLoops.push_back(tg);
nodes.insert(tg);
}
// else if (find(recurses.begin(), recurses.end(), tg) != recurses.end()) {
// }
}
//else {
// std::cout << "problem" << std::endl;
// ROSE_ASSERT(false);
// }
}
}
}
pathContainer = newPathContainer;
newPathContainer.clear();
}
// std::cout << "done while" << std::endl;
pathContainer.clear();
// Stitch the path fragments back together: any path whose front is a PtP
// join point gets every stored prefix prepended (rejecting prefixes that
// would revisit a node already on the path).
std::vector<std::vector<int> > finnpts;
std::vector<std::vector<int> > npts;
while (true) {
if (paths.size() > 1000000) {
std::cout << "too many paths, consider a subgraph" << std::endl;
ROSE_ASSERT(false);
}
//#pragma omp parallel for schedule(guided)
for (unsigned int qq = 0; qq < paths.size(); qq++) {
std::vector<int> pq = paths[qq];
std::vector<int> qp;
int ppf = paths[qq].front();
if (PtP.find(ppf) != PtP.end()) {
for (unsigned int kk = 0; kk < PtP[ppf].size(); kk++) {
std::vector<int> newpath = /*unzipPath(*/PtP[ppf][kk];//, g, PtP[ppf][kk][0], PtP[ppf][kk][1]);
bool good = true;
if (newpath.back() == newpath.front() && newpath.front() != begin && newpath.size() > 1) {
good = false;
}
else {
// if (find(pq.begin(), pq.end(), newpath.front()) != pq.end() && newpath.front() != begin) {
// good = false;
// }
// else {
for (unsigned int kk1 = 0; kk1 < newpath.size(); kk1++) {
/* if (newpath.front() == newpath.back()) {
good = false;
break;
}
else */if (find(pq.begin(), pq.end(), newpath[kk1]) != pq.end() && newpath[kk1] != begin) {
good = false;
break;
}
}
//}
}
if (good) {
newpath.insert(newpath.end(), pq.begin(), pq.end());
#pragma omp critical
{
npts.push_back(newpath);
}
}
}
}
else {
// Front is not a join point: this path is fully assembled.
std::vector<int> ppq = pq;// zipPath(pq, g, pq.front(), pq.back());
#pragma omp critical
{
finnpts.push_back(ppq);
}
}
}
if (npts.size() == 0) {
break;
}
else {
paths = npts;
npts.clear();
}
}
paths = finnpts;
finnpts.clear();
// Expand every discovered loop header: reuse cached loop paths when
// available, otherwise recurse (guarded by completedLoops/recurses to
// avoid infinite recursion on nested loops).
for (unsigned int k = 0; k < localLoops.size(); k++) {
int lk = localLoops[k];
std::vector<std::vector<int> > loopp;
if (loopStore.find(localLoops[k]) != loopStore.end()) {
loopp.insert(loopp.end(), loopStore[localLoops[k]].begin(), loopStore[localLoops[k]].end());
}
else {
std::map<int, std::vector<std::vector<int> > > localLoopPaths;
completedLoops.push_back(lk);
recurses.push_back(lk);
loopp = bfsTraversePath(lk, lk, g, true);
recurses.pop_back();
}
for (unsigned int ik = 0; ik < loopp.size(); ik++) {
if
(find(globalLoopPaths[lk].begin(), globalLoopPaths[lk].end(), loopp[ik]) == globalLoopPaths[lk].end()) {
globalLoopPaths[localLoops[k]].push_back(loopp[ik]);
}
}
}
borrowed = true;
std::vector<std::vector<int> > lps2;
//unsigned int maxpaths = 1000;
//unsigned int pathdivisor = 1;//paths.size()/maxpaths;///paths.size();
//if (pathdivisor < 1) {
//pathdivisor = 1;
//maxpaths = paths.size();
// }
/*
for (unsigned int j = 0; j < pathdivisor+1; j++) {
std::vector<std::vector<int> > npaths;
std::vector<int> dummyvec;
unsigned int mxpths;
if (j < pathdivisor) {
mxpths = maxpaths;
}
else {
mxpths = paths.size() % pathdivisor;
}
for (unsigned int k = 0; k < mxpths; k++) {
npaths.push_back(paths.back());//unzipPath(paths.back(), g, begin, end));
paths.pop_back();
}
*/
// Hand the loop-free paths to uTraversePath via the pathStore member
// (borrowed == true tells uTraversePath to take them from there).
pathStore = paths;
paths.clear();
if (!recursedloop) {
uTraversePath(begin, end, g, false, globalLoopPaths);
}
else {
recursed++;
std::set<std::vector<int> > lps = uTraversePath(begin, end, g, true, globalLoopPaths);
recursed--;
for (std::set<std::vector<int> >::iterator ij = lps.begin(); ij != lps.end(); ij++) {
std::vector<int> ijk = (*ij);
lps2.push_back(*ij);
}
}
//}
#ifdef PERFDEBUG
// timeval tim;
//std::cout << "begin: " << begin << " end: " << end << std::endl;
//gettimeofday(&tim, NULL);
//double tim2 = tim.tv_sec+(tim.tv_usec/1000000);
//double timeRet = tim2 - tim1;
//std::cout << "bfs time elapsed: " << timeRet << std::endl;
#endif
// NOTE(review): lps2 is only populated in the recursed-loop branch; for a
// top-level call this intentionally returns an empty vector (results are
// reported through analyzePath inside uTraversePath).
return lps2;
}

/** This function calculates all the permutations of loops on paths
it also throws away duplicate paths
Input:
@param[begin] integer representation of first node
@param[end] integer representation of the final node
@param[g] ambient CFG
@param[globalLoopPaths] connects an integer representation of a node to all possible loops starting at that node
*/

template<class CFG>
std::set<std::vector<int> >
SgGraphTraversal<CFG>::
uTraversePath(int begin, int end, CFG*& g, bool loop, std::map<int, std::vector<std::vector<int> > >& globalLoopPaths)
{
//std::cout << "uTraverse" << std::endl;
//int doubledpaths = 0;
int newmil = 1;
//#ifdef LP
//if (loop && loopStore.find(begin) != loopStore.end()) {
// return loopStore[begin];
//}
//#endif
#ifdef PERFDEBUG
//timeval tim;
//gettimeofday(&tim, NULL);
//double t1 = tim.tv_sec+(tim.tv_usec/1000000);
#endif
std::set<std::vector<int> > newpaths;
std::set<std::vector<int> > npaths;
pathnum = 0;
std::vector<int> path;
std::vector<std::vector<int> > paths;
int truepaths = 0;
std::vector<std::vector<int> > checkpaths;
std::vector<std::vector<int> > npathchecker;
std::map<int, int> currents;
//int nnumpaths = 0;
std::set<std::vector<int> > loopPaths;
//bool threadsafe = true;
bool done = false;
std::set<std::vector<int> > fts;
//double ttfors = 0;
//double tperms = 0;
while (true) {
//std::cout << "paths.size() " << paths.size() << std::endl;
if (paths.size() > 1000000) {
std::cout << "nearly 1 million paths with no loops, stopping" << std::endl;
// NOTE(review): the statement after this return is unreachable.
return loopPaths;
std::cout << "ended early" << std::endl;
}
if (done || borrowed) {
if (borrowed) {
// Take ownership of the paths bfsTraversePath left in pathStore.
paths = pathStore;
pathStore.clear();
}
//std::cout << "paths.size(): " << paths.size() << std::endl;
if (paths.size() != 0) {
}
else {
return loopPaths;
}
// #pragma omp parallel
// {
#pragma omp parallel for schedule(guided)
for (unsigned int qqq = 0; qqq < paths.size(); qqq++) {
// std::cout << "pathcheck" << std::endl;
//int pathevals = 0;
//std::vector<int> zpt = zipPath2(paths[qqq], g);
//std::set<std::vector<int> > boxpaths;
std::set<std::vector<int> > movepaths;
std::vector<int> path;// = paths[qqq];
path = paths[qqq];//unzipPath(paths[qqq], g, begin, end);
truepaths++;
// permnums counts the total number of loop-insertion combinations for
// this path; perms/qs record the running product and the nodes at which
// loops can be spliced in (a mixed-radix numbering scheme).
int permnums = 1;
std::vector<int> perms;
std::vector<unsigned int> qs;
std::map<int, std::vector<std::vector<int> > > localLoops;
std::vector<int> takenLoops;
takenLoops.push_back(path[0]);
bool taken = false;
//timeval timfor;
int lost = 0;
//gettimeofday(&timfor, NULL);
//double t1for = timfor.tv_sec + (timfor.tv_usec/1000000);
for (unsigned int q = 1; q <
path.size()-1; q++) {
//if (find(closures.begin(), closures.end(), path[q]) != closures.end()) {
// Collect, for each node on the path, the loops that can be spliced in
// without revisiting a node whose loop was already taken.
if (globalLoopPaths.find(path[q]) != globalLoopPaths.end() /*&& find(lloops.begin(), lloops.end(), path[q]) != lloops.end()*/ && globalLoopPaths[path[q]].size() != 0 /*&& path[q] != begin && path[q] != end*/) {
for (unsigned int qp1 = 0; qp1 < globalLoopPaths[path[q]].size(); qp1++) {
std::vector<int> gp = globalLoopPaths[path[q]][qp1];
//unzipPath(globalLoopPaths[path[q]][qp1],g,path[q],path[q]);
// std::vector<int> zgp = zipPath2(globalLoopPaths[zpt[q]][qp1], g);
for (unsigned int qp2 = 0; qp2 < takenLoops.size(); qp2++) {
if (find(gp.begin(),gp.end(), takenLoops[qp2]) != gp.end()) {
taken = true;
}
}
if (!taken) {
localLoops[path[q]].push_back(gp);
}
else {
lost++;
taken = false;
}
}
if (localLoops[path[q]].size() != 0) {
takenLoops.push_back(path[q]);
permnums *= (localLoops[path[q]].size()+1);
perms.push_back(permnums);
qs.push_back(path[q]);
}
}
}
//}
//if (loop) {
//std::cout << "lostloop: " << lost << std::endl;
//}
//else {
//std::cout << "lostpath: " << lost << std::endl;
//}
//std::cout << "endpathcheck" << std::endl;
//std::cout << "rest" << std::endl;
//std::cout << "permnums: " << permnums << std::endl;
//gettimeofday(&timfor, NULL);
//double t2for = timfor.tv_sec + (timfor.tv_usec/1000000);
//double ttfor = t2for - t1for;
//#pragma omp atomic
//ttfors += ttfor;
//std::set<std::vector<int> > movepaths2;
std::set<std::vector<int> > movepathscheck;
//timeval timperms;
//gettimeofday(&timperms, NULL);
// double t1perm = timperms.tv_sec + (timperms.tv_usec/1000000);
std::vector<int> nvec;
std::vector<std::vector<int> > boxpaths(permnums, nvec);
//#pragma omp parallel for schedule(guided)
// Enumerate every loop combination: permutation index i is decoded below
// as a mixed-radix number over the per-node loop counts.
for (int i = 1; i <= permnums; i++) {
//bool goodthread = false;
std::vector<int> loopsTaken;
//bool stop = false;
unsigned int j = 0;
std::vector<int> npath;
while (true) {
if (j == perms.size() || perms[j] > i) {
break;
}
else {
j++;
}
}
int pn = i;
// pL[k] holds the index of the loop chosen at splice point k, or -1
// for "no loop taken at this point".
std::vector<int> pL;
for (unsigned int j1 = 0; j1 <= j; j1++) {
pL.push_back(-1);
}
for (unsigned int k = j; k > 0; k--) {
int l = 1;
while (perms[k-1]*l < pn) {
l++;
}
pL[k] = l-2;
pn -= (perms[k-1]*(l-1));
}
pL[0] = pn-2;
unsigned int q2 = 0;
// Rebuild the path, splicing the chosen loop bodies in at their nodes.
for (unsigned int q1 = 0; q1 < path.size(); q1++) {
if (q2 < qs.size()) {
if (qs.size() != 0 && (unsigned)path[q1] == qs[q2] && (size_t)q2 != pL.size()) {
if (pL[q2] == -1) {
npath.push_back(path[q1]);
}
else {
// if (!stop) {
npath.insert(npath.end(), localLoops[path[q1]][pL[q2]].begin(), localLoops[path[q1]][pL[q2]].end());
// }
}
q2++;
}
else {
npath.push_back(path[q1]);
}
}
else {
npath.push_back(path[q1]);
}
}
#ifdef FULLDEBUG
std::cout << "path: " << std::endl;
for (int qe = 0; qe < npath.size(); qe++) {
std::cout << ", " << npath[qe];
}
std::cout << std::endl;
std::cout << "permnum: " << i << std::endl;
#endif
// bool addit = false;
//if (!stop) {
// if (loop && npath.front() == npath.back()) {
// addit = true;
// }
// else if (!loop && bound && npath.front() == begin && npath.back() == end && npath.size() != 1) {
// addit = true;
// }
// else if (!loop && !bound) {
// addit = true;
// }
// if (!addit) {
// std::cout << "bad path" << std::endl;
// }
//bool extra = false;
//if (addit && !loop) {
//if (movepathscheck.find(npath) == movepathscheck.end()) {
//int mpc = movepathscheck.size();
//std::set<std::vector<int> > movepathspre = movepathscheck;
// movepaths2.insert(npath);
//movepathscheck.insert(npath);
//ROSE_ASSERT(movepathscheck.size() == mpc || movepathspre.find(npath) == movepathspre.end());
//if (movepathscheck.size() == mpc) {
// extra = true;
// }
//}
//else {
//#pragma omp atomic
// doubledpaths++;
// }
//}
//if (!workingthread || threadsafe) {
//if ((newpaths.size() > 1 || i == permnums || threadsafe)) {
// }
// }
// }
//if (!extra)
// {
//if (movepaths2.size() > 0) //|| i == permnums || threadsafe)
// #pragma omp critical
// {
boxpaths[i-1] = npath;
// }
// }
//std::cout << "endrest" << std::endl;
}
evaledpaths += boxpaths.size();
if (evaledpaths > newmil*100000ull) {
//std::cout << "evaledpaths: " << evaledpaths << std::endl;
newmil++;
}
// #pragma omp critical
// {
// Deliver the expanded paths: top-level calls report each path to the
// user-supplied analyzePath; loop calls accumulate them for the caller.
if (!loop) {
for (std::vector<std::vector<int> >::iterator box = boxpaths.begin(); box != boxpaths.end(); box++) {
std::vector<Vertex> verts;
getVertexPath((*box), g, verts);
#pragma omp critical
{
analyzePath(verts);
}
}
}
else {
#pragma omp critical
{
loopPaths.insert(boxpaths.begin(), boxpaths.end());;
}
}
}
}
//}
/*
#pragma omp atomic
evaledpaths++;
//pathevals++;
if (evaledpaths % 10000 == 0 && evaledpaths != 0) {
std::cout << "evaled paths: " << evaledpaths << std::endl;
}
if (!loop) {
std::vector<Vertex> verts;
getVertexPath(npath, g, verts);
#pragma omp critical
{
#ifdef FULLDEBUG
for (unsigned int aa = 0; aa < npath.size(); aa++) {
if (ptsNum.find(npath[aa]) != ptsNum.end()) {
ptsNum[npath[aa]] += 1;
}
else {
ptsNum[npath[aa]] = 1;
}
}
#endif
analyzePath(verts);
}
}
else if (loop) {
//std::vector<int> zpth = zipPath(npath, g, npath.front(), npath.back());
#pragma omp critical
{
loopPaths.insert(npath);//zipPath(npath, g, npath.front(), npath.back()));
}
}
else {
}
}
*/
// movepaths2.clear();
// std::cout << "permnums: " << permnums << std::endl;
// std::cout << "evaledpaths final: " << pathevals << std::endl;
//gettimeofday(&timperms, NULL);
//double t2perm = timperms.tv_sec+(timperms.tv_usec/1000000);
//#pragma omp atomic
//tperms += t2perm - t1perm;
// }
//}
//}
//}
#ifdef PERFDEBUG
//gettimeofday(&tim, NULL);
// double t2 = tim.tv_sec+(tim.tv_usec/1000000.0);
// double tperm = t2 - t1perm
//double tX = t2 - t1;
//std::cout << "begin: " << begin << " end: " << end << std::endl;
// std::cout << "uTraverse time: " << tX << std::endl;
// std::cout << "tperms: " << tperms << std::endl;
// std::cout << "ttfors: " << ttfors << std::endl;
// std::cout << "doubledpaths: " << doubledpaths << std::endl;
#endif
#ifdef LP
// Cache the computed loop paths so a subsequent request for the same loop
// header can reuse them (see bfsTraversePath's loopStore lookup).
if (loop) {
#ifdef PERFDEBUG
// std::cout << "loopPaths: " << loopPaths.size() << std::endl;
#endif
loopStore[begin] = loopPaths;
}
#endif
return loopPaths;
}
}

/** This is the function that is used by the user directly to start the algorithm. It is immediately available to the user
SgGraphTraversal::constructPathAnalyzer
Input:
@param[begin] Vertex, starting node
@param[end] Vertex, endnode
@param[g] CFG* g, CFG calculated previously
*/

template<class CFG>
void
SgGraphTraversal<CFG>::
constructPathAnalyzer(CFG* g, bool unbounded, Vertex begin, Vertex end, bool ns)
{
    // Reset all traversal state so the object can be reused between runs.
    abnormals = 0;
    normals = 0;
    if (ns) {
        needssafety = true;
    }
    else {
        needssafety = false;
    }
    checkedfound = 0;
    recursed = 0;
    nextsubpath = 0;
    borrowed = true;
    stoppedpaths = 0;
    evaledpaths = 0;
    badpaths = 0;
    sourcenum = 0;
    prepareGraph(g);
    workingthread = false;
    workingthreadnum = -1;
    //std::cout << "markers: " << markers.size() << std::endl;
    //std::cout << "closures: " << closures.size() << std::endl;
    //std::cout << "sources: " << sources.size() << std::endl;
    //std::cout << "sinks" << sinks.size() << std::endl;
    // printHotness(g);
    bool subgraph = false;
    if (!subgraph) {
        if (!unbounded) {
            // Bounded traversal: all paths from begin to end.
            bound = true;
            recursiveLoops.clear();
            recurses.clear();
            std::vector<std::vector<int> > spaths = bfsTraversePath(vertintmap[begin], vertintmap[end], g);
            // std::cout << "spaths: " << spaths.size() << std::endl;
        }
        else {
            // Unbounded traversal: all paths from every source node.
            std::set<int> usedsources;
            bound = false;
            std::vector<int> localLps;
            for (unsigned int j = 0; j < sources.size(); j++) {
                sourcenum = sources[j];
                recursiveLoops.clear();
                recurses.clear();
                std::vector<std::vector<int> > spaths = bfsTraversePath(sources[j], -1, g);
            }
        }
    }
    //std::cout << "checkedfound: " << checkedfound << std::endl;
    printHotness(g);
}

/** DEPRECATED
This is a function to construct subgraphs for parallelization
SgGraphTraversal::computeSubGraphs
Input:
@param[begin] const int, starting point
@param[end] const int ending point
@param[g] const CFG*, control flow graph to compute
@param[depthDifferential] int, used to specify how large the subgraph should be
*/

template<class CFG>
void
SgGraphTraversal<CFG>::
computeSubGraphs(const int& begin, const int &end, CFG*& g, int depthDifferential)
{
    int minDepth = 0;
    int maxDepth = minDepth + depthDifferential;
    int currSubGraph = 0;
    // Fix: the original indexed subGraphVector / the map vectors at slot 0
    // without ever populating them (UB on an empty vector), and at the end of
    // each iteration pushed an *uninitialized* CFG* that was dereferenced on
    // the next pass. Allocate real graphs and map slots instead.
    if (subGraphVector.empty()) {
        subGraphVector.push_back(new CFG());
    }
    if (SubGraphGraphMap.empty()) {
        SubGraphGraphMap.push_back(std::map<Vertex, Vertex>());
    }
    if (GraphSubGraphMap.empty()) {
        GraphSubGraphMap.push_back(std::map<Vertex, Vertex>());
    }
    std::set<int> foundNodes;
    while (true) {
        // Seed the current subgraph with the node at depth minDepth.
        Vertex begin = boost::add_vertex(*subGraphVector[currSubGraph]);
        GraphSubGraphMap[currSubGraph][intvertmap[orderOfNodes[minDepth]]] = intvertmap[begin];
        SubGraphGraphMap[currSubGraph][intvertmap[begin]] = intvertmap[orderOfNodes[minDepth]];
        for (int i = minDepth; i <= maxDepth; i++) {
            Vertex v = GraphSubGraphMap[currSubGraph][intvertmap[orderOfNodes[i]]];
            std::vector<int> outEdges = getOutEdges(orderOfNodes[i], g);
            for (unsigned int j = 0; j < outEdges.size(); j++) {
                Vertex u;
                // Fix: these branches were inverted in the original -- the
                // subgraph vertex must be *created* on first encounter and
                // *looked up* when the target was already copied.
                if (foundNodes.find(getTarget(outEdges[j], g)) != foundNodes.end()) {
                    u = GraphSubGraphMap[currSubGraph][intvertmap[getTarget(outEdges[j], g)]];
                }
                else {
                    u = boost::add_vertex(*subGraphVector[currSubGraph]);
                    foundNodes.insert(getTarget(outEdges[j], g));
                    SubGraphGraphMap[currSubGraph][u] = intvertmap[getTarget(outEdges[j], g)];
                    GraphSubGraphMap[currSubGraph][intvertmap[getTarget(outEdges[j], g)]] = u;
                }
                Edge edge;
                bool ok;
                boost::tie(edge, ok) = boost::add_edge(v,u,*subGraphVector[currSubGraph]);
            }
        }
        minDepth = maxDepth;
        if ((unsigned int) minDepth == orderOfNodes.size()-1) {
            break;
        }
        maxDepth += depthDifferential;
        if ((unsigned int) maxDepth > orderOfNodes.size()-1) {
            maxDepth = orderOfNodes.size()-1;
        }
        // Allocate the next subgraph and its vertex maps before advancing.
        CFG* newSubGraph = new CFG();
        subGraphVector.push_back(newSubGraph);
        SubGraphGraphMap.push_back(std::map<Vertex, Vertex>());
        GraphSubGraphMap.push_back(std::map<Vertex, Vertex>());
        currSubGraph++;
    }
    return;
}

/* These should NOT be used by the user.
They are simply for writing interesting information on the DOT graphs of the CFG */ template<class CFG> void SgGraphTraversal<CFG>:: printCFGNodeGeneric(int &cf, std::string prop, std::ofstream& o) { std::string nodeColor = "black"; o << cf << " [label=\"" << " num:" << cf << " prop: " << prop << "\", color=\"" << nodeColor << "\", style=\"" << "solid" << "\"];\n"; } template<class CFG> void SgGraphTraversal<CFG>:: printCFGNode(int& cf, std::ofstream& o) { #ifdef FULLDEBUG int pts = ptsNum[cf]; std::string nodeColor = "black"; o << cf << " [label=\"" << " pts: " << pts << "\", color=\"" << nodeColor << "\", style=\"" << "solid" << "\"];\n"; #endif #ifndef FULLDEBUG std::string nodeColor = "black"; o << cf << " [label=\"" << " num:" << cf << "\", color=\"" << nodeColor << "\", style=\"" << "solid" << "\"];\n"; #endif } template<class CFG> void SgGraphTraversal<CFG>:: printCFGEdge(int& cf, CFG*& cfg, std::ofstream& o) { int src = getSource(cf, cfg); int tar = getTarget(cf, cfg); o << src << " -> " << tar << " [label=\"" << src << " " << tar << "\", style=\"" << "solid" << "\"];\n"; } template<class CFG> void SgGraphTraversal<CFG>:: printHotness(CFG*& g) { const CFG* gc = g; int currhot = 0; std::ofstream mf; std::stringstream filenam; filenam << "hotness" << currhot << ".dot"; currhot++; std::string fn = filenam.str(); mf.open(fn.c_str()); mf << "digraph defaultName { \n"; vertex_iterator v, vend; edge_iterator e, eend; for (boost::tie(v, vend) = vertices(*gc); v != vend; ++v) { printCFGNode(vertintmap[*v], mf); } for (tie(e, eend) = edges(*gc); e != eend; ++e) { printCFGEdge(edgeintmap[*e], g, mf); } mf.close(); } template<class CFG> void SgGraphTraversal<CFG>:: printPathDot(CFG*& g) { const CFG* gc = g; std::ofstream mf; std::stringstream filenam; filenam << "pathnums.dot"; std::string fn = filenam.str(); mf.open(fn.c_str()); mf << "digraph defaultName { \n"; vertex_iterator v, vend; edge_iterator e, eend; for (tie(v, vend) = vertices(*gc); v != vend; ++v) { if 
(nodeStrings.find(vertintmap[*v]) != nodeStrings.end()) {
    int nn = vertintmap[*v];
    printCFGNodeGeneric(vertintmap[*v], nodeStrings[nn], mf);
}
else {
    // No registered property string for this node.
    printCFGNodeGeneric(vertintmap[*v], "noprop", mf);
}
}
for (tie(e, eend) = edges(*gc); e != eend; ++e) {
    printCFGEdge(edgeintmap[*e], g, mf);
}
mf.close();
}

/** This is the function that preps the graph for traversal
SgGraphTraversal::prepareGraph
Input:
@param[g] CFG*& g, CFG calculated previously
*/

template<class CFG>
void
SgGraphTraversal<CFG>::
prepareGraph(CFG*& g)
{
    // Node/edge ids start at 1 (0 is never a valid id).
    nextNode = 1;
    nextEdge = 1;
    findClosuresAndMarkersAndEnumerate(g);
}

/** DEPRECATED
This is the function that preps the graph for traversal, currently this one isn't used
but for many traversals on one visitor may necessitate
SgGraphTraversal::firstPrepGraph
Input:
@param[g] CFG*& g, CFG calculated previously
*/

template<class CFG>
void
SgGraphTraversal<CFG>::
firstPrepGraph(CFG*& g)
{
    nextNode = 1;
    nextEdge = 1;
    findClosuresAndMarkersAndEnumerate(g);
}

/** This calculates nodes with more than one in edge or more than one out edge
SgGraphTraversal::findClosuresAndMarkers
Input:
@param[g] CFG*& g, CFG calculated previously
*/

template<class CFG>
void
SgGraphTraversal<CFG>::
findClosuresAndMarkersAndEnumerate(CFG*& g)
{
    // First pass: assign sequential integer ids to every edge and vertex
    // and record them in the int<->descriptor lookup maps.
    edge_iterator e, eend;
    for (tie(e, eend) = edges(*g); e != eend; ++e) {
        intedgemap[nextEdge] = *e;
        edgeintmap[*e] = nextEdge;
        nextEdge++;
    }
    vertex_iterator v1, vend1;
    for (boost::tie(v1, vend1) = vertices(*g); v1 != vend1; ++v1) {
        vertintmap[*v1] = nextNode;
        intvertmap[nextNode] = *v1;
        nextNode++;
    }
    // Second pass: classify vertices as markers (fan-out), closures
    // (fan-in), sinks (no out-edges) and sources (no in-edges).
    vertex_iterator v, vend;
    for (boost::tie(v, vend) = vertices(*g); v != vend; ++v) {
        std::vector<int> outs = getOutEdges(vertintmap[*v], g);
        std::vector<int> ins = getInEdges(vertintmap[*v], g);
        if (outs.size() > 1) {
            markers.push_back(vertintmap[*v]);
            markerIndex[vertintmap[*v]] = markers.size()-1;
            for (unsigned int i = 0; i < outs.size(); i++) {
                pathsAtMarkers[vertintmap[*v]].push_back(getTarget(outs[i], g));
            }
        }
        if (ins.size() > 1) {
closures.push_back(vertintmap[*v]);
}
if (outs.size() == 0) {
    sinks.push_back(vertintmap[*v]);
}
if (ins.size() == 0) {
    sources.push_back(vertintmap[*v]);
}
}
return;
}

/** DEPRECATED
Currently unused but will be necessary for parallelization in progress
SgGraphTraversal::computeOrder
@param[g] CFG* cfg in question
@parm[begin] const int, integer representation of source node
*/

template<class CFG>
void
SgGraphTraversal<CFG>::
computeOrder(CFG*& g, const int& begin)
{
    // Breadth-first topological-style ordering: a join node (fan-in > 1) is
    // only expanded once all of its in-edges have been seen; until then it
    // is parked in heldBackNodes.
    std::vector<int> currentNodes;
    std::vector<int> newCurrentNodes;
    currentNodes.push_back(begin);
    std::map<int, int> reverseCurrents;
    orderOfNodes.push_back(begin);
    std::set<int> heldBackNodes;
    while (currentNodes.size() != 0) {
        for (unsigned int j = 0; j < currentNodes.size(); j++) {
            std::vector<int> inEdges = getInEdges(currentNodes[j], g);
            if (inEdges.size() > 1) {
                if (reverseCurrents.find(currentNodes[j]) == reverseCurrents.end()) {
                    reverseCurrents[currentNodes[j]] = 0;
                }
                if ((unsigned int) reverseCurrents[currentNodes[j]] == inEdges.size() - 1) {
                    // Last in-edge arrived: release the node and expand it.
                    heldBackNodes.erase(currentNodes[j]);
                    reverseCurrents[currentNodes[j]]++;
                    std::vector<int> outEdges = getOutEdges(currentNodes[j], g);
                    for (unsigned int k = 0; k < outEdges.size(); k++) {
                        newCurrentNodes.push_back(getTarget(outEdges[k], g));
                        orderOfNodes.push_back(getTarget(outEdges[k], g));
                    }
                }
                // NOTE(review): comparing the per-node counter against
                // reverseCurrents.size() (the map size) looks like it was
                // meant to be inEdges.size() -- confirm before reusing this
                // deprecated function.
                else if (reverseCurrents[currentNodes[j]] < reverseCurrents.size()) {
                    reverseCurrents[currentNodes[j]]++;
                    if (heldBackNodes.find(currentNodes[j]) == heldBackNodes.end()) {
                        heldBackNodes.insert(currentNodes[j]);
                    }
                }
            }
            else {
                std::vector<int> outEdges = getOutEdges(currentNodes[j], g);
                for (unsigned int k = 0; k < outEdges.size(); k++) {
                    newCurrentNodes.push_back(getTarget(outEdges[k], g));
                    orderOfNodes.push_back(getTarget(outEdges[k], g));
                }
            }
        }
        // Deadlock breaker: if nothing new was produced but nodes are still
        // held back, force-expand the held-back nodes.
        if (newCurrentNodes.size() == 0 && heldBackNodes.size() != 0) {
            for (std::set<int>::iterator q = heldBackNodes.begin(); q != heldBackNodes.end(); q++) {
                int qint = *q;
                std::vector<int> heldBackOutEdges = getOutEdges(qint,
g);
                for (unsigned int p = 0; p < heldBackOutEdges.size(); p++) {
                    newCurrentNodes.push_back(getTarget(heldBackOutEdges[p], g));
                }
            }
            heldBackNodes.clear();
        }
        currentNodes = newCurrentNodes;
        newCurrentNodes.clear();
    }
    return;
}

/** Converts the path calculated by this algorithm to Vertices so users can access data
SgGraphTraversal::getVertexPath
@param[path] integer representation of path
@param[g] CFG*, cfg in question
@param[vertexPath] for some reason this can't be a return value so it is changed via pass by reference
*/

template<class CFG>
void
SgGraphTraversal<CFG>::
getVertexPath(std::vector<int> path, CFG*& g, std::vector<Vertex>& vertexPath)
{
    // Map each integer node id back to its Boost vertex descriptor.
    for (unsigned int i = 0; i < path.size(); i++) {
        vertexPath.push_back(intvertmap[path[i]]);
    }
}

/** DEPRECATED
Currently unused, may eventually be modified for optimal storage purposes
SgGraphTraversal::storeCompact
@param[compactPath] path to be compactified
*/

template<class CFG>
void
SgGraphTraversal<CFG>::
storeCompact(std::vector<int> compactPath)
{
    // Intentionally a no-op; kept for interface stability.
    return;
}
common.h
#ifndef __COMMOM_H__ #define __COMMOM_H__ #include <stdint.h> #include <string.h> #include "gap_common.h" #include "gap_cluster.h" #include "gap_dmamchan.h" performance_t perf; #define l2malloc malloc #define l2free free #define l1malloc L1_Malloc #define l1free L1_Free #define ANSI_COLOR_RED "\x1b[1m\x1b[31m" #define ANSI_COLOR_GREEN "\x1b[1m\x1b[32m" #define ANSI_COLOR_RESET "\x1b[0m" #define SHARED_ICACHE (0x0U) #define PRIVATE_ICACHE (0x1U) #define MULTIPORT_ICACHE (0x2U) #define L2_MEM_BASE_ADDR (0x1C000000) volatile int PowerBenchActive __attribute__( ( aligned ( 8 ) ) ); int cycles[3] = {0,0,0}; int reg[3][20]; static inline uint32_t checksum_vector(void *v, uint32_t nbElements, uint32_t elementSize) { uint32_t cnt = 0ULL; uint32_t i; for(i=0; i<nbElements; i++) cnt += ((uint8_t *) v) [i*elementSize]; return cnt; } static inline void memcpy_async(void *dst, void *src, uint32_t size, dma_req_t *req) { if (L2_MEM_BASE_ADDR <= (uint32_t) dst) { // Copy from shared L1 to L2 DMAMCHAN_Memcpy_1D((uint32_t)dst, (uint32_t)src, size, GAP_DMA_TCDM2L2, req); } else { // copy from L2 to shared L1 DMAMCHAN_Memcpy_1D((uint32_t)src, (uint32_t)dst, size, GAP_DMA_L22TCDM, req); } } static inline void memcpy_async2D(void *dst, void *src, uint32_t size, uint32_t stride, uint32_t count, dma_req_t *req) { if (L2_MEM_BASE_ADDR <= (uint32_t) dst) { // Copy from shared L1 to L2 DMAMCHAN_Memcpy_2D((uint32_t) dst, (uint32_t)src, size, stride, count, GAP_DMA_TCDM2L2, req); } else { // copy from L2 to shared L1 DMAMCHAN_Memcpy_2D((uint32_t) src, (uint32_t) dst, size, stride, count, GAP_DMA_L22TCDM, req); } } static inline void memcpy_wait(dma_req_t *req) { DMAMCHAN_WaitRequestEnd(req); } static inline void perf_save_reg(int iter) { for (int i = 4; i < 20; i++) reg[iter][i] = *(volatile int*) (CLUSTER_SCBC_BASE + (i) * 4); } static inline void perf_reg_print(int iter) { printf("Time: %d\n", cycles[iter]); for(int i = 4; i < 8; ++i) printf("%d,", (int)reg[iter][i]); printf(" PRI\n"); 
for(int i = 8; i < 12; ++i) printf("%d,", (int)reg[iter][i]); printf(" SP\n"); for(int i = 12; i < 16; ++i) printf("%d,", (int)reg[iter][i]); printf(" HIER1\n"); for(int i = 16; i < 20; ++i) printf("%d,", (int)reg[iter][i]); printf(" MP-HIER2\n"); } static inline void profile_start(int iter) { // 0x10201410, Clear counter CLUSTER_SCBC->ICACHE_CNTS_CLEAR = 0xC1A0FFFFU; // 0x10201414, Enable counter Start CLUSTER_SCBC->ICACHE_CNTS_ENABLE = 0xCA550FFFU; #pragma omp master { CLUSTER_TIMERL->COMPARE = 0xFFFFFFFF; /* Reset Enable Counter */ CLUSTER_TIMERL->CTRL = (1 << TIMERL_CFG_REG_LOW_ENABLE_Pos) | (1 << TIMERL_CFG_REG_LOW_RESET_Pos) | (1 << TIMERL_CFG_REG_LOW_IRQE_Pos) | (0 << TIMERL_CFG_REG_LOW_IEM_Pos) | (1 << TIMERL_CFG_REG_LOW_CMP_CLR_Pos) | (0 << TIMERL_CFG_REG_LOW_ONE_SHOT_Pos) | (0 << TIMERL_CFG_REG_LOW_PRESCALERE_Pos) | (0 << TIMERL_CFG_REG_LOW_PRESCALER_Pos) | (0 << TIMERL_CFG_REG_LOW_CLKS_Pos) | (0 << TIMERL_CFG_REG_LOW_64BIT_Pos); /* Reset value */ CLUSTER_TIMERL->VALUE = 0; if(iter == 2) { PowerBenchActive = 0XABBAABBA; } } } static inline void profile_stop(int iter) { #pragma omp master { if(iter == 2) { PowerBenchActive = 0xABBACACA; } cycles[iter] = CLUSTER_TIMERL->VALUE; CLUSTER_TIMERL->CTRL = 0; perf_save_reg(iter); } } static inline void profile_show() { perf_reg_print(0); perf_reg_print(1); perf_reg_print(2); } #endif
data_env_scalar_map.c
#include <stdio.h>
#include <omp.h>

/* Demonstrates OpenMP target data mapping of scalars:
 *  - map(from:)   copies device -> host at region exit only
 *  - map(tofrom:) copies host -> device at entry and back at exit
 * NOTE(review): the device-side value of `from` on entry is not initialized
 * by the map(from:) clause, so the first GPU printf of `from` shows an
 * unspecified value — presumably intentional for this demo; confirm. */
int main(int argc, char *argv[], char **envp)
{
    int numdev = omp_get_num_devices();
    const char *plural = (numdev==1 ? "" : "s");
    printf ("Machine has %d GPU device%s\n", numdev, plural );

    int from = 13;
    int tofrom = 17;

    printf("ON HOST before: from = %d, tofrom = %d\n", from, tofrom);

#pragma omp target data map(from:from) map(tofrom:tofrom)
#pragma omp target
    {
        printf("ON GPU: enter from = %d, tofrom = %d\n", from, tofrom);

        /* Assign on the device; both values flow back to the host when the
         * target data region ends. */
        from = 5;
        tofrom = 5;

        printf("ON GPU: exit from = %d, tofrom = %d\n", from, tofrom);
    }

    // This should print ON HOST after: from = 5, tofrom = 5
    printf("ON HOST after: from = %d, tofrom = %d\n", from, tofrom);

    return 0;
}
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

namespace Eigen {

namespace internal {

/** \internal
  * Gets or sets the process-wide maximum-thread setting.
  * SetAction stores *v; GetAction writes the stored value into *v, falling
  * back to omp_get_max_threads() when unset (or 1 without OpenMP).
  * NOTE(review): the function-local static is not synchronized — a
  * concurrent Set and Get is a data race; threading is presumably expected
  * to be configured before parallel use (see initParallel). */
inline void manage_multi_threading(Action action, int* v)
{
  // -1 means "not set by the user"; GetAction then queries OpenMP instead.
  static EIGEN_UNUSED int m_maxThreads = -1;

  if (action == SetAction)
  {
    eigen_internal_assert(v != 0);
    m_maxThreads = *v;
  }
  else if (action == GetAction)
  {
    eigen_internal_assert(v != 0);
    #ifdef EIGEN_HAS_OPENMP
    if (m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    *v = 1; // no OpenMP support compiled in: always report a single thread
    #endif
  }
  else
  {
    eigen_internal_assert(false); // unknown Action value
  }
}

} // namespace internal

/** Must be called first when calling Eigen from multiple threads.
  * It forces initialization of the lazily-created statics (max-thread
  * setting and cache sizes) so that later concurrent reads are safe. */
inline void initParallel()
{
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}

namespace internal {

/** \internal Per-thread bookkeeping for a parallel GEMM: the packing-
  * synchronization fields (sync/users) plus the lhs row range this thread
  * packs, consumed by the product kernel. */
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  Index volatile sync;   // progress flag for cross-thread lhs-pack handoff
  int volatile users;    // number of threads consuming this thread's pack

  Index lhs_start;       // first lhs row owned by this thread
  Index lhs_length;      // number of lhs rows owned by this thread
};

/** \internal Runs func over the rows x cols product space, either serially
  * or split across OpenMP threads according to size heuristics.
  * `transpose` swaps the split axis to support row-major destinations. */
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redisigned anyway.
  EIGEN_UNUSED_VARIABLE(depth);
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0, rows, 0, cols); // serial path: one call over the whole product
#else

  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  // - the max number of threads we can create is greater than 1
  // - we are not already in a parallel code
  // - the sizes are large enough

  // compute the maximal number of threads from the size of the product:
  // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1, size / Functor::Traits::nr);

  // compute the maximal number of threads from the total amount of work:
  double work = static_cast<double>(rows) * static_cast<double>(cols) *
      static_cast<double>(depth);
  double kMinTaskSize = 50000;  // FIXME improve this heuristic.
  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));

  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);

  // if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if ((!Condition) || (threads == 1) || (omp_get_num_threads()>1))
    return func(0, rows, 0, cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  // Always split along the "cols" axis; swapping makes that hold for
  // row-major destinations too.
  if (transpose) std::swap(rows, cols);

  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>, info, threads, 0);

  #pragma omp parallel num_threads(threads)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of request ones.
    Index actual_threads = omp_get_num_threads();

    // Column blocks rounded down to a multiple of 4; row blocks rounded to
    // a multiple of the kernel's mr. The last thread takes the remainder.
    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows / Functor::Traits::mr)*Functor::Traits::mr;

    Index r0 = i*blockRows;
    Index actualBlockRows = (i + 1 == actual_threads) ? rows - r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i + 1 == actual_threads) ? cols - c0 : blockCols;

    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    if (transpose) func(c0, actualBlockCols, 0, rows, info);
    else func(0, rows, c0, actualBlockCols, info);
  }
#endif
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PARALLELIZER_H
AnisotropicTensorUtils.h
/* * Copyright 2016 University of Basel, Medical Image Analysis Center * * Author: Christoph Jud (christoph.jud@unibas.ch) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <iostream> #include <itkGradientImageFilter.h> #include "CommonTypes.h" #include "itkUtils.h" #include "vnl/vnl_cross.h" #include "vnl/vnl_inverse.h" #include "vnl/algo/vnl_symmetric_eigensystem.h" #include <itkDiscreteGaussianDerivativeImageFilter.h> #include <itkSymmetricEigenDecompositionImageFilter.h> // alignment_direction: x=0, y=1, z=2, none=negative itk::SymmetricEigenDecompositionImageFilter<TensorImageType,TensorImageType>::OutputImageType::Pointer ComputeAnisotropicTensor(ImageType::Pointer input_mask, double sigma, double weight, int alignment_direction, double& maximum_tile, bool anisotropic_filtering=true, bool write_to_tmp=true, std::string temp_directory="/tmp/"){ // preprocess mask if(anisotropic_filtering) input_mask = AnisotropicDiffusion<ImageType>(input_mask, 0.0625, 10.0, 2.0, 50); // setup alignment vector md double md[SpaceDimensions]; for(unsigned d=0; d<SpaceDimensions; d++){ md[d] = 0; } if(alignment_direction >= 0 && alignment_direction <SpaceDimensions){ md[alignment_direction] = 1; } // Multiply by gradient weight to scale gradient magnitude input_mask = MultiplyConstant<ImageType>(input_mask,weight); /** Compute structure tensor */ // K_rho ( nabla u_sigma outer_product nabla u_sigma typedef itk::SymmetricSecondRankTensor< ImageType::PixelType, 
SpaceDimensions> SymmetricTensorType; typedef itk::Image<SymmetricTensorType::EigenVectorsMatrixType, SpaceDimensions> TensorImageType; ImageType::Pointer smoothed = GaussianSmoothing<ImageType>(input_mask, sigma); typedef itk::GradientImageFilter<ImageType, ImageType::PointValueType, ImageType::PointValueType > GradientFilterType; GradientFilterType::Pointer gradientFilter = GradientFilterType::New(); gradientFilter->SetInput( smoothed ); gradientFilter->Update(); if(write_to_tmp) WriteImage<GradientFilterType::OutputImageType>(gradientFilter->GetOutput(), temp_directory + "/gradient_image.vtk"); TensorImageType::Pointer struct_tensor = TensorImageType::New(); struct_tensor->CopyInformation( gradientFilter->GetOutput() ); struct_tensor->SetRequestedRegion( gradientFilter->GetOutput()->GetRequestedRegion() ); struct_tensor->SetBufferedRegion( gradientFilter->GetOutput()->GetBufferedRegion() ); struct_tensor->Allocate(); typedef TensorImageType::PixelType EigenSystemMatrixType; typedef vnl_vector<EigenSystemMatrixType::ValueType> EigenVectorType; itk::ImageRegionConstIterator<GradientFilterType::OutputImageType> iter_gradient(gradientFilter->GetOutput(), gradientFilter->GetOutput()->GetLargestPossibleRegion()); itk::ImageRegionIterator<TensorImageType> iter_tensor(struct_tensor, struct_tensor->GetLargestPossibleRegion()); while(!iter_gradient.IsAtEnd() && !iter_tensor.IsAtEnd()){ auto M(outer_product(iter_gradient.Get().GetVnlVector(),iter_gradient.Get().GetVnlVector())); EigenSystemMatrixType T; for(unsigned i=0; i<SpaceDimensions; i++){ for(unsigned j=0; j<SpaceDimensions; j++){ T(i,j) = M(i,j); } } iter_tensor.Set(T); ++iter_gradient; ++iter_tensor; } if(write_to_tmp) WriteImage<TensorImageType>(struct_tensor, temp_directory + "/struct_tensor.vtk"); // filter tensor image elemet-wise TensorImageType::Pointer smoothed_tensor = TensorImageType::New(); smoothed_tensor->CopyInformation( gradientFilter->GetOutput() ); smoothed_tensor->SetRequestedRegion( 
gradientFilter->GetOutput()->GetRequestedRegion() ); smoothed_tensor->SetBufferedRegion( gradientFilter->GetOutput()->GetBufferedRegion() ); smoothed_tensor->Allocate(); // each matrix index #pragma omp parallel for for(unsigned i=0; i<SpaceDimensions; i++){ for(unsigned j=0; j<SpaceDimensions; j++){ ImageType::Pointer component_image = ImageType::New(); component_image->CopyInformation( gradientFilter->GetOutput() ); component_image->SetRequestedRegion( gradientFilter->GetOutput()->GetRequestedRegion() ); component_image->SetBufferedRegion( gradientFilter->GetOutput()->GetBufferedRegion() ); component_image->Allocate(); // fill up component image with structure tensor index i,j { itk::ImageRegionConstIterator<TensorImageType> iter_tensor(struct_tensor, struct_tensor->GetLargestPossibleRegion()); itk::ImageRegionIterator<ImageType> iter_component(component_image, component_image->GetLargestPossibleRegion()); while(!iter_tensor.IsAtEnd() && !iter_component.IsAtEnd()){ iter_component.Set(iter_tensor.Get()(i,j)); ++iter_tensor; ++iter_component; } } component_image = GaussianSmoothing<ImageType>(component_image, sigma/4.0); // fill up structur tensor element i,j { itk::ImageRegionIterator<TensorImageType> iter_tensor(smoothed_tensor, smoothed_tensor->GetLargestPossibleRegion()); itk::ImageRegionConstIterator<ImageType> iter_component(component_image, component_image->GetLargestPossibleRegion()); while(!iter_tensor.IsAtEnd() && !iter_component.IsAtEnd()){ auto M = iter_tensor.Get(); M(i,j) = iter_component.Get(); iter_tensor.Set(M); ++iter_tensor; ++iter_component; } } } } if(write_to_tmp) WriteImage<TensorImageType>(smoothed_tensor, temp_directory + "/smoothed_struct_tensor.vtk"); typedef itk::SymmetricEigenDecompositionImageFilter<TensorImageType,TensorImageType> SymmetricEigenDecompositionImageFilterType; SymmetricEigenDecompositionImageFilterType::Pointer symmetricEigenAnalysisFilter = SymmetricEigenDecompositionImageFilterType::New(); 
symmetricEigenAnalysisFilter->SetInput(smoothed_tensor); symmetricEigenAnalysisFilter->SetDimension(SpaceDimensions); symmetricEigenAnalysisFilter->OrderEigenValuesBy(SymmetricEigenDecompositionImageFilterType::EigenValueOrderType::OrderByMagnitude); symmetricEigenAnalysisFilter->Update(); smoothed_tensor = symmetricEigenAnalysisFilter->GetOutput(); if(write_to_tmp) WriteImage<SymmetricEigenDecompositionImageFilterType::OutputImageType>(smoothed_tensor, temp_directory + "/main_structure.vtk"); // lets define the Y direction as the main direction of motion and damp first component typedef TensorImageType::PixelType EigenSystemMatrixType; typedef vnl_vector<EigenSystemMatrixType::ValueType> EigenVectorType; // main direction is weighted proportional to the cross product with the Y direction // the other directions are scaled to unit magnitude { double max = std::numeric_limits<double>::lowest(); itk::ImageRegionConstIterator<GradientFilterType::OutputImageType> iter_gradient(gradientFilter->GetOutput(), gradientFilter->GetOutput()->GetLargestPossibleRegion()); itk::ImageRegionIterator<TensorImageType> iter_tensor(smoothed_tensor, smoothed_tensor->GetLargestPossibleRegion()); while(!iter_tensor.IsAtEnd()){ EigenSystemMatrixType M = iter_tensor.Get(); EigenVectorType yv = EigenVectorType(SpaceDimensions,0); for(unsigned d=0; d<SpaceDimensions; d++) yv[d]=md[d]; #if SpaceDimensions==2 ScalarType magnitude1 = M.GetVnlMatrix().get_row(1).two_norm(); // largest eigenvector ScalarType magnitude2 = M.GetVnlMatrix().get_row(0).two_norm(); // assumption: if first component is greater than 1e-8, the second one is not zero as well if(magnitude1>1e-8 && magnitude2>1e-8){ // Get eigenvector with largest eigenvalue, and normalize it to unit magnitude EigenVectorType normalized = M.GetVnlMatrix().get_row(1)/magnitude1; if(alignment_direction >= 0 && alignment_direction <SpaceDimensions){ // measure alignement to yv vector double factor = std::abs(vnl_cross_2d(normalized, yv)) + 
1e-10; //factor /= direction_weight; factor = factor*factor / std::sqrt(SpaceDimensions); // weight the first direction with that measure for(unsigned d=0; d<SpaceDimensions; d++){ M(1,d) = M(1,d)*factor; } } // because we want exactly the opposite (get small magnitudes for large vectors) double reverse_scale = M.GetVnlMatrix().get_row(1).two_norm(); double scaler = std::pow(reverse_scale,1.5); if(max<reverse_scale) max = reverse_scale; for(unsigned d=0; d<SpaceDimensions; d++){ //M(1,d) = normalized[d]/(1+reverse_scale); //M(1,d) = normalized[d]/(1+std::log(1+reverse_scale)); //M(1,d) = normalized[d]/(std::exp(c2*std::abs(reverse_scale))); M(1,d) = normalized[d]/(1+scaler); } // normalize other dimensions to 1 for(unsigned d=0; d<SpaceDimensions; d++){ //M(0,d) = M(0,d)/magnitude2; M(0,d) = M(0,d)/magnitude2 * (1+std::pow(reverse_scale,1.5)/(1+std::pow(reverse_scale,1.5))); } M = M.GetVnlMatrix().flipud().transpose(); } else{ // just orthogonal unit vectors M(0,0) = 1; M(0,1) = 0; M(1,0) = 0; M(1,1) = 1; } #elif SpaceDimensions==3 ScalarType magnitude1 = M.GetVnlMatrix().get_row(2).two_norm(); // largest eigenvector ScalarType magnitude2 = M.GetVnlMatrix().get_row(1).two_norm(); ScalarType magnitude3 = M.GetVnlMatrix().get_row(0).two_norm(); // assumption: if first component is greater than 1e-8, the second one is not zero as well if(magnitude1>1e-8 && magnitude2>1e-8 && magnitude3>1e-8){ // Get eigenvector with largest eigenvalue, and normalize it to unit magnitude EigenVectorType normalized = M.GetVnlMatrix().get_row(2)/magnitude1; if(alignment_direction >= 0 && alignment_direction <SpaceDimensions){ // measure alignement to yv vector ScalarType factor = std::abs(vnl_cross_3d(normalized, yv).two_norm()) + 1e-10; //factor /= direction_weight; factor = factor*factor / std::sqrt(SpaceDimensions); // weight the first direction with that measure for(unsigned d=0; d<SpaceDimensions; d++){ M(2,d) = M(2,d)*factor; } } // because we want exactly the opposite (get small 
magnitudes for large vectors) ScalarType reverse_scale = M.GetVnlMatrix().get_row(2).two_norm(); ScalarType scaler = std::pow(reverse_scale,1.5); if(max<reverse_scale) max = reverse_scale; for(unsigned d=0; d<SpaceDimensions; d++){ //M(2,d) = normalized[d]/(1+reverse_scale); M(2,d) = normalized[d]/(1+scaler); } // normalize other dimensions to 1 for(unsigned d=0; d<SpaceDimensions; d++){ M(1,d) = M(1,d)/magnitude2 * (1+std::pow(reverse_scale,1.5)/(1+std::pow(reverse_scale,1.5))); M(0,d) = M(0,d)/magnitude3 * (1+std::pow(reverse_scale,1.5)/(1+std::pow(reverse_scale,1.5))); } M = M.GetVnlMatrix().flipud().transpose(); } else{ // just orthogonal unit vectors M(0,0) = 1; M(0,1) = 0; M(0,2) = 0; M(1,0) = 0; M(1,1) = 1; M(1,2) = 0; M(2,0) = 0; M(2,1) = 0; M(2,2) = 1; } #endif iter_tensor.Set(M); ++iter_gradient; ++iter_tensor; } maximum_tile = max; } if(write_to_tmp) WriteImage<SymmetricEigenDecompositionImageFilterType::OutputImageType>(smoothed_tensor, temp_directory + "/tensor_directions.vtk"); TensorImageType::Pointer covariance_tensor = TensorImageType::New(); covariance_tensor->CopyInformation( gradientFilter->GetOutput() ); covariance_tensor->SetRequestedRegion( gradientFilter->GetOutput()->GetRequestedRegion() ); covariance_tensor->SetBufferedRegion( gradientFilter->GetOutput()->GetBufferedRegion() ); covariance_tensor->Allocate(); { itk::ImageRegionConstIterator<TensorImageType> iter_direction(smoothed_tensor, smoothed_tensor->GetLargestPossibleRegion()); itk::ImageRegionIterator<TensorImageType> iter_covariance(covariance_tensor, covariance_tensor->GetLargestPossibleRegion()); while(!iter_direction.IsAtEnd() && !iter_covariance.IsAtEnd()){ EigenSystemMatrixType M = iter_direction.Get(); // calculate covariance matrix given eigenvectors double s = M.GetVnlMatrix().get_column(0).two_norm(); if(s<1e-8){ M.Fill(0); M.SetIdentity(); } else{ EigenSystemMatrixType::InternalMatrixType S(SpaceDimensions,SpaceDimensions); S.fill(0); S.set_identity(); 
EigenSystemMatrixType::InternalMatrixType U(SpaceDimensions,SpaceDimensions); U.fill(0); for(unsigned d=0; d<SpaceDimensions; d++){ ScalarType s = M.GetVnlMatrix().get_column(d).two_norm(); EigenVectorType n = M.GetVnlMatrix().get_column(d)/s; S(d,d) = std::sqrt(s); U.set_column(d,n); } M = U*S*S*U.transpose(); } if(vnl_determinant(M.GetVnlMatrix())<=0){ std::cout << "Matrix is not positive definite" << std::endl; std::cout << M << std::endl; return covariance_tensor; } iter_covariance.Set(M); ++iter_direction; ++iter_covariance; } } return covariance_tensor; } // alignment_direction: x=0, y=1, z=2, none=negative itk::SymmetricEigenDecompositionImageFilter<TensorImageType,TensorImageType>::OutputImageType::Pointer ComputeAnisotropicTensor(ImageType::Pointer input_mask, double sigma, double weight, int alignment_direction, bool anisotropic_filtering=true, bool write_to_tmp=true, std::string temp_directory="/tmp/"){ double maximum_tile = -1; return ComputeAnisotropicTensor(input_mask, sigma, weight, alignment_direction, maximum_tile, anisotropic_filtering, write_to_tmp); }
bfsdfs.h
namespace TSnap { ///////////////////////////////////////////////// // BFS and DFS /// Returns a directed Breadth-First-Search tree rooted at StartNId. ##GetBfsTree1 template <class PGraph> PNGraph GetBfsTree(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn); /// My BFS Function template <class PGraph> PNGraph MyGetBfsTree(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn); /// Returns the BFS tree size (number of nodes) and depth (number of levels) by following in-links (parameter FollowIn = true) and/or out-links (parameter FollowOut = true) of node StartNId. template <class PGraph> int GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, int& TreeSzX, int& TreeDepthX); /// Finds IDs of all nodes that are at distance Hop from node StartNId. ##GetSubTreeSz template <class PGraph> int GetNodesAtHop(const PGraph& Graph, const int& StartNId, const int& Hop, TIntV& NIdV, const bool& IsDir=false); /// Returns the number of nodes at each hop distance from the starting node StartNId. ##GetNodesAtHops template <class PGraph> int GetNodesAtHops(const PGraph& Graph, const int& StartNId, TIntPrV& HopCntV, const bool& IsDir=false); /// Returns the BFS path from node SrcNId to node DstNId. //template <class PGraph> int GetBfsPath(const PGraph& Graph, const int& SrcNId, const int& DstNId, TIntV& PathNIdV, const bool& IsDir = false); ///////////////////////////////////////////////// // Shortest paths /// Returns the length of the shortest path from node SrcNId to node DstNId. ##GetShortPath1 template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir=false); /// Returns the length of the shortest path from node SrcNId to all other nodes in the network. 
##GetShortPath2 template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir=false, const int& MaxDist=TInt::Mx); ///////////////////////////////////////////////// // Diameter /// Returns the (approximation of the) Diameter (maximum shortest path length) of a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsFullDiam template <class PGraph> int GetBfsFullDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir=false); /// Returns the (approximation of the) Effective Diameter (90-th percentile of the distribution of shortest path lengths) of a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiam1 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir=false); /// Returns the (approximation of the) Effective Diameter and the Diameter of a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiam2 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX); /// Returns the (approximation of the) Effective Diameter, the Diameter and the Average Shortest Path length in a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiam3 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX, double& AvgSPLX); /// Returns the (approximation of the) Effective Diameter, the Diameter and the Average Shortest Path length in a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiamAll template <class PGraph> double GetBfsEffDiamAll(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX, double& AvgSPLX); /// Use the whole graph (all edges) to measure the shortest path lengths but only report the path lengths between nodes in the SubGraphNIdV. 
##GetBfsEffDiam4 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const TIntV& SubGraphNIdV, const bool& IsDir, double& EffDiamX, int& FullDiamX); // TODO: Implement in the future //template <class PGraph> int GetRangeDist(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir=false); //template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir=false, const int& MaxDist=1000); //template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, const TIntSet& TargetSet, const bool& IsDir, TIntV& PathNIdV); //template <class PGraph> int GetShortPath(TIntH& NIdPrnH, TCcQueue<int>& NIdQ, const PGraph& Graph, const int& SrcNId, const TIntSet& TargetSet, const bool& IsDir, TIntV& PathNIdV); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir=false); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, int& MxDistNId); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, int& MxDistNId, TCcQueue<int>& NIdQ, TCcQueue<int>& DistQ, TIntSet& VisitedH); //template <class PGraph> int GetMxGreedyDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir=false); //template <class PGraph> int GetMxGreedyDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, TCcQueue<int>& NIdQ, TCcQueue<int>& DistQ, TIntSet& VisitedH); //template <class PGraph> PNGraph GetShortPathsSubGraph(const PGraph& Graph, const TIntV& SubGraphNIdV); //template <class PGraph> PGraph GetWccPathsSubGraph(const PGraph& Graph, const TIntV& NIdV); //template <class PGraph> void GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOutEdges, int& TreeSz, int& TreeDepth); } // namespace TSnap //#////////////////////////////////////////////// /// Breath-First-Search class. 
/// The class is meant for executing many BFSs over a fixed graph. This means that the class can keep the hash tables and queues initialized between different calls of the DoBfs() function. template<class PGraph> class TBreathFS { public: PGraph Graph; TSnapQueue<int> Queue; TInt StartNId; TIntH NIdDistH; public: TBreathFS(const PGraph& GraphPt, const bool& InitBigQ=true) : Graph(GraphPt), Queue(InitBigQ?Graph->GetNodes():1024), NIdDistH(InitBigQ?Graph->GetNodes():1024) { } /// Sets the graph to be used by the BFS to GraphPt and resets the data structures. void SetGraph(const PGraph& GraphPt); /// Performs BFS from node id StartNode for at maps MxDist steps by only following in-links (parameter FollowIn = true) and/or out-links (parameter FollowOut = true). int DoBfs(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId=-1, const int& MxDist=TInt::Mx); /// Same functionality as DoBfs with better performance. int DoBfsHybrid(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId=-1, const int& MxDist=TInt::Mx); /// Returns the number of nodes visited/reached by the BFS. int GetNVisited() const { return NIdDistH.Len(); } /// Returns the IDs of the nodes visited/reached by the BFS. void GetVisitedNIdV(TIntV& NIdV) const { NIdDistH.GetKeyV(NIdV); } /// Returns the shortst path distance between SrcNId and DistNId. /// Note you have to first call DoBFs(). SrcNId must be equal to StartNode, otherwise return value is -1. int GetHops(const int& SrcNId, const int& DstNId) const; /// Returns a random shortest path from SrcNId to DstNId. /// Note you have to first call DoBFs(). SrcNId must be equal to StartNode, otherwise return value is -1. 
int GetRndPath(const int& SrcNId, const int& DstNId, TIntV& PathNIdV) const; /* Private variables and functions for DoBfsHybrid */ private: int Stage; // 0, 2: top down, 1: bottom up static const unsigned int alpha = 100; static const unsigned int beta = 20; /* Private functions */ bool TopDownStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn); bool BottomUpStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn); }; template<class PGraph> void TBreathFS<PGraph>::SetGraph(const PGraph& GraphPt) { Graph=GraphPt; const int N=GraphPt->GetNodes(); if (Queue.Reserved() < N) { Queue.Gen(N); } if (NIdDistH.GetReservedKeyIds() < N) { NIdDistH.Gen(N); } } template<class PGraph> int TBreathFS<PGraph>::DoBfs(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId, const int& MxDist) { StartNId = StartNode; IAssert(Graph->IsNode(StartNId)); // const typename PGraph::TObj::TNodeI StartNodeI = Graph->GetNI(StartNode); // IAssertR(StartNodeI.GetOutDeg() > 0, TStr::Fmt("No neighbors from start node %d.", StartNode)); NIdDistH.Clr(false); NIdDistH.AddDat(StartNId, 0); Queue.Clr(false); Queue.Push(StartNId); int v, MaxDist = 0; while (!Queue.Empty()) { const int NId = Queue.Top(); Queue.Pop(); const int Dist = NIdDistH.GetDat(NId); if (Dist == MxDist) { break; } // max distance limit reached const typename PGraph::TObj::TNodeI NodeI = Graph->GetNI(NId); if (FollowOut) { // out-links for (v = 0; v < NodeI.GetOutDeg(); v++) { // out-links const int DstNId = NodeI.GetOutNId(v); if (!NIdDistH.IsKey(DstNId)) { NIdDistH.AddDat(DstNId, Dist + 1); MaxDist = TMath::Mx(MaxDist, Dist + 1); if (DstNId == TargetNId) { return MaxDist; } Queue.Push(DstNId); } } } if (FollowIn) { // in-links for (v = 0; v < NodeI.GetInDeg(); v++) { const int DstNId = NodeI.GetInNId(v); if 
(!NIdDistH.IsKey(DstNId)) { NIdDistH.AddDat(DstNId, Dist + 1); MaxDist = TMath::Mx(MaxDist, Dist + 1); if (DstNId == TargetNId) { return MaxDist; } Queue.Push(DstNId); } } } } return MaxDist; } template<class PGraph> int TBreathFS<PGraph>::DoBfsHybrid(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId, const int& MxDist) { StartNId = StartNode; IAssert(Graph->IsNode(StartNId)); if (TargetNId == StartNode) return 0; const typename PGraph::TObj::TNodeI StartNodeI = Graph->GetNI(StartNode); // Initialize vector TIntV NIdDistV(Graph->GetMxNId() + 1); for (int i = 0; i < NIdDistV.Len(); i++) { NIdDistV.SetVal(i, -1); } TIntV *Frontier = new TIntV(Graph->GetNodes(), 0); TIntV *NextFrontier = new TIntV(Graph->GetNodes(), 0); NIdDistV.SetVal(StartNId, 0); Frontier->Add(StartNId); Stage = 0; int MaxDist = -1; const unsigned int TotalNodes = Graph->GetNodes(); unsigned int UnvisitedNodes = Graph->GetNodes(); while (! Frontier->Empty()) { MaxDist += 1; NextFrontier->Clr(false); if (MaxDist == MxDist) { break; } // max distance limit reached UnvisitedNodes -= Frontier->Len(); if (Stage == 0 && UnvisitedNodes / Frontier->Len() < alpha) { Stage = 1; } else if (Stage == 1 && TotalNodes / Frontier->Len() > beta) { Stage = 2; } // Top down or bottom up depending on stage bool targetFound = false; if (Stage == 0 || Stage == 2) { targetFound = TopDownStep(NIdDistV, Frontier, NextFrontier, MaxDist, TargetNId, FollowOut, FollowIn); } else { targetFound = BottomUpStep(NIdDistV, Frontier, NextFrontier, MaxDist, TargetNId, FollowOut, FollowIn); } if (targetFound) { MaxDist = NIdDistV[TargetNId]; break; } // swap Frontier and NextFrontier TIntV *temp = Frontier; Frontier = NextFrontier; NextFrontier = temp; } delete Frontier; delete NextFrontier; // Transform vector to hash table NIdDistH.Clr(false); for (int NId = 0; NId < NIdDistV.Len(); NId++) { if (NIdDistV[NId] != -1) { NIdDistH.AddDat(NId, NIdDistV[NId]); } } return MaxDist; } template<class 
PGraph> bool TBreathFS<PGraph>::TopDownStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn) { for (TIntV::TIter it = Frontier->BegI(); it != Frontier->EndI(); ++it) { // loop over frontier const int NId = *it; const int Dist = NIdDistV[NId]; IAssert(Dist == MaxDist); // Must equal to MaxDist const typename PGraph::TObj::TNodeI NodeI = Graph->GetNI(NId); if (FollowOut) { for (int v = 0; v < NodeI.GetOutDeg(); v++) { const int NeighborNId = NodeI.GetOutNId(v); if (NIdDistV[NeighborNId] == -1) { NIdDistV.SetVal(NeighborNId, Dist+1); if (NeighborNId == TargetNId) return true; NextFrontier->Add(NeighborNId); } } } if (FollowIn) { for (int v = 0; v < NodeI.GetInDeg(); v++) { const int NeighborNId = NodeI.GetInNId(v); if (NIdDistV[NeighborNId] == -1) { NIdDistV.SetVal(NeighborNId, Dist+1); if (NeighborNId == TargetNId) return true; NextFrontier->Add(NeighborNId); } } } } return false; } template<class PGraph> bool TBreathFS<PGraph>::BottomUpStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn) { for (typename PGraph::TObj::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) { const int NId = NodeI.GetId(); if (NIdDistV[NId] == -1) { if (FollowOut) { for (int v = 0; v < NodeI.GetInDeg(); v++) { const int ParentNId = NodeI.GetInNId(v); if (NIdDistV[ParentNId] == MaxDist) { NIdDistV[NId] = MaxDist + 1; if (NId == TargetNId) return true; NextFrontier->Add(NId); break; } } } if (FollowIn && NIdDistV[NId] == -1) { for (int v = 0; v < NodeI.GetOutDeg(); v++) { const int ParentNId = NodeI.GetOutNId(v); if (NIdDistV[ParentNId] == MaxDist) { NIdDistV[NId] = MaxDist + 1; if (NId == TargetNId) return true; NextFrontier->Add(NId); break; } } } } } return false; } template<class PGraph> int TBreathFS<PGraph>::GetHops(const int& SrcNId, const int& DstNId) const { TInt Dist; if (SrcNId!=StartNId) { 
return -1; } if (! NIdDistH.IsKeyGetDat(DstNId, Dist)) { return -1; } return Dist.Val; } template<class PGraph> int TBreathFS<PGraph>::GetRndPath(const int& SrcNId, const int& DstNId, TIntV& PathNIdV) const { PathNIdV.Clr(false); if (SrcNId!=StartNId || ! NIdDistH.IsKey(DstNId)) { return -1; } PathNIdV.Add(DstNId); TIntV CloserNIdV; int CurNId = DstNId; TInt CurDist, NextDist; while (CurNId != SrcNId) { typename PGraph::TObj::TNodeI NI = Graph->GetNI(CurNId); IAssert(NIdDistH.IsKeyGetDat(CurNId, CurDist)); CloserNIdV.Clr(false); for (int e = 0; e < NI.GetDeg(); e++) { const int Next = NI.GetNbrNId(e); if (NIdDistH.IsKeyGetDat(Next, NextDist)) { if (NextDist == CurDist-1) { CloserNIdV.Add(Next); } } } IAssert(! CloserNIdV.Empty()); CurNId = CloserNIdV[TInt::Rnd.GetUniDevInt(CloserNIdV.Len())]; PathNIdV.Add(CurNId); } PathNIdV.Reverse(); return PathNIdV.Len()-1; } ///////////////////////////////////////////////// // Implementation namespace TSnap { template <class PGraph> PNGraph GetBfsTree(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, FollowOut, FollowIn, -1, TInt::Mx); PNGraph Tree = TNGraph::New(); BFS.NIdDistH.SortByDat(); for (int i = 0; i < BFS.NIdDistH.Len(); i++) { const int NId = BFS.NIdDistH.GetKey(i); const int Dist = BFS.NIdDistH[i]; typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); if (!Tree->IsNode(NId)) { Tree->AddNode(NId); } if (FollowOut) { for (int e = 0; e < NI.GetInDeg(); e++) { const int Prev = NI.GetInNId(e); if (Tree->IsNode(Prev) && BFS.NIdDistH.GetDat(Prev)==Dist-1) { Tree->AddEdge(Prev, NId); } } } if (FollowIn) { for (int e = 0; e < NI.GetOutDeg(); e++) { const int Prev = NI.GetOutNId(e); if (Tree->IsNode(Prev) && BFS.NIdDistH.GetDat(Prev)==Dist-1) { Tree->AddEdge(Prev, NId); } } } } return Tree; } template <class PGraph> PNGraph MyGetBfsTree(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn) { 
TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, FollowOut, FollowIn, -1, TInt::Mx); PNGraph Tree = TNGraph::New(); BFS.NIdDistH.SortByDat(); for (int i = 0; i < BFS.NIdDistH.Len(); i++) { const int NId = BFS.NIdDistH.GetKey(i); const int Dist = BFS.NIdDistH[i]; typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); if (!Tree->IsNode(NId)) { Tree->AddNode(NId); //printf("%d\n", NId); } if (FollowOut) { for (int e = 0; e < NI.GetInDeg(); e++) { const int Prev = NI.GetInNId(e); if (Tree->IsNode(Prev) && BFS.NIdDistH.GetDat(Prev) == Dist - 1) { //printf("%d -> %d\n", Prev, NId); Tree->AddEdge(Prev, NId); break; } } } if (FollowIn) { for (int e = 0; e < NI.GetOutDeg(); e++) { const int Prev = NI.GetOutNId(e); if (Tree->IsNode(Prev) && BFS.NIdDistH.GetDat(Prev) == Dist - 1) { Tree->AddEdge(Prev, NId); break; } } } } return Tree; } //template <class PGraph> //int GetBfsPath(const PGraph& Graph, const int& SrcNId, const int& DstNId, TIntV& PathNIdV, const bool& IsDir = false) { // PathNIdV.Clr(); // TBreathFS<PGraph> BFS(Graph); // return BFS.GetRndPath(SrcNId, DstNId, PathNIdV); // //} template <class PGraph> int GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, int& TreeSz, int& TreeDepth) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, FollowOut, FollowIn, -1, TInt::Mx); TreeSz = BFS.NIdDistH.Len(); TreeDepth = 0; for (int i = 0; i < BFS.NIdDistH.Len(); i++) { TreeDepth = TMath::Mx(TreeDepth, BFS.NIdDistH[i].Val); } return TreeSz; } template <class PGraph> int GetNodesAtHop(const PGraph& Graph, const int& StartNId, const int& Hop, TIntV& NIdV, const bool& IsDir) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, true, !IsDir, -1, Hop); NIdV.Clr(false); for (int i = 0; i < BFS.NIdDistH.Len(); i++) { if (BFS.NIdDistH[i] == Hop) { NIdV.Add(BFS.NIdDistH.GetKey(i)); } } return NIdV.Len(); } template <class PGraph> int GetNodesAtHops(const PGraph& Graph, const int& StartNId, TIntPrV& HopCntV, const bool& IsDir) { 
TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, true, !IsDir, -1, TInt::Mx); TIntH HopCntH; for (int i = 0; i < BFS.NIdDistH.Len(); i++) { HopCntH.AddDat(BFS.NIdDistH[i]) += 1; } HopCntH.GetKeyDatPrV(HopCntV); HopCntV.Sort(); return HopCntV.Len(); } template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir, const int& MaxDist) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(SrcNId, true, ! IsDir, -1, MaxDist); NIdToDistH.Clr(); NIdToDistH.Swap(BFS.NIdDistH); return NIdToDistH[NIdToDistH.Len()-1]; } template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(SrcNId, true, ! IsDir, DstNId, TInt::Mx); return BFS.GetHops(SrcNId, DstNId); } template <class PGraph> int GetBfsFullDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir) { int FullDiam; double EffDiam; GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam); return FullDiam; } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir) { int FullDiam; double EffDiam; GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam); return EffDiam; } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam) { double AvgDiam; EffDiam = -1; FullDiam = -1; return GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam, AvgDiam); } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam, double& AvgSPL) { EffDiam = -1; FullDiam = -1; AvgSPL = -1; TIntFltH DistToCntH; TBreathFS<PGraph> BFS(Graph); // shotest paths TIntV NodeIdV; Graph->GetNIdV(NodeIdV); NodeIdV.Shuffle(TInt::Rnd); for (int tries = 0; tries < TMath::Mn(NTestNodes, Graph->GetNodes()); tries++) { const int NId = NodeIdV[tries]; BFS.DoBfs(NId, true, ! 
IsDir, -1, TInt::Mx); for (int i = 0; i < BFS.NIdDistH.Len(); i++) { DistToCntH.AddDat(BFS.NIdDistH[i]) += 1; } } TIntFltKdV DistNbrsPdfV; double SumPathL=0, PathCnt=0; for (int i = 0; i < DistToCntH.Len(); i++) { DistNbrsPdfV.Add(TIntFltKd(DistToCntH.GetKey(i), DistToCntH[i])); SumPathL += DistToCntH.GetKey(i) * DistToCntH[i]; PathCnt += DistToCntH[i]; } DistNbrsPdfV.Sort(); EffDiam = TSnap::TSnapDetail::CalcEffDiamPdf(DistNbrsPdfV, 0.9); // effective diameter (90-th percentile) FullDiam = DistNbrsPdfV.Last().Key; // approximate full diameter (max shortest path length over the sampled nodes) AvgSPL = SumPathL/PathCnt; // average shortest path length return EffDiam; } template <class PGraph> double GetBfsEffDiamAll(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam, double& AvgSPL) { return GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam, AvgSPL); } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const TIntV& SubGraphNIdV, const bool& IsDir, double& EffDiam, int& FullDiam) { EffDiam = -1; FullDiam = -1; TIntFltH DistToCntH; TBreathFS<PGraph> BFS(Graph); // shotest paths TIntV NodeIdV(SubGraphNIdV); NodeIdV.Shuffle(TInt::Rnd); TInt Dist; for (int tries = 0; tries < TMath::Mn(NTestNodes, SubGraphNIdV.Len()); tries++) { const int NId = NodeIdV[tries]; BFS.DoBfs(NId, true, ! 
IsDir, -1, TInt::Mx); for (int i = 0; i < SubGraphNIdV.Len(); i++) { if (BFS.NIdDistH.IsKeyGetDat(SubGraphNIdV[i], Dist)) { DistToCntH.AddDat(Dist) += 1; } } } TIntFltKdV DistNbrsPdfV; for (int i = 0; i < DistToCntH.Len(); i++) { DistNbrsPdfV.Add(TIntFltKd(DistToCntH.GetKey(i), DistToCntH[i])); } DistNbrsPdfV.Sort(); EffDiam = TSnap::TSnapDetail::CalcEffDiamPdf(DistNbrsPdfV, 0.9); // effective diameter (90-th percentile) FullDiam = DistNbrsPdfV.Last().Key; // approximate full diameter (max shortest path length over the sampled nodes) return EffDiam; // average shortest path length } template <class PGraph> int GetShortestDistances(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, TIntV& ShortestDists) { PSOut StdOut = TStdOut::New(); int MxNId = Graph->GetMxNId(); int NonNodeDepth = 2147483647; // INT_MAX int InfDepth = 2147483646; // INT_MAX - 1 ShortestDists.Gen(MxNId); for (int NId = 0; NId < MxNId; NId++) { if (Graph->IsNode(NId)) { ShortestDists[NId] = InfDepth; } else { ShortestDists[NId] = NonNodeDepth; } } TIntV Vec1(MxNId, 0); // ensure enough capacity TIntV Vec2(MxNId, 0); // ensure enough capacity ShortestDists[StartNId] = 0; TIntV* PCurV = &Vec1; PCurV->Add(StartNId); TIntV* PNextV = &Vec2; int Depth = 0; // current depth while (!PCurV->Empty()) { Depth++; // increase depth for (int i = 0; i < PCurV->Len(); i++) { int NId = PCurV->GetVal(i); typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); for (int e = 0; e < NI.GetOutDeg(); e++) { const int OutNId = NI.GetOutNId(e); if (ShortestDists[OutNId].Val == InfDepth) { ShortestDists[OutNId] = Depth; PNextV->Add(OutNId); } } } // swap pointer, no copying TIntV* Tmp = PCurV; PCurV = PNextV; PNextV = Tmp; // clear next PNextV->Reduce(0); // reduce length, does not initialize new array } return Depth-1; } #ifdef USE_OPENMP template <class PGraph> int GetShortestDistancesMP2(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, TIntV& 
ShortestDists) { int MxNId = Graph->GetMxNId(); int NonNodeDepth = 2147483647; // INT_MAX int InfDepth = 2147483646; // INT_MAX - 1 ShortestDists.Gen(MxNId); #pragma omp parallel for schedule(dynamic,10000) for (int NId = 0; NId < MxNId; NId++) { if (Graph->IsNode(NId)) { ShortestDists[NId] = InfDepth; } else { ShortestDists[NId] = NonNodeDepth; } } TIntV Vec1(MxNId, 0); // ensure enough capacity TIntV Vec2(MxNId, 0); // ensure enough capacity ShortestDists[StartNId] = 0; TIntV* PCurV = &Vec1; PCurV->Add(StartNId); TIntV* PNextV = &Vec2; int Depth = 0; // current depth while (!PCurV->Empty()) { Depth++; // increase depth #pragma omp parallel for schedule(dynamic,10000) for (int i = 0; i < PCurV->Len(); i++) { int NId = PCurV->GetVal(i); typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); for (int e = 0; e < NI.GetOutDeg(); e++) { const int OutNId = NI.GetOutNId(e); if (__sync_bool_compare_and_swap(&(ShortestDists[OutNId].Val), InfDepth, Depth)) { PNextV->AddMP(OutNId); } } } // #pragma omp parallel for schedule(dynamic,10000) // for (int NId = 0; NId < MxNId; NId++) { // if (ShortestDists[NId] == InfDepth) { // typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); // for (int e = 0; e < NI.GetInDeg(); e++) { // const int InNId = NI.GetInNId(e); // if (ShortestDists[InNId] < Depth) { // ShortestDists[NId] = Depth; // PNextV->AddMP(NId); // break; // } // } // } // } // swap pointer, no copying TIntV* Tmp = PCurV; PCurV = PNextV; PNextV = Tmp; // clear next PNextV->Reduce(0); // reduce length, does not initialize new array } return Depth-1; } #endif // USE_OPENMP } // namespace TSnap
laplace2d.c
/* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <math.h> #include <string.h> #include <openacc.h> #include "timer.h" int main(int argc, char** argv) { int n = 4096; int m = 4096; int iter_max = 1000; const float pi = 2.0f * asinf(1.0f); const float tol = 1.0e-5f; float error = 1.0f; float A[n][m]; float Anew[n][m]; float y0[n]; memset(A, 0, n * m * sizeof(float)); // set boundary conditions for (int i = 0; i < m; i++) { A[0][i] = 0.f; A[n-1][i] = 0.f; } for (int j = 0; j < n; j++) { y0[j] = sinf(pi * j / (n-1)); A[j][0] = y0[j]; A[j][m-1] = y0[j]*expf(-pi); } #if _OPENACC acc_init(acc_device_nvidia); #endif printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m); StartTimer(); int iter = 0; #pragma omp parallel for shared(Anew) for (int i = 1; i < m; i++) { Anew[0][i] = 0.f; Anew[n-1][i] = 0.f; } #pragma omp parallel for shared(Anew) for (int j = 1; j < n; j++) { Anew[j][0] = y0[j]; Anew[j][m-1] = y0[j]*expf(-pi); } #pragma acc data copy(A), create(Anew) while ( error > tol && iter < iter_max ) { error = 0.f; #pragma omp parallel for shared(m, n, Anew, A) #pragma acc kernels loop gang(32), vector(16) for( int j = 1; j < n-1; j++) { #pragma acc loop gang(16), vector(32) for( int i = 1; i < m-1; i++ ) { Anew[j][i] = 0.25f * ( A[j][i+1] + A[j][i-1] + A[j-1][i] + A[j+1][i]); error = fmaxf( error, fabsf(Anew[j][i]-A[j][i])); } } #pragma omp parallel for shared(m, n, Anew, A) #pragma acc kernels loop for( int j = 1; j < n-1; j++) { #pragma acc loop gang(16), vector(32) for( int i = 1; i < m-1; i++ ) { A[j][i] = Anew[j][i]; } } if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error); iter++; } double runtime = GetTimer(); printf(" total: %f s\n", runtime / 1000.f); }
cg.20190408_morph.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include "globals.h" #include "randdp.h" #include "timers.h" #include <omp.h> //--------------------------------------------------------------------- #define CACHE_LINE_SIZE_PAD 128 #define INT_PAD_SIZE CACHE_LINE_SIZE_PAD/sizeof(int) #define DOUBLE_PAD_SIZE CACHE_LINE_SIZE_PAD/sizeof(double) /* common / main_int_mem / */ static int colidx[NZ]; static int rowstr[NA+1]; static int iv[NA]; static int arow[NA]; static int acol[NAZ]; /* common / main_flt_mem / */ static double aelt[NAZ]; static double a[NZ]; static double x[NA+2]; static double z[NA+2]; static double p[NA+2]; static double q[NA+2]; static double r[NA+2]; /* common / partit_size / */ static int naa; static int nzz; static int firstrow; static int lastrow; static int firstcol; static int lastcol; /* common /urando/ */ static double amult; static double tran; /* common /timers/ */ static logical timeron; //--------------------------------------------------------------------- //--------------------------------------------------------------------- static void conj_grad(int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double *rnorm); static void makea(int n, int nz, double a[], int colidx[], int rowstr[], int firstrow, int lastrow, int firstcol, int lastcol, int arow[], int acol[][NONZER+1], double aelt[][NONZER+1], int iv[]); static void sparse(double a[], int colidx[], int rowstr[], int n, int nz, int nozer, int arow[], int acol[][NONZER+1], double aelt[][NONZER+1], int firstrow, int lastrow, int nzloc[], double rcond, double shift); static void sprnvc(int n, int nz, int nn1, double v[], int iv[]); static int icnvrt(double x, int ipwr2); static void vecset(int n, double v[], int iv[], int *nzv, int i, double val); //--------------------------------------------------------------------- int main(int argc, char *argv[]) { omp_set_num_threads(omp_get_num_procs()); int i, j, k, it; double zeta; double 
rnorm; double norm_temp1, norm_temp2; double t, mflops, tmax; //char Class; logical verified; double zeta_verify_value, epsilon, err; char *t_names[T_last]; for (i = 0; i < T_last; i++) { timer_clear(i); } timer_start(T_init); firstrow = 0; lastrow = NA-1; firstcol = 0; lastcol = NA-1; zeta_verify_value = VALID_RESULT; printf("\nCG start...\n\n"); printf(" Size: %11d\n", NA); printf(" Iterations: %5d\n", NITER); printf("\n"); naa = NA; nzz = NZ; //--------------------------------------------------------------------- // Inialize random number generator //--------------------------------------------------------------------- tran = 314159265.0; amult = 1220703125.0; zeta = randlc(&tran, amult); //--------------------------------------------------------------------- // //--------------------------------------------------------------------- makea(naa, nzz, a, colidx, rowstr, firstrow, lastrow, firstcol, lastcol, arow, (int (*)[NONZER+1])(void*)acol, (double (*)[NONZER+1])(void*)aelt, iv); //--------------------------------------------------------------------- // Note: as a result of the above call to makea: // values of j used in indexing rowstr go from 0 --> lastrow-firstrow // values of colidx which are col indexes go from firstcol --> lastcol // So: // Shift the col index vals from actual (firstcol --> lastcol ) // to local, i.e., (0 --> lastcol-firstcol) //--------------------------------------------------------------------- //#pragma omp parallel default(shared) private(i,j,k) //{ //#pragma omp for nowait for (j = 0; j < lastrow - firstrow + 1; j++) { for (k = rowstr[j]; k < rowstr[j+1]; k++) { colidx[k] = colidx[k] - firstcol; } } //--------------------------------------------------------------------- // set starting vector to (1, 1, .... 
1) //--------------------------------------------------------------------- //#pragma omp for nowait for (i = 0; i < NA+1; i++) { x[i] = 1.0; } //#pragma omp for nowait for (j = 0; j < lastcol - firstcol + 1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = 0.0; p[j] = 0.0; } //} zeta = 0.0; //--------------------------------------------------------------------- //----> // Do one iteration untimed to init all code and data page tables //----> (then reinit, start timing, to niter its) //--------------------------------------------------------------------- for (it = 1; it <= 1; it++) { //--------------------------------------------------------------------- // The call to the conjugate gradient routine: //--------------------------------------------------------------------- conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm); //--------------------------------------------------------------------- // zeta = shift + 1/(x.z) // So, first: (x.z) // Also, find norm of z // So, first: (z.z) //--------------------------------------------------------------------- norm_temp1 = 0.0; norm_temp2 = 0.0; //#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp1,norm_temp2) for (j = 0; j < lastcol - firstcol + 1; j++) { norm_temp1 = norm_temp1 + x[j] * z[j]; norm_temp2 = norm_temp2 + z[j] * z[j]; } norm_temp2 = 1.0 / sqrt(norm_temp2); //--------------------------------------------------------------------- // Normalize z to obtain x //--------------------------------------------------------------------- //#pragma omp parallel for default(shared) private(j) for (j = 0; j < lastcol - firstcol + 1; j++) { x[j] = norm_temp2 * z[j]; } } // end of do one iteration untimed //--------------------------------------------------------------------- // set starting vector to (1, 1, .... 
1) //--------------------------------------------------------------------- //#pragma omp parallel for default(shared) private(i) for (i = 0; i < NA+1; i++) { x[i] = 1.0; } zeta = 0.0; timer_stop(T_init); printf(" Initialization time = %15.3f seconds\n", timer_read(T_init)); timer_start(T_bench); //--------------------------------------------------------------------- //----> // Main Iteration for inverse power method //----> //--------------------------------------------------------------------- for (it = 1; it <= NITER; it++) { //--------------------------------------------------------------------- // The call to the conjugate gradient routine: //--------------------------------------------------------------------- if (timeron) timer_start(T_conj_grad); conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm); if (timeron) timer_stop(T_conj_grad); //--------------------------------------------------------------------- // zeta = shift + 1/(x.z) // So, first: (x.z) // Also, find norm of z // So, first: (z.z) //--------------------------------------------------------------------- norm_temp1 = 0.0; norm_temp2 = 0.0; //#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp1,norm_temp2) for (j = 0; j < lastcol - firstcol + 1; j++) { norm_temp1 = norm_temp1 + x[j]*z[j]; norm_temp2 = norm_temp2 + z[j]*z[j]; } norm_temp2 = 1.0 / sqrt(norm_temp2); zeta = SHIFT + 1.0 / norm_temp1; if (it == 1) printf("\n iteration ||r|| zeta\n"); printf(" %5d %20.14E%20.13f\n", it, rnorm, zeta); //--------------------------------------------------------------------- // Normalize z to obtain x //--------------------------------------------------------------------- //#pragma omp parallel for default(shared) private(j) for (j = 0; j < lastcol - firstcol + 1; j++) { x[j] = norm_temp2 * z[j]; } } // end of main iter inv pow meth timer_stop(T_bench); //--------------------------------------------------------------------- // End of timed section 
//--------------------------------------------------------------------- t = timer_read(T_bench); printf("\nComplete...\n"); epsilon = 1.0e-10; err = fabs(zeta - zeta_verify_value) / zeta_verify_value; if (err <= epsilon) { verified = true; printf(" VERIFICATION SUCCESSFUL\n"); printf(" Zeta is %20.13E\n", zeta); printf(" Error is %20.13E\n", err); } else { verified = false; printf(" VERIFICATION FAILED\n"); printf(" Zeta %20.13E\n", zeta); printf(" The correct zeta is %20.13E\n", zeta_verify_value); } printf("\n\nExecution time : %lf seconds\n\n", t); return 0; } //--------------------------------------------------------------------- // Floaging point arrays here are named as in spec discussion of // CG algorithm //--------------------------------------------------------------------- static void conj_grad(int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double *rnorm) { int j, k; int cgit, cgitmax = 25; double d, sum, rho, rho0, alpha, beta; rho = 0.0; //--------------------------------------------------------------------- // Initialize the CG algorithm: //--------------------------------------------------------------------- #pragma omp parallel default(shared) private(j) { #pragma omp for for (j = 0; j < naa+1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = x[j]; p[j] = r[j]; } //--------------------------------------------------------------------- // rho = r.r // Now, obtain the norm of r: First, sum squares of r elements locally... 
//--------------------------------------------------------------------- #pragma omp for reduction(+:rho) for (j = 0; j < lastcol - firstcol + 1; j++) { rho = rho + r[j]*r[j]; } } //--------------------------------------------------------------------- //----> // The conj grad iteration loop //----> //--------------------------------------------------------------------- for (cgit = 1; cgit <= cgitmax; cgit++) { //--------------------------------------------------------------------- // q = A.p // The partition submatrix-vector multiply: use workspace w //--------------------------------------------------------------------- // // NOTE: this version of the multiply is actually (slightly: maybe %5) // faster on the sp2 on 16 nodes than is the unrolled-by-2 version // below. On the Cray t3d, the reverse is true, i.e., the // unrolled-by-two version is some 10% faster. // The unrolled-by-8 version below is significantly faster // on the Cray t3d - overall speed of code is 1.5 times faster. rho0 = rho; d = 0.0; rho = 0.0; #pragma omp parallel default(shared) { #pragma omp for private(sum, j, k) for (j = 0; j < lastrow - firstrow + 1; j++) { sum = 0.0; for (k = rowstr[j]; k < rowstr[j+1]; k++) { sum = sum + a[k]*p[colidx[k]]; } q[j] = sum; } /* #pragma omp for for (j = 0; j < lastrow - firstrow + 1; j++) { double sum1 = 0.0; double sum2 = 0.0; int start_idx = rowstr[j]; int end_idx = rowstr[j+1]; int remainder = (end_idx-start_idx)%2; if(remainder == 1){ sum1 = sum1 + a[start_idx]*p[colidx[start_idx]]; } for (k = start_idx+remainder; k < end_idx; k+=2) { sum1 = sum1 + a[k]*p[colidx[k]]; sum2 = sum2 + a[k+1]*p[colidx[k+1]]; } q[j] = sum1+sum2; } */ /* #pragma omp for for (j = 0; j < lastrow - firstrow + 1; j++) { double sum0 = 0.0; double sum1 = 0.0; double sum2 = 0.0; double sum3 = 0.0; double sum4 = 0.0; double sum5 = 0.0; double sum6 = 0.0; double sum7 = 0.0; int start_idx = rowstr[j]; int end_idx = rowstr[j+1]; int remainder = (end_idx-start_idx)%8; for (k = start_idx; k 
< start_idx+remainder; k++){ sum0 = sum0 + a[k]*p[colidx[k]]; } for (k = start_idx+remainder; k < end_idx; k+=8) { sum0 = sum0 + a[k]*p[colidx[k]]; sum1 = sum1 + a[k+1]*p[colidx[k+1]]; sum2 = sum2 + a[k+2]*p[colidx[k+2]]; sum3 = sum3 + a[k+3]*p[colidx[k+3]]; sum4 = sum4 + a[k+4]*p[colidx[k+4]]; sum5 = sum5 + a[k+5]*p[colidx[k+5]]; sum6 = sum6 + a[k+6]*p[colidx[k+6]]; sum7 = sum7 + a[k+7]*p[colidx[k+7]]; } q[j] = sum0+sum1+sum2+sum3+sum4+sum5+sum6+sum7; } */ /* #pragma omp for private(j,k,sum) for (j = 0; j <= lastrow-firstrow+1; j++) { int iresidue; int i = rowstr[j]; iresidue = (rowstr[j+1]-i) % 8; sum = 0.0; for (k = i; k <= i+iresidue-1; k++) { sum = sum + a[k] * p[colidx[k]]; } for (k = i+iresidue; k <= rowstr[j+1]-8; k += 8) { sum = sum + a[k ] * p[colidx[k ]] + a[k+1] * p[colidx[k+1]] + a[k+2] * p[colidx[k+2]] + a[k+3] * p[colidx[k+3]] + a[k+4] * p[colidx[k+4]] + a[k+5] * p[colidx[k+5]] + a[k+6] * p[colidx[k+6]] + a[k+7] * p[colidx[k+7]]; } q[j] = sum; } */ //--------------------------------------------------------------------- // Obtain p.q //--------------------------------------------------------------------- #pragma omp for private(j) reduction(+:d) for (j = 0; j < lastcol - firstcol + 1; j++) { d = d + p[j]*q[j]; } //--------------------------------------------------------------------- // Obtain alpha = rho / (p.q) //--------------------------------------------------------------------- #pragma omp single alpha = rho0 / d; //--------------------------------------------------------------------- // Obtain z = z + alpha*p // and r = r - alpha*q //--------------------------------------------------------------------- #pragma omp for private(j) for (j = 0; j < lastcol - firstcol + 1; j++) { z[j] = z[j] + alpha*p[j]; r[j] = r[j] - alpha*q[j]; } //--------------------------------------------------------------------- // rho = r.r // Now, obtain the norm of r: First, sum squares of r elements locally... 
//--------------------------------------------------------------------- #pragma omp for private(j) reduction(+:rho) for (j = 0; j < lastcol - firstcol + 1; j++) { rho = rho + r[j]*r[j]; } //--------------------------------------------------------------------- // Obtain beta: //--------------------------------------------------------------------- #pragma omp single beta = rho / rho0; //--------------------------------------------------------------------- // p = r + beta*p //--------------------------------------------------------------------- #pragma omp for private(j) for (j = 0; j < lastcol - firstcol + 1; j++) { p[j] = r[j] + beta*p[j]; } } } // end of do cgit=1,cgitmax //--------------------------------------------------------------------- // Compute residual norm explicitly: ||r|| = ||x - A.z|| // First, form A.z // The partition submatrix-vector multiply //--------------------------------------------------------------------- /* for (j = 0; j < lastrow - firstrow + 1; j++) { printf("j = %d, colidx[%d] = %d, z[%d] = %lf\n", j, j, colidx[j], colidx[j], z[colidx[j]][0]); } */ double d_tmp; sum = 0.0; #pragma omp parallel default(shared) private(j, d, d_tmp) shared(sum) { #pragma omp for for (j = 0; j < lastrow - firstrow + 1; j++) { d = 0.0; for (k = rowstr[j]; k < rowstr[j+1]; k++) { d = d + a[k]*z[colidx[k]]; } r[j] = d; } //--------------------------------------------------------------------- // At this point, r contains A.z //--------------------------------------------------------------------- #pragma omp for reduction(+:sum) for (j = 0; j < lastcol-firstcol+1; j++) { d_tmp = x[j] - r[j]; sum = sum + d_tmp*d_tmp; } } *rnorm = sqrt(sum); } //--------------------------------------------------------------------- // generate the test problem for benchmark 6 // makea generates a sparse matrix with a // prescribed sparsity distribution // // parameter type usage // // input // // n i number of cols/rows of matrix // nz i nonzeros as declared array size // rcond r*8 
condition number // shift r*8 main diagonal shift // // output // // a r*8 array for nonzeros // colidx i col indices // rowstr i row pointers // // workspace // // iv, arow, acol i // aelt r*8 //--------------------------------------------------------------------- static void makea(int n, int nz, double a[], int colidx[], int rowstr[], int firstrow, int lastrow, int firstcol, int lastcol, int arow[], int acol[][NONZER+1], double aelt[][NONZER+1], int iv[]) { int iouter, ivelt, nzv, nn1; int ivc[NONZER+1]; double vc[NONZER+1]; //--------------------------------------------------------------------- // nonzer is approximately (int(sqrt(nnza /n))); //--------------------------------------------------------------------- //--------------------------------------------------------------------- // nn1 is the smallest power of two not less than n //--------------------------------------------------------------------- nn1 = 1; do { nn1 = 2 * nn1; } while (nn1 < n); //--------------------------------------------------------------------- // Generate nonzero positions and save for the use in sparse. //--------------------------------------------------------------------- for (iouter = 0; iouter < n; iouter++) { nzv = NONZER; sprnvc(n, nzv, nn1, vc, ivc); vecset(n, vc, ivc, &nzv, iouter+1, 0.5); arow[iouter] = nzv; for (ivelt = 0; ivelt < nzv; ivelt++) { acol[iouter][ivelt] = ivc[ivelt] - 1; aelt[iouter][ivelt] = vc[ivelt]; } } //--------------------------------------------------------------------- // ... 
make the sparse matrix from list of elements with duplicates // (iv is used as workspace) //--------------------------------------------------------------------- sparse(a, colidx, rowstr, n, nz, NONZER, arow, acol, aelt, firstrow, lastrow, iv, RCOND, SHIFT); } //--------------------------------------------------------------------- // rows range from firstrow to lastrow // the rowstr pointers are defined for nrows = lastrow-firstrow+1 values //--------------------------------------------------------------------- static void sparse(double a[], int colidx[], int rowstr[], int n, int nz, int nozer, int arow[], int acol[][NONZER+1], double aelt[][NONZER+1], int firstrow, int lastrow, int nzloc[], double rcond, double shift) { int nrows; //--------------------------------------------------- // generate a sparse matrix from a list of // [col, row, element] tri //--------------------------------------------------- int i, j, j1, j2, nza, k, kk, nzrow, jcol; double size, scale, ratio, va; logical cont40; //--------------------------------------------------------------------- // how many rows of result //--------------------------------------------------------------------- nrows = lastrow - firstrow + 1; //--------------------------------------------------------------------- // ...count the number of triples in each row //--------------------------------------------------------------------- #pragma omp parallel for for (j = 0; j < nrows+1; j++) { rowstr[j] = 0; } for (i = 0; i < n; i++) { for (nza = 0; nza < arow[i]; nza++) { j = acol[i][nza] + 1; rowstr[j] = rowstr[j] + arow[i]; } } rowstr[0] = 0; for (j = 1; j < nrows+1; j++) { rowstr[j] = rowstr[j] + rowstr[j-1]; } nza = rowstr[nrows] - 1; //--------------------------------------------------------------------- // ... 
rowstr(j) now is the location of the first nonzero // of row j of a //--------------------------------------------------------------------- if (nza > nz) { printf("Space for matrix elements exceeded in sparse\n"); printf("nza, nzmax = %d, %d\n", nza, nz); exit(EXIT_FAILURE); } //--------------------------------------------------------------------- // ... preload data pages //--------------------------------------------------------------------- #pragma omp parallel for private(j, k) for (j = 0; j < nrows; j++) { for (k = rowstr[j]; k < rowstr[j+1]; k++) { a[k] = 0.0; colidx[k] = -1; } nzloc[j] = 0; } //--------------------------------------------------------------------- // ... generate actual values by summing duplicates //--------------------------------------------------------------------- size = 1.0; ratio = pow(rcond, (1.0 / (double)(n))); for (i = 0; i < n; i++) { for (nza = 0; nza < arow[i]; nza++) { j = acol[i][nza]; scale = size * aelt[i][nza]; for (nzrow = 0; nzrow < arow[i]; nzrow++) { jcol = acol[i][nzrow]; va = aelt[i][nzrow] * scale; //-------------------------------------------------------------------- // ... add the identity * rcond to the generated matrix to bound // the smallest eigenvalue from below by rcond //-------------------------------------------------------------------- if (jcol == j && j == i) { va = va + rcond - shift; } cont40 = false; for (k = rowstr[j]; k < rowstr[j+1]; k++) { if (colidx[k] > jcol) { //---------------------------------------------------------------- // ... insert colidx here orderly //---------------------------------------------------------------- for (kk = rowstr[j+1]-2; kk >= k; kk--) { if (colidx[kk] > -1) { a[kk+1] = a[kk]; colidx[kk+1] = colidx[kk]; } } colidx[k] = jcol; a[k] = 0.0; cont40 = true; break; } else if (colidx[k] == -1) { colidx[k] = jcol; cont40 = true; break; } else if (colidx[k] == jcol) { //-------------------------------------------------------------- // ... 
mark the duplicated entry //-------------------------------------------------------------- nzloc[j] = nzloc[j] + 1; cont40 = true; break; } } if (cont40 == false) { printf("internal error in sparse: i=%d\n", i); exit(EXIT_FAILURE); } a[k] = a[k] + va; } } size = size * ratio; } //--------------------------------------------------------------------- // ... remove empty entries and generate final results //--------------------------------------------------------------------- for (j = 1; j < nrows; j++) { nzloc[j] = nzloc[j] + nzloc[j-1]; } for (j = 0; j < nrows; j++) { if (j > 0) { j1 = rowstr[j] - nzloc[j-1]; } else { j1 = 0; } j2 = rowstr[j+1] - nzloc[j]; nza = rowstr[j]; for (k = j1; k < j2; k++) { a[k] = a[nza]; colidx[k] = colidx[nza]; nza = nza + 1; } } #pragma omp parallel for for (j = 1; j < nrows+1; j++) { rowstr[j] = rowstr[j] - nzloc[j-1]; } nza = rowstr[nrows] - 1; } //--------------------------------------------------------------------- // generate a sparse n-vector (v, iv) // having nzv nonzeros // // mark(i) is set to 1 if position i is nonzero. // mark is all zero on entry and is reset to all zero before exit // this corrects a performance bug found by John G. Lewis, caused by // reinitialization of mark on every one of the n calls to sprnvc //--------------------------------------------------------------------- static void sprnvc(int n, int nz, int nn1, double v[], int iv[]) { int nzv, ii, i; double vecelt, vecloc; nzv = 0; while (nzv < nz) { vecelt = randlc(&tran, amult); //--------------------------------------------------------------------- // generate an integer between 1 and n in a portable manner //--------------------------------------------------------------------- vecloc = randlc(&tran, amult); i = icnvrt(vecloc, nn1) + 1; if (i > n) continue; //--------------------------------------------------------------------- // was this integer generated already? 
//--------------------------------------------------------------------- logical was_gen = false; for (ii = 0; ii < nzv; ii++) { if (iv[ii] == i) { was_gen = true; break; } } if (was_gen) continue; v[nzv] = vecelt; iv[nzv] = i; nzv = nzv + 1; } } //--------------------------------------------------------------------- // scale a double precision number x in (0,1) by a power of 2 and chop it //--------------------------------------------------------------------- static int icnvrt(double x, int ipwr2) { return (int)(ipwr2 * x); } //--------------------------------------------------------------------- // set ith element of sparse vector (v, iv) with // nzv nonzeros to val //--------------------------------------------------------------------- static void vecset(int n, double v[], int iv[], int *nzv, int i, double val) { int k; logical set; set = false; for (k = 0; k < *nzv; k++) { if (iv[k] == i) { v[k] = val; set = true; } } if (set == false) { v[*nzv] = val; iv[*nzv] = i; *nzv = *nzv + 1; } }
/* ==== file: gemver.pluto.par.c ==== */
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #define alpha 1 #define beta 1 double A[N][N +20]; double B[N][N +20]; double x[N]; double u1[N]; double u2[N]; double v1[N]; double v2[N]; double w[N]; double y[N]; double z[N]; void init_arrays() { int i, j; for (i=0; i<N; i++) { u1[i] = i; u2[i] = (i+1)/N/2.0; v1[i] = (i+1)/N/4.0; v2[i] = (i+1)/N/6.0; y[i] = (i+1)/N/8.0; z[i] = (i+1)/N/9.0; x[i] = 0.0; w[i] = 0.0; for (j=0; j<N; j++) { A[i][j] = ((double) i*j)/N; } } } double rtclock() { struct timezone tzp; struct timeval tp; int stat; gettimeofday (&tp, &tzp); return (tp.tv_sec + tp.tv_usec*1.0e-6); } int main() { init_arrays(); double annot_t_start=0, annot_t_end=0, annot_t_total=0; int annot_i; for (annot_i=0; annot_i<REPS; annot_i++) { annot_t_start = rtclock(); #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define S1(zT0,zT1,zT2,zT3,i,j) {B[i][j]=u2[i]*v2[j]+u1[i]*v1[j]+A[i][j];} #define S2(zT0,zT1,zT2,zT3,i,j) {x[i]=beta*B[j][i]*y[j]+x[i];} #define S3(i) {x[i]=z[i]+x[i];} #define S4(i,j) {w[i]=alpha*B[i][j]*x[j]+w[i];} int c1, c2, c3, c4, c5, c6, c7, c8, c9, c10; register int lb, ub, lb1, ub1, lb2, ub2; register int lbv, ubv; /* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 0.05s. 
*/ lb1=0; ub1=floord(N-1,256); #pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7,c8,c9,c10) for (c2=lb1; c2<=ub1; c2++) { for (c3=0;c3<=floord(N-1,256);c3++) { for (c4=max(0,8*c2);c4<=min(8*c2+7,floord(N-1,32));c4++) { for (c5=max(8*c3,0);c5<=min(floord(N-1,32),8*c3+7);c5++) { /*@ begin Loop( transform UnrollJam(ufactor=32) for (c6=max(32*c5,0);c6<=min(N-1,32*c5+31);c6++) { { lbv=max(32*c4,0); ubv=min(N-1,32*c4+31); #pragma ivdep #pragma vector always for (c7=lbv; c7<=ubv; c7++) { S1(c3,c2,c5,c4,c6,c7) ; S2(c2,c3,c4,c5,c7,c6) ; } } } ) @*/{ for (c6 = max(32 * c5, 0); c6 <= min(N - 1, 32 * c5 + 31) - 31; c6 = c6 + 32) { lbv=max(32*c4,0); ubv=min(N-1,32*c4+31); #pragma ivdep #pragma vector always for (c7=lbv; c7<=ubv; c7++) { S1(c3, c2, c5, c4, c6, c7); S2(c2, c3, c4, c5, c7, c6); S1(c3, c2, c5, c4, (c6 + 1), c7); S2(c2, c3, c4, c5, c7, (c6 + 1)); S1(c3, c2, c5, c4, (c6 + 2), c7); S2(c2, c3, c4, c5, c7, (c6 + 2)); S1(c3, c2, c5, c4, (c6 + 3), c7); S2(c2, c3, c4, c5, c7, (c6 + 3)); S1(c3, c2, c5, c4, (c6 + 4), c7); S2(c2, c3, c4, c5, c7, (c6 + 4)); S1(c3, c2, c5, c4, (c6 + 5), c7); S2(c2, c3, c4, c5, c7, (c6 + 5)); S1(c3, c2, c5, c4, (c6 + 6), c7); S2(c2, c3, c4, c5, c7, (c6 + 6)); S1(c3, c2, c5, c4, (c6 + 7), c7); S2(c2, c3, c4, c5, c7, (c6 + 7)); S1(c3, c2, c5, c4, (c6 + 8), c7); S2(c2, c3, c4, c5, c7, (c6 + 8)); S1(c3, c2, c5, c4, (c6 + 9), c7); S2(c2, c3, c4, c5, c7, (c6 + 9)); S1(c3, c2, c5, c4, (c6 + 10), c7); S2(c2, c3, c4, c5, c7, (c6 + 10)); S1(c3, c2, c5, c4, (c6 + 11), c7); S2(c2, c3, c4, c5, c7, (c6 + 11)); S1(c3, c2, c5, c4, (c6 + 12), c7); S2(c2, c3, c4, c5, c7, (c6 + 12)); S1(c3, c2, c5, c4, (c6 + 13), c7); S2(c2, c3, c4, c5, c7, (c6 + 13)); S1(c3, c2, c5, c4, (c6 + 14), c7); S2(c2, c3, c4, c5, c7, (c6 + 14)); S1(c3, c2, c5, c4, (c6 + 15), c7); S2(c2, c3, c4, c5, c7, (c6 + 15)); S1(c3, c2, c5, c4, (c6 + 16), c7); S2(c2, c3, c4, c5, c7, (c6 + 16)); S1(c3, c2, c5, c4, (c6 + 17), c7); S2(c2, c3, c4, c5, c7, (c6 + 17)); S1(c3, c2, c5, 
c4, (c6 + 18), c7); S2(c2, c3, c4, c5, c7, (c6 + 18)); S1(c3, c2, c5, c4, (c6 + 19), c7); S2(c2, c3, c4, c5, c7, (c6 + 19)); S1(c3, c2, c5, c4, (c6 + 20), c7); S2(c2, c3, c4, c5, c7, (c6 + 20)); S1(c3, c2, c5, c4, (c6 + 21), c7); S2(c2, c3, c4, c5, c7, (c6 + 21)); S1(c3, c2, c5, c4, (c6 + 22), c7); S2(c2, c3, c4, c5, c7, (c6 + 22)); S1(c3, c2, c5, c4, (c6 + 23), c7); S2(c2, c3, c4, c5, c7, (c6 + 23)); S1(c3, c2, c5, c4, (c6 + 24), c7); S2(c2, c3, c4, c5, c7, (c6 + 24)); S1(c3, c2, c5, c4, (c6 + 25), c7); S2(c2, c3, c4, c5, c7, (c6 + 25)); S1(c3, c2, c5, c4, (c6 + 26), c7); S2(c2, c3, c4, c5, c7, (c6 + 26)); S1(c3, c2, c5, c4, (c6 + 27), c7); S2(c2, c3, c4, c5, c7, (c6 + 27)); S1(c3, c2, c5, c4, (c6 + 28), c7); S2(c2, c3, c4, c5, c7, (c6 + 28)); S1(c3, c2, c5, c4, (c6 + 29), c7); S2(c2, c3, c4, c5, c7, (c6 + 29)); S1(c3, c2, c5, c4, (c6 + 30), c7); S2(c2, c3, c4, c5, c7, (c6 + 30)); S1(c3, c2, c5, c4, (c6 + 31), c7); S2(c2, c3, c4, c5, c7, (c6 + 31)); } } for (; c6 <= min(N - 1, 32 * c5 + 31); c6 = c6 + 1) { lbv=max(32*c4,0); ubv=min(N-1,32*c4+31); #pragma ivdep #pragma vector always for (c7=lbv; c7<=ubv; c7++) { S1(c3, c2, c5, c4, c6, c7); S2(c2, c3, c4, c5, c7, c6); } } } /*@ end @*/ } } } } lb1=0; ub1=N-1; #pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7,c8,c9,c10) for (c2=lb1; c2<=ub1; c2++) { S3(c2) ; } lb1=0; ub1=N-1; #pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7,c8,c9,c10) for (c2=lb1; c2<=ub1; c2++) { for (c3=0;c3<=N-1;c3++) { S4(c2,c3) ; } } /* End of CLooG code */ annot_t_end = rtclock(); annot_t_total += annot_t_end - annot_t_start; } annot_t_total = annot_t_total / REPS; printf("%f\n", annot_t_total); return ((int) w[0]); }
/* ==== file: convolution_packnto1.h ==== */
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Naive convolution for packn-packed input producing pack1 (scalar) output,
// using RISC-V Vector (RVV) intrinsics.  One vector lane set accumulates the
// packn input channels of each element; the lanes are reduced to a scalar
// per output element.  Parallelized over output channels with OpenMP.
static void convolution_packnto1_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packnto1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn = number of fp32 lanes in one vector register (VLEN/32)
    const int packn = csrr_vlenb() / 4;
    const word_type vl = vsetvl_e32m1(packn);

    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    // space_ofs[k] = element offset (in packn-element units) of the k-th
    // kernel tap relative to the window origin, with dilation applied.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    // NULL when the layer has no bias (Mat converts to its data pointer)
    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // scalar part of the accumulator, seeded with the bias
                float sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                // vector accumulator: one partial sum per input-pack lane
                vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

                // weights are laid out maxk*packn per input channel
                // (assumed from the kptr += packn walk below — the packing
                // is done elsewhere in weight_data_packnto1)
                const float* kptr = (const float*)weight_data_packnto1.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const float* sptr = m.row(i * stride_h) + j * stride_w * packn;

                    for (int k = 0; k < maxk; k++)
                    {
                        vfloat32m1_t _val = vle32_v_f32m1(sptr + space_ofs[k] * packn, vl);
                        vfloat32m1_t _w = vle32_v_f32m1(kptr, vl);
                        // fused multiply-accumulate across the packn lanes
                        _sum = vfmacc_vv_f32m1(_sum, _val, _w, vl);

                        kptr += packn;
                    }
                }

                // lane reduction: fold _sum into the scalar seed `sum`
                // (vfredsum takes the scalar as lane 0 of the second operand;
                // default-constructed dest is the old pre-tuple intrinsic API)
                sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}
/* ==== file: train9.c ==== */
#define _GNU_SOURCE #include <syscall.h> #include <sched.h> #include "graph.h" #include "mainFunctions.h" #include "powerperformacetracking.h" #include "print.h" #include <stdlib.h> #include<unistd.h> #define NO_OF_ARGS 2 //#define REPEAT 25 #define REPEAT 15 long long iters[8]; struct timeval start, end; // We define all additional paramemter here void setaffinity() { /* #pragma omp parallel { cpu_set_t newcpu; int threadid = omp_get_thread_num(); CPU_ZERO(&newcpu); CPU_SET ( threadid , &newcpu) ; int __t = sched_setaffinity ( syscall ( SYS_gettid ) , sizeof ( newcpu ) , &newcpu ) ; assert(__t == 0); } */ } void train9(graph *G, int id) { printf("The train 9 add %d \n", id); char title[50]; sprintf(title, "train9_%d.csv",id); gettimeofday(&start, NULL); inittracking(title); int pf = 0; int abc; for(abc =0; abc< REPEAT; abc++) { #pragma omp parallel { int flag; int t = 0; #pragma omp for schedule(dynamic, 1024) for (node_t u1 = 0; u1 < G->numNodes; u1 ++) { if(u1%2 == 0){ for (edge_t u_idx = G->begin[u1];u_idx < G->begin[u1+1] ; u_idx ++) { node_t u = G->node_idx [u_idx]; t = t + u1%2; } #pragma omp atomic pf= pf + 1; } } #pragma omp atomic pf -= t; t = 0; } } endtracking(); gettimeofday(&end, NULL); printf("The pf value is %d \n",pf); printTiming(ALGO_KERNEL,((end.tv_sec - start.tv_sec)*1000 + ((double)(end.tv_usec - start.tv_usec))/1000)); //free(G_member); } #define numTimes 7 /*** * Common entry point for all algorithms, **/ int runalgo(int argc,char** argv) { int i; setaffinity(); graph* G = readGraph(argv[1], argv[2]); for(i = 0;i< numTimes; i++) { printf("Run %d \n", i); train9(G,i); sleep(2); } return 0; } inline void kernel(graph *G) { }
/* ==== file: GB_unaryop__ainv_int64_bool.c ==== */
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int64_bool
// op(A') function:  GB_tran__ainv_int64_bool

// C type:   int64_t
// A type:   bool
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (additive inverse, applied after the cast to int64_t)
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;\
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;\
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: Cx [p] = -(int64_t) Ax [p] for all p.  Aliasing Cx == Ax is
// safe because entry p reads and writes only index p.
GrB_Info GB_unop__ainv_int64_bool
(
    int64_t *Cx,        // Cx and Ax may be aliased
    bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose body lives in GB_unaryop_transpose.c; the macros above
// specialize it for this type/op combination at compile time.
GrB_Info GB_tran__ainv_int64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ==== file: time_multi_omp.c ==== */
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign // // File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign ////////////////////////////////////////////////////////////////////////////////////// #include "multi_bspline.h" #include "bspline.h" #include "multi_nubspline.h" #include "nubspline.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #ifdef _OPENMP #include <omp.h> #endif _OPENMP double drand48(); inline double get_time() { #ifdef _OPENMP return omp_get_wtime(); #else return (double)clock() / (double)CLOCKS_PER_SEC; #endif } void time_3d_real_double_omp() { // int avail = numa_available(); #ifdef _OPENMP int nthr = omp_get_max_threads(); #else int nthr = 1; #endif // int nnodes = numa_max_node(); // fprintf (stderr, "Performing test with %d NUMA nodes.\n", // avail, nnodes); // if (!nnodes) // nnodes++; int nnodes = nthr; fprintf (stderr, "Using %d threads.\n", nnodes); int Nx=63; int Ny=61; int Nz = 69; int num_splines = 256; Ugrid x_grid, y_grid, z_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny; z_grid.start = 4.5; z_grid.end = 9.3; z_grid.num = Nz; BCtype_d xBC, yBC, zBC; xBC.lCode = xBC.rCode = PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; zBC.lCode = zBC.rCode = PERIODIC; // First, create splines the normal way UBspline_3d_d* norm_splines[num_splines]; multi_UBspline_3d_d *multi_spline[nnodes]; // First, create multispline #pragma omp parallel for for (int node=0; node<nnodes; node++) { // nodemask_t mask; // nodemask_zero(&mask); // nodemask_set (&mask, node); // numa_set_membind 
(&mask); multi_spline[node] = create_multi_UBspline_3d_d (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines); } double data[Nx*Ny*Nz]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx*Ny*Nz; j++) data[j] = (drand48()-0.5); norm_splines[i] = create_UBspline_3d_d (x_grid, y_grid, z_grid, xBC, yBC, zBC, data); #pragma omp parallel for for (int node=0; node<nnodes; node++) { // nodemask_t mask; // nodemask_zero(&mask); // nodemask_set (&mask, node); // numa_set_membind (&mask); set_multi_UBspline_3d_d (multi_spline[node], i, data); } } // Now, test random values double rand_start, rand_end, norm_start[nthr], norm_end[nthr], multi_start[nthr], multi_end[nthr]; int num_vals = 10000; double multi_vals[nthr][num_splines], norm_vals[nthr][num_splines]; double multi_grads[nthr][3*num_splines], norm_grads[nthr][3*num_splines]; double multi_lapl[nthr][num_splines], norm_lapl[nthr][num_splines]; double multi_hess[nthr][9*num_splines], norm_hess[nthr][9*num_splines]; rand_start = get_time(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; } rand_end = get_time(); /////////////////////// // Check value routine // /////////////////////// double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; int thr_per_node = nthr/nnodes; #pragma omp parallel for for (int thr=0; thr<nthr; thr++) { int node = thr/thr_per_node; multi_start[thr] = get_time(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = 
rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_d (multi_spline[node], x, y, z, multi_vals[thr]); } multi_end[thr] = get_time(); } // #pragma omp parallel for // for (int thr=0; thr<nthr; thr++) { // norm_start[thr] = get_time(); // for (int i=0; i<num_vals; i++) { // double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; // double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; // double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; // for (int j=0; j<num_splines; j++) // eval_UBspline_3d_d (norm_splines[j], x, y, z, &(norm_vals[thr][j])); // } // norm_end[thr] = get_time(); // } double norm_avg=0.0, multi_avg=0.0; for (int thr=0; thr<nthr; thr++) { double norm_time = (double)(norm_end[thr] - norm_start[thr] + rand_start - rand_end); double multi_time = (double)(multi_end[thr] - multi_start[thr] + rand_start - rand_end); norm_avg += norm_time; multi_avg += multi_time; } norm_avg /= nthr; multi_avg /= nthr; double norm_speed = (double) num_vals*num_splines / norm_avg; double multi_speed = (double) num_vals*num_splines / multi_avg; // fprintf (stderr, "Normal value speed = %13.3f evaluations per second.\n", // norm_speed); fprintf (stderr, "Multi value speed = %13.3f evaluations per second.\n", multi_speed); fprintf (stderr, "Aggregate bandwidth = %1.3f GB/s per socket\n", multi_speed * 64.0*8.0 * 8 * 1.0e-9); /////////////////////// // Check VGH routine // /////////////////////// #pragma omp parallel for for (int thr=0; thr<nthr; thr++) { int node = thr/thr_per_node; multi_start[thr] = get_time(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_d_vgh (multi_spline[node], x, y, z, multi_vals[thr], multi_grads[thr], multi_hess[thr]); } multi_end[thr] = get_time(); } // 
#pragma omp parallel for // for (int thr=0; thr<nthr; thr++) { // norm_start[thr] = get_time(); // for (int i=0; i<num_vals; i++) { // double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; // double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; // double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; // for (int j=0; j<num_splines; j++) // eval_UBspline_3d_d_vgh (norm_splines[j], x, y, z, &(norm_vals[thr][j]), // &(norm_grads[thr][3*j]), &(norm_hess[thr][9*j])); // } // norm_end[thr] = get_time(); // } norm_avg = multi_avg = 0.0; for (int thr=0; thr<nthr; thr++) { double norm_time = (double)(norm_end[thr] - norm_start[thr] + rand_start - rand_end); double multi_time = (double)(multi_end[thr] - multi_start[thr] + rand_start - rand_end); norm_avg += norm_time; multi_avg += multi_time; } norm_avg /= nthr; multi_avg /= nthr; norm_speed = (double) num_vals*num_splines / norm_avg; multi_speed = (double) num_vals*num_splines / multi_avg; // fprintf (stderr, "Normal VGH speed = %13.3f evaluations per second.\n", // norm_speed); fprintf (stderr, "Multi VGH speed = %13.3f evaluations per second.\n", multi_speed); fprintf (stderr, "%1.3f GFLOPS per socket\n", multi_speed * 64.0*2.0*10.0 * 8 * 1.0e-9); // destroy_Bspline (multi_spline); // for (int i=0; i<num_splines; i++) // destroy_Bspline(norm_splines[i]); } void time_3d_complex_double_omp() { #ifdef _OPENMP int nthr = omp_get_max_threads(); #else int nthr = 1; #endif int nnodes = nthr; fprintf (stderr, "Using %d threads.\n", nthr); int Nx=32; int Ny=32; int Nz = 32; int num_splines = 256; Ugrid x_grid, y_grid, z_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny; z_grid.start = 4.5; z_grid.end = 9.3; z_grid.num = Nz; BCtype_z xBC, yBC, zBC; xBC.lCode = xBC.rCode = PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; zBC.lCode = zBC.rCode = PERIODIC; // First, create splines the normal way UBspline_3d_z* 
norm_splines[num_splines]; multi_UBspline_3d_z *multi_spline[nthr]; // First, create multispline #pragma omp parallel for for (int node=0; node<nthr; node++) { // nodemask_t mask; // nodemask_zero(&mask); // nodemask_set (&mask, node); // numa_set_membind (&mask); multi_spline[node] = create_multi_UBspline_3d_z (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines); } double data[Nx*Ny*Nz*2]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx*Ny*Nz; j++) data[j] = (drand48()-0.5); norm_splines[i] = create_UBspline_3d_z (x_grid, y_grid, z_grid, xBC, yBC, zBC, (complex_double*)data); #pragma omp parallel for for (int node=0; node<nthr; node++) { // nodemask_t mask; // nodemask_zero(&mask); // nodemask_set (&mask, node); // numa_set_membind (&mask); set_multi_UBspline_3d_z (multi_spline[node], i, data); } } // Now, test random values double rand_start, rand_end, norm_start[nthr], norm_end[nthr], multi_start[nthr], multi_end[nthr]; int num_vals = 10000; complex_double multi_vals[nthr][num_splines], norm_vals[nthr][num_splines]; complex_double multi_grads[nthr][3*num_splines], norm_grads[nthr][3*num_splines]; complex_double multi_lapl[nthr][num_splines], norm_lapl[nthr][num_splines]; complex_double multi_hess[nthr][9*num_splines], norm_hess[nthr][9*num_splines]; rand_start = get_time(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; } rand_end = get_time(); /////////////////////// // Check value routine // /////////////////////// double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; int thr_per_node = nthr/nthr; #pragma omp parallel for for (int 
thr=0; thr<nthr; thr++) { int node = thr/thr_per_node; multi_start[thr] = get_time(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_z (multi_spline[node], x, y, z, multi_vals[thr]); } multi_end[thr] = get_time(); } // #pragma omp parallel for // for (int thr=0; thr<nthr; thr++) { // norm_start[thr] = get_time(); // for (int i=0; i<num_vals; i++) { // double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; // double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; // double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; // for (int j=0; j<num_splines; j++) // eval_UBspline_3d_z (norm_splines[j], x, y, z, &(norm_vals[thr][j])); // } // norm_end[thr] = get_time(); // } double norm_avg=0.0, multi_avg=0.0; for (int thr=0; thr<nthr; thr++) { double norm_time = (double)(norm_end[thr] - norm_start[thr] + rand_start - rand_end); double multi_time = (double)(multi_end[thr] - multi_start[thr] + rand_start - rand_end); norm_avg += norm_time; multi_avg += multi_time; } norm_avg /= nthr; multi_avg /= nthr; double norm_speed = (double) num_vals*num_splines / norm_avg; double multi_speed = (double) num_vals*num_splines / multi_avg; // fprintf (stderr, "Normal value speed = %13.3f evaluations per second.\n", // norm_speed); fprintf (stderr, "Multi value speed = %13.3f evaluations per second.\n", multi_speed); fprintf (stderr, "Aggregate bandwidth = %1.3f GB/s per socket\n", multi_speed * 64.0*16.0 * 8 * 1.0e-9); fprintf (stderr, "%1.3f GFLOPS per socket\n", multi_speed * 64.0*4.0 * 8 * 1.0e-9); /////////////////////// // Check VGH routine // /////////////////////// #pragma omp parallel for for (int thr=0; thr<nthr; thr++) { int node = thr/thr_per_node; multi_start[thr] = get_time(); for (int i=0; 
i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_z_vgh (multi_spline[node], x, y, z, multi_vals[thr], multi_grads[thr], multi_hess[thr]); } multi_end[thr] = get_time(); } // #pragma omp parallel for // for (int thr=0; thr<nthr; thr++) { // norm_start[thr] = get_time(); // for (int i=0; i<num_vals; i++) { // double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; // double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; // double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; // for (int j=0; j<num_splines; j++) // eval_UBspline_3d_z_vgh (norm_splines[j], x, y, z, &(norm_vals[thr][j]), // &(norm_grads[thr][3*j]), &(norm_hess[thr][9*j])); // } // norm_end[thr] = get_time(); // } norm_avg = multi_avg = 0.0; for (int thr=0; thr<nthr; thr++) { double norm_time = (double)(norm_end[thr] - norm_start[thr] + rand_start - rand_end); double multi_time = (double)(multi_end[thr] - multi_start[thr] + rand_start - rand_end); norm_avg += norm_time; multi_avg += multi_time; } norm_avg /= nthr; multi_avg /= nthr; norm_speed = (double) num_vals*num_splines / norm_avg; multi_speed = (double) num_vals*num_splines / multi_avg; // fprintf (stderr, "Normal VGH speed = %13.3f evaluations per second.\n", // norm_speed); fprintf (stderr, "Multi VGH speed = %13.3f evaluations per second.\n", multi_speed); fprintf (stderr, "%1.3f GFLOPS per socket\n", multi_speed * 64.0*4.0*10.0 * 8 * 1.0e-9); // destroy_Bspline (multi_spline); // for (int i=0; i<num_splines; i++) // destroy_Bspline(norm_splines[i]); } main() { // fprintf (stderr, "Real:\n"); // time_3d_real_double_omp(); fprintf (stderr, "\nComplex:\n"); time_3d_complex_double_omp(); }
variables.c
#include <stdio.h>

/*
 * Demonstrates how the private, firstprivate and (default) shared
 * data-sharing clauses affect two variables across OpenMP parallel
 * regions.  Compile with -fopenmp; without it the pragmas are ignored
 * and the program runs sequentially.
 */
int main(void)
{
    int var1 = 1, var2 = 2;

    /* private: every thread gets its own UNINITIALIZED copy, so the
       values printed here are indeterminate, and the increments never
       touch the outer var1/var2. */
#pragma omp parallel private(var1, var2)
    {
        printf("Region 1: var1=%i, var2=%i\n", var1, var2);
        var1++;
        var2++;
    }
    printf("After region 1: var1=%i, var2=%i\n\n", var1, var2);

    /* firstprivate: per-thread copies are initialized from the outer
       values, but updates are still discarded when the region ends. */
#pragma omp parallel firstprivate(var1, var2)
    {
        printf("Region 2: var1=%i, var2=%i\n", var1, var2);
        var1++;
        var2++;
    }
    printf("After region 2: var1=%i, var2=%i\n\n", var1, var2);

    /* With no clause the variables are shared by all threads. */
#pragma omp parallel /* same as omp parallel shared(var1, var2) */
    {
        printf("Region 3: var1=%i, var2=%i\n", var1, var2);
        /* Note that this introduces the data race condition! */
        var1++;
        var2++;
    }
    printf("After region 3: var1=%i, var2=%i\n\n", var1, var2);

    return 0;
}
rectangle_cmap.h
#pragma omp parallel for schedule(dynamic,1) reduction(+:counter) for (vidType v0 = 0; v0 < g.V(); v0++) { auto tid = omp_get_thread_num(); auto &cmap = cmaps.at(tid); for (auto v1 : g.N(v0)) { for (auto u : g.N(v1)) { if (u >= v0) break; cmap[u] = 1; } if (v1 >= v0) break; for (auto v2 : g.N(v0)) { if (v2 >= v1) break; for (auto v3 : g.N(v2)) { if (v3 >= v0) break; #ifdef PROFILE_LATENCY auto c1 = read_cycle(); #endif if (cmap[v3] == 1) counter ++; #ifdef PROFILE_LATENCY auto c2 = read_cycle(); if (nqueries[tid] < NUM_SAMPLES) { auto tick = c2 - c1; if (tick < 500) { nticks[tid] += tick; nqueries[tid] ++; } } #endif } } for (auto u : g.N(v1)) { if (u >= v0) break; cmap[u] = 0; } } }
elemwise_binary_scalar_op.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 *  Copyright (c) 2016 by Contributors
 * \file elemwise_binary_scalar_op.h
 * \brief Function definition of elementwise binary scalar operators
 */
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_

#include <mxnet/operator_util.h>
#include <vector>
#include <utility>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "elemwise_unary_op.h"

namespace mxnet {
namespace op {

/*!
 * \brief Kernels for elementwise "tensor OP scalar" operators.  The scalar
 * is carried in attrs.parsed (a double, set by the registration macro at the
 * bottom of this file).  Dense, row-sparse and CSR inputs are supported; the
 * *DenseResult* helpers handle the sparse-input / dense-output case, where
 * positions absent from the sparse input must receive OP::Map(0, alpha).
 */
class BinaryScalarOp : public UnaryOp {
  /*! \brief Tensor operation against a scalar with a dense result
   * (row-sparse input, CPU).  Walks the stored rows in order, filling the
   * gaps (rows not present in the rsp input) with OP::Map(0, alpha) and
   * applying OP to contiguous runs of stored rows in bulk. */
  template<typename OP, typename DType, typename IType>
  static void ComputeExDenseResultRsp(mshadow::Stream<cpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    const double alpha = nnvm::get<double>(attrs.parsed);
    CHECK_EQ(output.shape(), input.shape());
    const int64_t row_count = output.shape()[0];
    const int64_t items_per_row = output.shape().Size() / row_count;
    // Value written to rows that have no stored data in the sparse input.
    const DType result_for_zero = OP::Map(DType(0), DType(alpha));
    mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream);
    mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream);
    const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size();
    if (sparse_row_count != row_count) {
      mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data(
        rowsparse::kIdx).FlatTo1D<cpu, IType>(stream);
      int64_t input_iter = 0;   // position in the stored-row index array
      int64_t output_row = 0;   // next dense output row to produce
      IType next_input_row = 0;
      while (output_row < row_count) {
        next_input_row = input_iter < sparse_row_count ?
          int64_t(row_indexes[input_iter]) : row_count;
        // Split up into blocks of contiguous data and do those together
        // Do contiguous dense blocks
        const int64_t dense_block_count = next_input_row - output_row;
        if (dense_block_count > 0) {
          MXNET_ASSIGN_REQ_SWITCH(req, Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch(
              stream,
              items_per_row * dense_block_count,
              output_data.dptr_ + items_per_row * output_row,
              result_for_zero);
          });
          output_row += dense_block_count;
          continue;
        }
        // Do contiguous sparse blocks
        int64_t next_non_contiguous_sparse = input_iter;
        while (next_non_contiguous_sparse < sparse_row_count - 1) {
          if (row_indexes[next_non_contiguous_sparse + 1] !=
              row_indexes[next_non_contiguous_sparse] + 1) {
            break;
          }
          ++next_non_contiguous_sparse;
        }
        const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1;
        if (sparse_block_count > 0) {
          MXNET_ASSIGN_REQ_SWITCH(req, Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
              stream,
              items_per_row * sparse_block_count,
              &output_data.dptr_[items_per_row * output_row],
              &input_data.dptr_[items_per_row * input_iter],
              DType(alpha));
          });
          output_row += sparse_block_count;
          input_iter += sparse_block_count;
          continue;
        }
      }
    } else {
      // All rows exist (eventually we don't have to do complex
      // things to call GPU kernels because we don't need to access row indices)
      MXNET_ASSIGN_REQ_SWITCH(req, Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
          stream,
          items_per_row * row_count,
          output_data.dptr_,
          input_data.dptr_,
          DType(alpha));
      });
    }
  }

  /*! \brief Tensor operation against a scalar with a dense result
   * (row-sparse input, GPU -- not implemented). */
  template<typename OP, typename DType, typename IType>
  static void ComputeExDenseResultRsp(mshadow::Stream<gpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    LOG(FATAL) << "NOT IMPLEMENTED";
  }

  /*! \brief Tensor operation against a scalar with a dense result
   * (CSR input, CPU).  Pre-fills the whole output with OP::Map(0, alpha),
   * then overwrites the positions that have stored CSR values. */
  template<typename OP, typename DType, typename IType, typename CType>
  static void ComputeExDenseResultCsr(mshadow::Stream<cpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    CHECK_EQ(output.shape(), input.shape());
    const double alpha = nnvm::get<double>(attrs.parsed);
    const DType dense_fill_val = OP::Map(DType(0), DType(alpha));
    const TBlob column_indexes = input.aux_data(csr::kIdx);
    const size_t item_count = column_indexes.Size();
    // Pre-fill dense with 0-input/output value
    FillDense<DType>(stream, output.shape().Size(), dense_fill_val,
                     req, output.data().dptr<DType>());
    mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data());
    if (item_count) {
      const DType *in = input.data().dptr<DType>();
      const IType *column_indexes_ptr = column_indexes.dptr<IType>();
      const auto row_count = static_cast<size_t>(input.shape()[0]);
      const TBlob row_starts = input.aux_data(csr::kIndPtr);
      const CType *row_starts_ptr = row_starts.dptr<CType>();
      #pragma omp parallel for
      for (int i = 0; i < static_cast<int>(row_count); ++i) {
        const bool last_row = i == static_cast<int>(row_count) - 1;
        // Split up into blocks of contiguous data and do those together
        const size_t row_item_start_iter = row_starts_ptr[i];
        const size_t input_items_this_row = !last_row ?
          static_cast<size_t>(row_starts_ptr[i + 1]) - row_item_start_iter
          : item_count - row_item_start_iter;
        if (input_items_this_row) {
          const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter;
          const DType *row_data_start = in + row_item_start_iter;
          DType *output_this_row = out[i].dptr_;
          // More overhead to use OMP for small loops, so don't
          if (input_items_this_row > 1000) {
            // NOTE(review): this is a parallel-for nested inside the row
            // parallel-for above; it only spawns extra threads if nested
            // parallelism is enabled -- confirm that is intended.
            #pragma omp parallel for
            for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
              const IType col = this_row_column_indexes[j];
              const DType val = row_data_start[j];
              output_this_row[col] = OP::Map(val, DType(alpha));
            }
          } else {
            for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
              const IType col = this_row_column_indexes[j];
              const DType val = row_data_start[j];
              output_this_row[col] = OP::Map(val, DType(alpha));
            }
          }
        }
      }
    }
  }

  /*! \brief Tensor operation against a scalar with a dense result
   * (CSR input, GPU -- not implemented). */
  template<typename OP, typename DType, typename IType, typename CType>
  static void ComputeExDenseResultCsr(mshadow::Stream<gpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    LOG(FATAL) << "NOT IMPLEMENTED";
  }

  /*! \brief Dispatch a sparse input / dense output operation to the
   * storage-type-specific helper above.
   * NOTE(review): `output` is taken by value here, unlike the const
   * references used everywhere else -- presumably an oversight (NDArray
   * copies are shallow, so behavior is unaffected); confirm before changing. */
  template<typename xpu, typename OP, typename DType, typename IType>
  static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const NDArray &input,
                                   const OpReqType req,
                                   const NDArray output) {
    mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>();
    CHECK_EQ(output.storage_type(), kDefaultStorage);
    switch (input.storage_type()) {
      case kRowSparseStorage: {
        ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output);
        break;
      }
      case kCSRStorage: {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, {
          ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output);
        });
        break;
      }
      default:
        CHECK(false) << "Unsupported sparse storage type";
        break;
    }
  }

 public:
  /*! \brief Dense forward: out[i] = OP(in[i], alpha) for every element. */
  template<typename xpu, typename OP>
  static void Compute(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const double alpha = nnvm::get<double>(attrs.parsed);
    // Type is switched on the output; input is assumed to share it.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
      });
    });
  }

  /*! \brief Dense forward for comparison/logic ops: boolean output,
   * out[i] = OP(in[i], alpha). */
  template<typename xpu, typename OP>
  static void ComputeLogic(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const double alpha = nnvm::get<double>(attrs.parsed);
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<bool>(), inputs[0].dptr<DType>(), DType(alpha));
      });
    });
  }

  /*! \brief Sparse-aware forward: same-storage in/out reuses the dense
   * kernel over the stored values; sparse-in / dense-out goes through
   * ComputeExDenseResult; anything else is logged as unimplemented. */
  template<typename xpu, typename OP>
  static void ComputeEx(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<NDArray> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<NDArray> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    const auto in_stype = inputs[0].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if (req[0] == kNullOp) {
      return;
    }
    if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
        (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
      // csr -> csr, or rsp -> rsp
      UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
    } else if (out_stype == kDefaultStorage &&
               (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
        MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
          ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]);
        });
      });
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  /*! \brief Sparse-aware forward for logic ops; only the same-storage
   * in/out case is supported. */
  template<typename xpu, typename OP>
  static void LogicComputeEx(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<NDArray> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<NDArray> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    const auto in_stype = inputs[0].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if (req[0] == kNullOp) {
      return;
    }
    if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
        (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
      // csr -> csr, or rsp -> rsp
      UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  /*! \brief Backward pass: combines the output gradient (inputs[0]) with the
   * original input (inputs[1]) through backward_grad_tuned<OP>. */
  template<typename xpu, typename OP>
  static void Backward(const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const double alpha = nnvm::get<double>(attrs.parsed);
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet::op::mxnet_op::Kernel<mxnet::op::mxnet_op::op_with_req<
          mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>, xpu>::
          Launch(s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(),
                 inputs[1].dptr<DType>(), DType(alpha));
      });
    });
  }
};

/*! \brief Register a binary-scalar operator; the "scalar" attribute is
 * parsed into attrs.parsed as a double at graph-construction time. */
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name)                 \
  NNVM_REGISTER_OP(name)                                            \
  .set_num_inputs(1)                                                \
  .set_num_outputs(1)                                               \
  .set_attr_parser([](NodeAttrs* attrs) {                           \
      attrs->parsed = std::stod(attrs->dict["scalar"]);             \
    })                                                              \
  .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)     \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption",                 \
    [](const NodeAttrs& attrs){                                     \
      return std::vector<std::pair<int, int> >{{0, 0}};             \
    })                                                              \
  .add_argument("data", "NDArray-or-Symbol", "source input")        \
  .add_argument("scalar", "float", "scalar input")

}  // namespace op
}  // namespace mxnet

#endif  // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
omp_clause.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/**
 * Notes on OpenMP clause usage.
 *
 * Covers: shared, private, default, firstprivate, lastprivate, nowait,
 * schedule, if, reduction, copyin, copyprivate, num_threads.
 */

/* default(none) forces every variable to be listed explicitly: the loop
 * index is private to each thread, the bound n is shared (read-only). */
void omp_shared_private_default() {
    int i, n = 10;
#pragma omp parallel for default(none) \
    private(i) shared(n)
    for(i = 0; i < n; i++) {
        printf("thread %d execute loop %d\n", omp_get_thread_num(), i);
    }
}

/* firstprivate: each thread's private copy of a[] is initialized from
 * the array filled in before the region. */
void omp_firstprivate() {
    int n = 8;
    int i = 0, a[n];
    for(i = 0; i < n; i++) {
        a[i] = i + 1;
    }
#pragma omp parallel for private(i) firstprivate(a)
    for(i = 0; i < n; i++) {
        printf("thread %d: a[%d] is %d\n", omp_get_thread_num(), i, a[i]);
    }
}

void omp_lastprivate() {
    int n = 8;
    int i, a = 3;
    /* lastprivate: after the loop, a holds the value assigned in the
     * sequentially last iteration (i == n-1). */
#pragma omp parallel for private(i) lastprivate(a)
    for(i = 0; i < n; i++) {
        a = i + 1;
        printf("In for: thread %d has a value of a = %d for i = %d\n", omp_get_thread_num(), a, i);
    }
    printf("\n");
    printf("Out for: thread %d has a value of a = %d\n", omp_get_thread_num(), a);
    printf("\n");
    /* For sections, lastprivate copies out the value assigned in the
     * lexically last section that writes a. */
#pragma omp parallel sections private(i) lastprivate(a)
    {
#pragma omp section
        {
            a = 1;
        }
#pragma omp section
        {
            a = 5;
        }
    }
    printf("after section: thread %d has a value of a = %d\n", omp_get_thread_num(), a);
}

/* nowait removes the implicit barrier at the end of the first worksharing
 * loop, so threads may start the second loop while others still run the
 * first. */
void omp_nowait() {
    int i, n = 6;
#pragma omp parallel
    {
#pragma omp for nowait
        for(i = 0; i < n; i++) {
            printf("thread %d: ++++\n", omp_get_thread_num());
        }
#pragma omp for
        for(i = 0; i < n; i++) {
            printf("thread %d: ----\n", omp_get_thread_num());
        }
    }
}

void omp_schedule() {
    int i, n = 10;
    /* schedule accepts: static, dynamic, guided, auto, runtime */
#pragma omp parallel for default(none) schedule(static, 2) \
    private(i) shared(n)
    for(i = 0; i < n; i++) {
        printf("Iteration %d executed by thread %d\n", i, omp_get_thread_num());
    }
}

/* if(n>5): the region runs in parallel only when the condition holds;
 * otherwise a single thread executes it. */
void omp_if() {
    int n = 1, tid;
    printf("n = 1\n");
#pragma omp parallel if(n>5) default(none) \
    private(tid) shared(n)
    {
        tid = omp_get_thread_num();
        printf("thread %d is running\n", tid);
    }
    printf("\n");
    n = 10;
    printf("n = 10\n");
#pragma omp parallel if(n>5) default(none) \
    private(tid) shared(n)
    {
        tid = omp_get_thread_num();
        printf("thread %d is running\n", tid);
    }
}

/* reduction(+:sum): each thread accumulates into a private copy
 * (initialized to 0) and on exit the copies are combined with the
 * ORIGINAL value of sum -- so sum must start at 0.  (Previously it was
 * left uninitialized, which made the printed result indeterminate.) */
void omp_reduction() {
    int sum = 0, i;
    int n = 10;
    int a[n];
    for(i = 0; i < n; i++) {
        a[i] = (i + 1);
    }
#pragma omp parallel for default(none) \
    private(i) shared(a,n) reduction(+:sum)
    for(i = 0; i < n; i++) {
        sum += a[i];
    }
    printf("sum is %d\n", sum);
}

/* counter is threadprivate: each thread keeps its own persistent copy. */
int counter = 10;
#pragma omp threadprivate(counter)

/* copyin: initializes each thread's threadprivate counter from the
 * master thread's copy on region entry. */
void omp_copyin() {
    printf("counter is %d\n", counter);
#pragma omp parallel copyin(counter)
    {
        counter = omp_get_thread_num() + counter + 1;
        printf("thread %d : counter is %d\n", omp_get_thread_num(), counter);
    }
    printf("counter is %d\n", counter);
}

/* copyprivate: broadcasts the values set by the single thread to the
 * corresponding variables of all other threads in the team. */
void omp_copyprivate() {
    int i;
#pragma omp parallel private(i)
    {
#pragma omp single copyprivate(i, counter)
        {
            i = 50;
            counter = 100;
            printf("thread %d execute single\n", omp_get_thread_num());
        }
        printf("thread %d: i is %d and counter is %d\n", omp_get_thread_num(), i, counter);
    }
}

/* num_threads(2): requests exactly two threads for this region. */
void omp_num_threads() {
#pragma omp parallel num_threads(2)
    {
        printf("thread %d is running\n", omp_get_thread_num());
    }
}

int main() {
    // omp_shared_private_default();
    // omp_firstprivate();
    // omp_lastprivate();
    // omp_nowait();
    // omp_schedule();
    // omp_if();
    // omp_reduction();
    // omp_copyin();
    // omp_copyprivate();
    omp_num_threads();
    return 0;
}
ft.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - FT This benchmark is an OpenMP C version of the NPB FT code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: D. Bailey W. Saphir OpenMP C version: S. Satoh --------------------------------------------------------------------*/ #include <stdint.h> #include "npb-C.h" /* global variables */ #include "global.h" /* function declarations */ static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX], int t, int indexmap[NZ][NY][NX], int d[3]); static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]); static void ipow46(double a, int exponent, double *result); static void setup(void); static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]); static void print_timers(void); static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]); static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], 
dcomplex y1[NX][FFTBLOCKPAD]); static void fft_init (int n); static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]); static void fftz2 (int is, int l, int m, int n, int ny, int ny1, dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]); static int ilog2(int n); static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]); static void verify (int d1, int d2, int d3, int nt, boolean *verified, char *class); /*-------------------------------------------------------------------- c FT benchmark c-------------------------------------------------------------------*/ static int realmain(void *carg) { unsigned arg = (uintptr_t)carg; /*c------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i, ierr; /*------------------------------------------------------------------ c u0, u1, u2 are the main arrays in the problem. c Depending on the decomposition, these arrays will have different c dimensions. To accomodate all possibilities, we allocate them as c one-dimensional arrays and pass them to subroutines for different c views c - u0 contains the initial (transformed) initial condition c - u1 and u2 are working arrays c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the c time evolution operator. c-----------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Large arrays are in common so that they are allocated on the c heap rather than the stack. This common block is not c referenced directly anywhere else. Padding is to avoid accidental c cache problems, since all array sizes are powers of two. 
c-------------------------------------------------------------------*/ static dcomplex u0[NZ][NY][NX]; static dcomplex pad1[3]; static dcomplex u1[NZ][NY][NX]; static dcomplex pad2[3]; static dcomplex u2[NZ][NY][NX]; static dcomplex pad3[3]; static int indexmap[NZ][NY][NX]; int iter; int nthreads = 1; double total_time, mflops; boolean verified; char class; omp_set_num_threads(arg); /*-------------------------------------------------------------------- c Run the entire problem once to make sure all data is touched. c This reduces variable startup costs, which is important for such a c short benchmark. The other NPB 2 implementations are similar. c-------------------------------------------------------------------*/ for (i = 0; i < T_MAX; i++) { timer_clear(i); } setup(); #pragma omp parallel { compute_indexmap(indexmap, dims[2]); #pragma omp single { compute_initial_conditions(u1, dims[0]); fft_init (dims[0][0]); } fft(1, u1, u0); } /* end parallel */ /*-------------------------------------------------------------------- c Start over from the beginning. Note that all operations must c be timed, in contrast to other benchmarks. 
c-------------------------------------------------------------------*/ for (i = 0; i < T_MAX; i++) { timer_clear(i); } timer_start(T_TOTAL); if (TIMERS_ENABLED == TRUE) timer_start(T_SETUP); #pragma omp parallel private(iter) firstprivate(niter) { compute_indexmap(indexmap, dims[2]); #pragma omp single { compute_initial_conditions(u1, dims[0]); fft_init (dims[0][0]); } if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_SETUP); } if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_start(T_FFT); } fft(1, u1, u0); if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_FFT); } for (iter = 1; iter <= niter; iter++) { if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_start(T_EVOLVE); } evolve(u0, u1, iter, indexmap, dims[0]); if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_EVOLVE); } if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_start(T_FFT); } fft(-1, u1, u2); if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_FFT); } if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_start(T_CHECKSUM); } checksum(iter, u2, dims[0]); if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_CHECKSUM); } } #pragma omp single verify(NX, NY, NZ, niter, &verified, &class); #if defined(_OPENMP) #pragma omp master nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /* end parallel */ timer_stop(T_TOTAL); total_time = timer_read(T_TOTAL); if( total_time != 0.0) { mflops = 1.0e-6*(double)(NTOTAL) * (14.8157+7.19641*log((double)(NTOTAL)) + (5.23518+7.21113*log((double)(NTOTAL)))*niter) /total_time; } else { mflops = 0.0; } #ifdef BOMP backend_create_time(arg); #endif printf("Computetime %d %f\n", arg, total_time); printf("client done\n"); /* c_print_results("FT", class, NX, NY, NZ, niter, nthreads, */ /* total_time, mflops, " floating point", verified, */ /* NPBVERSION, COMPILETIME, */ /* CS1, CS2, CS3, CS4, CS5, CS6, CS7); */ if (TIMERS_ENABLED == TRUE) print_timers(); } 
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX], int t,
                   int indexmap[NZ][NY][NX], int d[3]) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c evolve u0 -> u1 (t time steps) in fourier space
c-------------------------------------------------------------------*/

    int i, j, k;

    /* Scale each spectral coefficient by the precomputed time-evolution
       factor ex[t*|kbar|^2] (indexmap holds |kbar|^2 per point).  Called
       from inside an enclosing parallel region, so the outer k loop is
       work-shared across the team. */
#pragma omp for
    for (k = 0; k < d[2]; k++) {
        for (j = 0; j < d[1]; j++) {
            for (i = 0; i < d[0]; i++) {
                crmul(u1[k][j][i], u0[k][j][i], ex[t*indexmap[k][j][i]]);
            }
        }
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c Fill in array u0 with initial conditions from
c random number generator
c-------------------------------------------------------------------*/

    int k;
    double x0, start, an, dummy;
    /* Scratch for one z-plane of random values (2 doubles per complex). */
    static double tmp[NX*2*MAXDIM+1];
    int i,j,t;

    start = SEED;

/*--------------------------------------------------------------------
c Jump to the starting element for our first plane.
c-------------------------------------------------------------------*/
    /* an = A^(offset) mod 2^46 advances the linear-congruential stream
       to this process's first plane. */
    ipow46(A, (zstart[0]-1)*2*NX*NY + (ystart[0]-1)*2*NX, &an);
    dummy = randlc(&start, an);
    /* Stride between consecutive z planes in the random stream. */
    ipow46(A, 2*NX*NY, &an);

/*--------------------------------------------------------------------
c Go through by z planes filling in one square at a time.
c-------------------------------------------------------------------*/
    for (k = 0; k < dims[0][2]; k++) {
        x0 = start;
        /* Generate the whole plane (real and imaginary parts interleaved)
           with one vectorized call. */
        vranlc(2*NX*dims[0][1], &x0, A, tmp);

        t = 1;
        for (j = 0; j < dims[0][1]; j++)
            for (i = 0; i < NX; i++) {
                u0[k][j][i].real = tmp[t++];
                u0[k][j][i].imag = tmp[t++];
            }

        /* Advance the seed to the start of the next plane.
           NOTE(review): k runs 0..dims[0][2]-1, so this condition is
           always true; it mirrors the 1-based Fortran original, and the
           extra advance after the final plane is harmless. */
        if (k != dims[0][2]) dummy = randlc(&start, an);
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void ipow46(double a, int exponent, double *result) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c compute a^exponent mod 2^46
c-------------------------------------------------------------------*/

    double dummy, q, r;
    int n, n2;

/*--------------------------------------------------------------------
c Use
c   a^n = a^(n/2)*a^(n/2) if n even else
c   a^n = a*a^(n-1)       if n odd
c-------------------------------------------------------------------*/
    *result = 1;
    if (exponent == 0) return;
    q = a;
    r = 1;
    n = exponent;

    /* Square-and-multiply; randlc(&x, y) performs x = x*y mod 2^46. */
    while (n > 1) {
        n2 = n/2;
        if (n2 * 2 == n) {
            dummy = randlc(&q, q);   /* q = q^2 (even step) */
            n = n2;
        } else {
            dummy = randlc(&r, q);   /* r = r*q (odd step) */
            n = n-1;
        }
    }
    dummy = randlc(&r, q);
    *result = r;
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void setup(void) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

    /* NOTE(review): ierr, j and fstatus are declared but unused here
       (leftovers from the MPI/Fortran original). */
    int ierr, i, j, fstatus;

    printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
           " - FT Benchmark\n\n");

    niter = NITER_DEFAULT;

    printf(" Size : %3dx%3dx%3d\n", NX, NY, NZ);
    printf(" Iterations : %7d\n", niter);

    /* 1004 format(' Number of processes : ', i7)
       1005 format(' Processor array : ', i3, 'x', i3)
       1006 format(' WARNING: compiled for ', i5, ' processes. ',
       >      ' Will not verify. ')*/

    /* Single-process layout: every dimension view is the full grid. */
    for (i = 0;i < 3 ; i++) {
        dims[i][0] = NX;
        dims[i][1] = NY;
        dims[i][2] = NZ;
    }

    /* 1-based global index ranges owned by this process (all of them). */
    for (i = 0; i < 3; i++) {
        xstart[i] = 1;
        xend[i] = NX;
        ystart[i] = 1;
        yend[i] = NY;
        zstart[i] = 1;
        zend[i] = NZ;
    }

/*--------------------------------------------------------------------
c Set up info for blocking of ffts and transposes.  This improves
c performance on cache-based systems. Blocking involves
c working on a chunk of the problem at a time, taking chunks
c along the first, second, or third dimension.
c
c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim)
c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims)
c Since 1st dim is always in processor, we'll assume it's long enough
c (default blocking factor is 16 so min size for 1st dim is 16)
c The only case we have to worry about is cffts1 in a 2d decomposition.
c so the blocking factor should not be larger than the 2nd dimension.
c-------------------------------------------------------------------*/

    fftblock = FFTBLOCK_DEFAULT;
    fftblockpad = FFTBLOCKPAD_DEFAULT;

    /* Pad the scratch rows to avoid cache-line conflicts when a
       non-default block size is chosen. */
    if (fftblock != FFTBLOCK_DEFAULT) fftblockpad = fftblock+3;
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2
c for time evolution exponent.
c-------------------------------------------------------------------*/

    int i, j, k, ii, ii2, jj, ij2, kk;
    double ap;

/*--------------------------------------------------------------------
c basically we want to convert the fortran indices
c   1 2 3 4 5 6 7 8
c to
c   0 1 2 3 -4 -3 -2 -1
c The following magic formula does the trick:
c mod(i-1+n/2, n) - n/2
c-------------------------------------------------------------------*/

#pragma omp for
    for (i = 0; i < dims[2][0]; i++) {
        ii = (i+1+xstart[2]-2+NX/2)%NX - NX/2;
        ii2 = ii*ii;
        for (j = 0; j < dims[2][1]; j++) {
            jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;
            ij2 = jj*jj+ii2;
            for (k = 0; k < dims[2][2]; k++) {
                kk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;
                indexmap[k][j][i] = kk*kk+ij2;
            }
        }
    }

/*--------------------------------------------------------------------
c compute array of exponentials for time evolution.
c-------------------------------------------------------------------*/
    /* ex[m] = exp(-4*alpha*pi^2)^m; built once by one thread since ex
       is shared by the whole team. */
#pragma omp single
    {
        ap = - 4.0 * ALPHA * PI * PI;

        ex[0] = 1.0;
        ex[1] = exp(ap);
        for (i = 2; i <= EXPMAX; i++) {
            ex[i] = ex[i-1]*ex[1];
        }
    } /* end single */
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void print_timers(void) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

    int i;
    char *tstrings[] = { " total ", " setup ", " fft ", " evolve ",
                         " checksum ", " fftlow ", " fftcopy " };

    for (i = 0; i < T_MAX; i++) {
        if (timer_read(i) != 0.0) {
            /* NOTE(review): the "(%16s(" in this format string looks like
               a typo for "(%16s)" — output only, results unaffected. */
            printf("timer %2d(%16s( :%10.6f\n", i, tstrings[i],
                   timer_read(i));
        }
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

    /* Per-thread scratch planes for the 1-D FFT sweeps. */
    dcomplex y0[NX][FFTBLOCKPAD];
    dcomplex y1[NX][FFTBLOCKPAD];

/*--------------------------------------------------------------------
c note: args x1, x2 must be different arrays
c note: args for cfftsx are (direction, layout, xin, xout, scratch)
c       xin/xout may be the same and it can be somewhat faster
c       if they are
c-------------------------------------------------------------------*/

    /* Forward transform sweeps dimensions 1,2,3; inverse sweeps 3,2,1. */
    if (dir == 1) {
        cffts1(1, dims[0], x1, x1, y0, y1);     /* x1 -> x1 */
        cffts2(1, dims[1], x1, x1, y0, y1);     /* x1 -> x1 */
        cffts3(1, dims[2], x1, x2, y0, y1);     /* x1 -> x2 */
    } else {
        cffts3(-1, dims[2], x1, x1, y0, y1);    /* x1 -> x1 */
        cffts2(-1, dims[1], x1, x1, y0, y1);    /* x1 -> x1 */
        cffts1(-1, dims[0], x1, x2, y0, y1);    /* x1 -> x2 */
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX],
                   dcomplex xout[NZ][NY][NX],
                   dcomplex y0[NX][FFTBLOCKPAD],
                   dcomplex y1[NX][FFTBLOCKPAD]) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

    /* 1-D FFTs along the first (x) dimension, blocked over j in strips
       of fftblock rows so the working set stays in cache. */
    int logd[3];
    int i, j, k, jj;

    for (i = 0; i < 3; i++) {
        logd[i] = ilog2(d[i]);
    }

#pragma omp for
    for (k = 0; k < d[2]; k++) {
        for (jj = 0; jj <= d[1] - fftblock; jj+=fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
            /* Gather a j-strip into scratch, transposed so the FFT runs
               with stride-1 access over i. */
            for (j = 0; j < fftblock; j++) {
                for (i = 0; i < d[0]; i++) {
                    y0[i][j].real = x[k][j+jj][i].real;
                    y0[i][j].imag = x[k][j+jj][i].imag;
                }
            }
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */

/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
            cfftz (is, logd[0], d[0], y0, y1);

/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
            /* Scatter the transformed strip back. */
            for (j = 0; j < fftblock; j++) {
                for (i = 0; i < d[0]; i++) {
                    xout[k][j+jj][i].real = y0[i][j].real;
                    xout[k][j+jj][i].imag = y0[i][j].imag;
                }
            }
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
        }
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX],
                   dcomplex xout[NZ][NY][NX],
                   dcomplex y0[NX][FFTBLOCKPAD],
                   dcomplex y1[NX][FFTBLOCKPAD]) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

    /* 1-D FFTs along the second (y) dimension, blocked over i in strips
       of fftblock columns. */
    int logd[3];
    int i, j, k, ii;

    for (i = 0; i < 3; i++) {
        logd[i] = ilog2(d[i]);
    }

#pragma omp for
    for (k = 0; k < d[2]; k++) {
        for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
            for (j = 0; j < d[1]; j++) {
                for (i = 0; i < fftblock; i++) {
                    y0[j][i].real = x[k][j][i+ii].real;
                    y0[j][i].imag = x[k][j][i+ii].imag;
                }
            }
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */

/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
            cfftz (is, logd[1], d[1], y0, y1);

/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
            for (j = 0; j < d[1]; j++) {
                for (i = 0; i < fftblock; i++) {
                    xout[k][j][i+ii].real = y0[j][i].real;
                    xout[k][j][i+ii].imag = y0[j][i].imag;
                }
            }
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
        }
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX],
                   dcomplex xout[NZ][NY][NX],
                   dcomplex y0[NX][FFTBLOCKPAD],
                   dcomplex y1[NX][FFTBLOCKPAD]) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

    /* 1-D FFTs along the third (z) dimension; parallelized over j and
       blocked over i in strips of fftblock columns. */
    int logd[3];
    int i, j, k, ii;

    for (i = 0;i < 3; i++) {
        logd[i] = ilog2(d[i]);
    }

#pragma omp for
    for (j = 0; j < d[1]; j++) {
        for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
            for (k = 0; k < d[2]; k++) {
                for (i = 0; i < fftblock; i++) {
                    y0[k][i].real = x[k][j][i+ii].real;
                    y0[k][i].imag = x[k][j][i+ii].imag;
                }
            }
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */

/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
            cfftz (is, logd[2], d[2], y0, y1);

/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
            for (k = 0; k < d[2]; k++) {
                for (i = 0; i < fftblock; i++) {
                    xout[k][j][i+ii].real = y0[k][i].real;
                    xout[k][j][i+ii].imag = y0[k][i].imag;
                }
            }
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
        }
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void fft_init (int n) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c compute the roots-of-unity array that will be used for subsequent FFTs.
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c Initialize the U array with sines and cosines in a manner that permits
c stride one access at each FFT iteration.
c-------------------------------------------------------------------*/

    int m,nu,ku,i,j,ln;
    double t, ti;

    nu = n;
    m = ilog2(n);
    /* u[0].real stores log2(n) so cfftz can validate its m argument. */
    u[0].real = (double)m;
    u[0].imag = 0.0;
    ku = 1;
    ln = 1;

    /* Level j holds ln twiddle factors e^{i*pi*k/ln}, packed
       consecutively starting at u[ku]. */
    for (j = 1; j <= m; j++) {
        t = PI / ln;

        for (i = 0; i <= ln - 1; i++) {
            ti = i * t;
            u[i+ku].real = cos(ti);
            u[i+ku].imag = sin(ti);
        }

        ku = ku + ln;
        ln = 2 * ln;
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD],
                   dcomplex y[NX][FFTBLOCKPAD]) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c Computes NY N-point complex-to-complex FFTs of X using an algorithm due
c to Swarztrauber.  X is both the input and the output array, while Y is a
c scratch array.  It is assumed that N = 2^M.  Before calling CFFTZ to
c perform FFTs, the array U must be initialized by calling CFFTZ with IS
c set to 0 and M set to MX, where MX is the maximum value of M for any
c subsequent call.
c-------------------------------------------------------------------*/

    int i,j,l,mx;

/*--------------------------------------------------------------------
c Check if input parameters are invalid.
c-------------------------------------------------------------------*/
    /* fft_init stored log2(max n) in u[0].real; reject anything larger. */
    mx = (int)(u[0].real);
    if ((is != 1 && is != -1) || m < 1 || m > mx) {
        printf("CFFTZ: Either U has not been initialized, or else\n"
               "one of the input parameters is invalid%5d%5d%5d\n",
               is, m, mx);
        exit(1);
    }

/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
    /* Two butterfly levels per iteration, ping-ponging x <-> y. */
    for (l = 1; l <= m; l+=2) {
        fftz2 (is, l, m, n, fftblock, fftblockpad, u, x, y);
        if (l == m) break;
        fftz2 (is, l + 1, m, n, fftblock, fftblockpad, u, y, x);
    }

/*--------------------------------------------------------------------
c Copy Y to X.
c-------------------------------------------------------------------*/
    /* Odd m leaves the final result in y; move it back into x. */
    if (m % 2 == 1) {
        for (j = 0; j < n; j++) {
            for (i = 0; i < fftblock; i++) {
                x[j][i].real = y[j][i].real;
                x[j][i].imag = y[j][i].imag;
            }
        }
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void fftz2 (int is, int l, int m, int n, int ny, int ny1,
                   dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD],
                   dcomplex y[NX][FFTBLOCKPAD]) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c Performs the L-th iteration of the second variant of the Stockham FFT.
c-------------------------------------------------------------------*/

    int k,n1,li,lj,lk,ku,i,j,i11,i12,i21,i22;
    dcomplex u1,x11,x21;

/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/

    n1 = n / 2;
    /* lk = 2^(l-1), li = 2^(m-l); written with shifts guarded against a
       zero exponent (2 << -1 would be undefined). */
    if (l-1 == 0) {
        lk = 1;
    } else {
        lk = 2 << ((l - 1)-1);
    }
    if (m-l == 0) {
        li = 1;
    } else {
        li = 2 << ((m - l)-1);
    }
    lj = 2 * lk;
    ku = li;

    for (i = 0; i < li; i++) {
        i11 = i * lk;
        i12 = i11 + n1;
        i21 = i * lj;
        i22 = i21 + lk;

        /* Forward transform uses the twiddle factor as stored; the
           inverse uses its complex conjugate. */
        if (is >= 1) {
            u1.real = u[ku+i].real;
            u1.imag = u[ku+i].imag;
        } else {
            u1.real = u[ku+i].real;
            u1.imag = -u[ku+i].imag;
        }

/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
        for (k = 0; k < lk; k++) {
            for (j = 0; j < ny; j++) {
                double x11real, x11imag;
                double x21real, x21imag;
                x11real = x[i11+k][j].real;
                x11imag = x[i11+k][j].imag;
                x21real = x[i12+k][j].real;
                x21imag = x[i12+k][j].imag;
                /* Butterfly: sum and twiddled difference. */
                y[i21+k][j].real = x11real + x21real;
                y[i21+k][j].imag = x11imag + x21imag;
                y[i22+k][j].real = u1.real * (x11real - x21real)
                                 - u1.imag * (x11imag - x21imag);
                y[i22+k][j].imag = u1.real * (x11imag - x21imag)
                                 + u1.imag * (x11real - x21real);
            }
        }
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static int ilog2(int n) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

    /* Integer ceil(log2(n)) for n >= 1 (exact log2 for powers of two,
       which is how the benchmark uses it). */
    int nn, lg;

    if (n == 1) {
        return 0;
    }
    lg = 1;
    nn = 2;
    while (nn < n) {
        nn = nn << 1;
        lg++;
    }
    return lg;
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

    /* Sample 1024 pseudo-randomly chosen grid points and accumulate the
       complex sum into sums[i]; run inside a parallel region. */
    /* NOTE(review): ierr and allchk are unused (MPI leftovers). */
    int j, q,r,s, ierr;
    dcomplex chk,allchk;

    chk.real = 0.0;
    chk.imag = 0.0;

    /* nowait: each thread finishes its private partial sum and goes
       straight to the critical accumulation; the barrier below orders
       the normalization after all additions. */
#pragma omp for nowait
    for (j = 1; j <= 1024; j++) {
        q = j%NX+1;
        if (q >= xstart[0] && q <= xend[0]) {
            r = (3*j)%NY+1;
            if (r >= ystart[0] && r <= yend[0]) {
                s = (5*j)%NZ+1;
                if (s >= zstart[0] && s <= zend[0]) {
                    cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]);
                }
            }
        }
    }

#pragma omp critical
    {
        sums[i].real += chk.real;
        sums[i].imag += chk.imag;
    }
#pragma omp barrier
#pragma omp single
    {
        /* complex % real */
        sums[i].real = sums[i].real/(double)(NTOTAL);
        sums[i].imag = sums[i].imag/(double)(NTOTAL);

/*      printf("T = %5d     Checksum = %22.12e %22.12e\n", */
/*             i, sums[i].real, sums[i].imag); */
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void verify (int d1, int d2, int d3, int nt,
                    boolean *verified, char *class) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

    /* Compare the accumulated checksums sums[1..nt] against published
       reference values; sets *class ('S','W','A','B','C' or 'U' when the
       problem size is unrecognized) and *verified. */
    /* NOTE(review): ierr and size are unused (MPI leftovers). */
    int ierr, size, i;
    double err, epsilon;

/*--------------------------------------------------------------------
c   Sample size reference checksums
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   Class S size reference checksums
c-------------------------------------------------------------------*/
    double vdata_real_s[6+1] = { 0.0,
                                 5.546087004964e+02,
                                 5.546385409189e+02,
                                 5.546148406171e+02,
                                 5.545423607415e+02,
                                 5.544255039624e+02,
                                 5.542683411902e+02 };
    double vdata_imag_s[6+1] = { 0.0,
                                 4.845363331978e+02,
                                 4.865304269511e+02,
                                 4.883910722336e+02,
                                 4.901273169046e+02,
                                 4.917475857993e+02,
                                 4.932597244941e+02 };
/*--------------------------------------------------------------------
c   Class W size reference checksums
c-------------------------------------------------------------------*/
    double vdata_real_w[6+1] = { 0.0,
                                 5.673612178944e+02,
                                 5.631436885271e+02,
                                 5.594024089970e+02,
                                 5.560698047020e+02,
                                 5.530898991250e+02,
                                 5.504159734538e+02 };
    double vdata_imag_w[6+1] = { 0.0,
                                 5.293246849175e+02,
                                 5.282149986629e+02,
                                 5.270996558037e+02,
                                 5.260027904925e+02,
                                 5.249400845633e+02,
                                 5.239212247086e+02 };
/*--------------------------------------------------------------------
c   Class A size reference checksums
c-------------------------------------------------------------------*/
    double vdata_real_a[6+1] = { 0.0,
                                 5.046735008193e+02,
                                 5.059412319734e+02,
                                 5.069376896287e+02,
                                 5.077892868474e+02,
                                 5.085233095391e+02,
                                 5.091487099959e+02 };
    double vdata_imag_a[6+1] = { 0.0,
                                 5.114047905510e+02,
                                 5.098809666433e+02,
                                 5.098144042213e+02,
                                 5.101336130759e+02,
                                 5.104914655194e+02,
                                 5.107917842803e+02 };
/*--------------------------------------------------------------------
c   Class B size reference checksums
c-------------------------------------------------------------------*/
    double vdata_real_b[20+1] = { 0.0,
                                  5.177643571579e+02,
                                  5.154521291263e+02,
                                  5.146409228649e+02,
                                  5.142378756213e+02,
                                  5.139626667737e+02,
                                  5.137423460082e+02,
                                  5.135547056878e+02,
                                  5.133910925466e+02,
                                  5.132470705390e+02,
                                  5.131197729984e+02,
                                  5.130070319283e+02,
                                  5.129070537032e+02,
                                  5.128182883502e+02,
                                  5.127393733383e+02,
                                  5.126691062020e+02,
                                  5.126064276004e+02,
                                  5.125504076570e+02,
                                  5.125002331720e+02,
                                  5.124551951846e+02,
                                  5.124146770029e+02 };
    double vdata_imag_b[20+1] = { 0.0,
                                  5.077803458597e+02,
                                  5.088249431599e+02,
                                  5.096208912659e+02,
                                  5.101023387619e+02,
                                  5.103976610617e+02,
                                  5.105948019802e+02,
                                  5.107404165783e+02,
                                  5.108576573661e+02,
                                  5.109577278523e+02,
                                  5.110460304483e+02,
                                  5.111252433800e+02,
                                  5.111968077718e+02,
                                  5.112616233064e+02,
                                  5.113203605551e+02,
                                  5.113735928093e+02,
                                  5.114218460548e+02,
                                  5.114656139760e+02,
                                  5.115053595966e+02,
                                  5.115415130407e+02,
                                  5.115744692211e+02 };
/*--------------------------------------------------------------------
c   Class C size reference checksums
c-------------------------------------------------------------------*/
    double vdata_real_c[20+1] = { 0.0,
                                  5.195078707457e+02,
                                  5.155422171134e+02,
                                  5.144678022222e+02,
                                  5.140150594328e+02,
                                  5.137550426810e+02,
                                  5.135811056728e+02,
                                  5.134569343165e+02,
                                  5.133651975661e+02,
                                  5.132955192805e+02,
                                  5.132410471738e+02,
                                  5.131971141679e+02,
                                  5.131605205716e+02,
                                  5.131290734194e+02,
                                  5.131012720314e+02,
                                  5.130760908195e+02,
                                  5.130528295923e+02,
                                  5.130310107773e+02,
                                  5.130103090133e+02,
                                  5.129905029333e+02,
                                  5.129714421109e+02 };
    double vdata_imag_c[20+1] = { 0.0,
                                  5.149019699238e+02,
                                  5.127578201997e+02,
                                  5.122251847514e+02,
                                  5.121090289018e+02,
                                  5.121143685824e+02,
                                  5.121496764568e+02,
                                  5.121870921893e+02,
                                  5.122193250322e+02,
                                  5.122454735794e+02,
                                  5.122663649603e+02,
                                  5.122830879827e+02,
                                  5.122965869718e+02,
                                  5.123075927445e+02,
                                  5.123166486553e+02,
                                  5.123241541685e+02,
                                  5.123304037599e+02,
                                  5.123356167976e+02,
                                  5.123399592211e+02,
                                  5.123435588985e+02,
                                  5.123465164008e+02 };

    /* Relative-error tolerance for checksum comparison. */
    epsilon = 1.0e-12;
    *verified = TRUE;
    *class = 'U';

    if (d1 == 64 && d2 == 64 && d3 == 64 && nt == 6) {
        *class = 'S';
        for (i = 1; i <= nt; i++) {
            err = (get_real(sums[i]) - vdata_real_s[i]) / vdata_real_s[i];
            if (fabs(err) > epsilon) {
                *verified = FALSE;
                break;
            }
            err = (get_imag(sums[i]) - vdata_imag_s[i]) / vdata_imag_s[i];
            if (fabs(err) > epsilon) {
                *verified = FALSE;
                break;
            }
        }
    } else if (d1 == 128 && d2 == 128 && d3 == 32 && nt == 6) {
        *class = 'W';
        for (i = 1; i <= nt; i++) {
            err = (get_real(sums[i]) - vdata_real_w[i]) / vdata_real_w[i];
            if (fabs(err) > epsilon) {
                *verified = FALSE;
                break;
            }
            err = (get_imag(sums[i]) - vdata_imag_w[i]) / vdata_imag_w[i];
            if (fabs(err) > epsilon) {
                *verified = FALSE;
                break;
            }
        }
    } else if (d1 == 256 && d2 == 256 && d3 == 128 && nt == 6) {
        *class = 'A';
        for (i = 1; i <= nt; i++) {
            err = (get_real(sums[i]) - vdata_real_a[i]) / vdata_real_a[i];
            if (fabs(err) > epsilon) {
                *verified = FALSE;
                break;
            }
            err = (get_imag(sums[i]) - vdata_imag_a[i]) / vdata_imag_a[i];
            if (fabs(err) > epsilon) {
                *verified = FALSE;
                break;
            }
        }
    } else if (d1 == 512 && d2 == 256 && d3 == 256 && nt == 20) {
        *class = 'B';
        for (i = 1; i <= nt; i++) {
            err = (get_real(sums[i]) - vdata_real_b[i]) / vdata_real_b[i];
            if (fabs(err) > epsilon) {
                *verified = FALSE;
                break;
            }
            err = (get_imag(sums[i]) - vdata_imag_b[i]) / vdata_imag_b[i];
            if (fabs(err) > epsilon) {
                *verified = FALSE;
                break;
            }
        }
    } else if (d1 == 512 && d2 == 512 && d3 == 512 && nt == 20) {
        *class = 'C';
        for (i = 1; i <= nt; i++) {
            err = (get_real(sums[i]) - vdata_real_c[i]) / vdata_real_c[i];
            if (fabs(err) > epsilon) {
                *verified = FALSE;
                break;
            }
            err = (get_imag(sums[i]) - vdata_imag_c[i]) / vdata_imag_c[i];
            if (fabs(err) > epsilon) {
                *verified = FALSE;
                break;
            }
        }
    }

    if (*class != 'U') {
        printf("Result verification successful\n");
    } else {
        printf("Result verification failed\n");
    }
    printf("class = %1c\n", *class);
}

/* Per-thread stack size handed to the BOMP backend (8 MiB). */
#define STACK_SIZE (8 * 1024 * 1024)

int main(int argc, char** argv)
{
    /* Entry point: takes the thread count on the command line and either
       launches the benchmark on a BOMP backend thread or runs it
       directly. */
    if (argc != 2) {
        /* Print usage */
        printf("Usage: %s <Number of threads>\n", argv[0]);
        exit(-1);
    }

#ifdef BOMP
    backend_span_domain(atoi(argv[1]), STACK_SIZE);
    bomp_custom_init(NULL);
    /* Run realmain on a dedicated thread with its own stack. */
    backend_thread_create_varstack(realmain,
                                   (void*)((uint64_t)atoi(argv[1])),
                                   STACK_SIZE);
    backend_thread_exit();
#else /* BOMP */
    realmain(atoi(argv[1]));
#endif /* BOMP */
}
bicg.c
/**
 * bicg.c: This file was adapted from PolyBench/GPU 1.0 test suite
 * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
 *
 * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 *
 * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
 *           Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
 *           Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
 */

#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/* Project-local helpers: rtclock() and percentDiff() come from here. */
#include "BenchmarksUtil.h"

// Error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.7

/* Problem size. */
#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif

#define NX SIZE
#define NY SIZE

#ifndef M_PI
#define M_PI 3.14159
#endif

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

/* Fill A (NX x NY, row-major) and the vectors p and r with deterministic
   values so CPU and GPU runs see identical inputs. */
void init_array(DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *r) {
  int i, j;

  for (i = 0; i < NX; i++) {
    r[i] = i * M_PI;

    for (j = 0; j < NY; j++) {
      A[i * NY + j] = ((DATA_TYPE)i * j) / NX;
    }
  }

  for (i = 0; i < NY; i++) {
    p[i] = i * M_PI;
  }
}

/* Count elements of q and s whose CPU/GPU percent difference exceeds the
   threshold; returns the number of mismatches (0 = pass). */
int compareResults(DATA_TYPE *s, DATA_TYPE *s_outputFromGpu, DATA_TYPE *q,
                   DATA_TYPE *q_outputFromGpu) {
  int i, fail;
  fail = 0;

  // Compare q with q_outputFromGpu
  for (i = 0; i < NX; i++) {
    if (percentDiff(q[i], q_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
      fail++;
    }
  }

  // Compare s with s_outputFromGpu
  for (i = 0; i < NY; i++) {
    if (percentDiff(s[i], s_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
      fail++;
    }
  }

  // print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         PERCENT_DIFF_ERROR_THRESHOLD, fail);

  return fail;
}

/* Reference sequential BiCG kernel: s = A^T * r and q = A * p, fused into
   a single pass over A. */
void bicg_cpu(DATA_TYPE *A, DATA_TYPE *r, DATA_TYPE *s, DATA_TYPE *p,
              DATA_TYPE *q) {
  int i, j;

  for (i = 0; i < NY; i++) {
    s[i] = 0.0;
  }

  for (i = 0; i < NX; i++) {
    q[i] = 0.0;
    for (j = 0; j < NY; j++) {
      s[j] = s[j] + r[i] * A[i * NY + j];
      q[i] = q[i] + A[i * NY + j] * p[j];
    }
  }
}

/* Offloaded BiCG kernel.  The two products are split into separate loops
   so each parallel loop writes a distinct output vector race-free:
   the first parallelizes over j (s), the second over i (q). */
void bicg_OMP(DATA_TYPE *A, DATA_TYPE *r, DATA_TYPE *s, DATA_TYPE *p,
              DATA_TYPE *q) {
  int i, j;

  /* s must be zeroed on the host: it is mapped tofrom below. */
  for (i = 0; i < NY; i++) {
    s[i] = 0.0;
  }

#pragma omp target device(DEVICE_ID) map(to : A[ : NX *NY], p[ : NY], r[ : NX]) map(tofrom : s[ : NY], q[ : NX])
  {
    /* s = A^T * r : each thread owns one column j. */
#pragma omp parallel for collapse(1)
    for (j = 0; j < NY; j++) {
      for (i = 0; i < NX; i++) {
        s[j] = s[j] + r[i] * A[i * NY + j];
      }
    }

    /* q = A * p : each thread owns one row i. */
#pragma omp parallel for collapse(1)
    for (i = 0; i < NX; i++) {
      q[i] = 0.0;
      for (j = 0; j < NY; j++) {
        q[i] = q[i] + A[i * NY + j] * p[j];
      }
    }
  }
}

int main(int argc, char **argv) {
  /* Driver: allocate inputs/outputs, time the offloaded kernel, and (in
     test builds) time the CPU reference and compare.  Returns the number
     of mismatching elements (0 on success). */
  double t_start, t_end;
  int fail = 0;

  DATA_TYPE *A;
  DATA_TYPE *r;
  DATA_TYPE *s;
  DATA_TYPE *p;
  DATA_TYPE *q;
  DATA_TYPE *s_GPU;
  DATA_TYPE *q_GPU;

  /* NOTE(review): malloc results are not checked; a failed allocation
     would crash in init_array. */
  A = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE));
  r = (DATA_TYPE *)malloc(NX * sizeof(DATA_TYPE));
  s = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE));
  p = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE));
  q = (DATA_TYPE *)malloc(NX * sizeof(DATA_TYPE));
  s_GPU = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE));
  q_GPU = (DATA_TYPE *)malloc(NX * sizeof(DATA_TYPE));

  fprintf(stdout, "<< BiCG Sub Kernel of BiCGStab Linear Solver >>\n");

  init_array(A, p, r);

  t_start = rtclock();
  bicg_OMP(A, r, s_GPU, p, q_GPU);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

#ifdef RUN_TEST
  t_start = rtclock();
  bicg_cpu(A, r, s, p, q);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

  fail = compareResults(s, s_GPU, q, q_GPU);
#endif

  free(A);
  free(r);
  free(s);
  free(p);
  free(q);
  free(s_GPU);
  free(q_GPU);

  return fail;
}
rose_jacobi_avx2.c
#include "rex_kmp.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>     /* memcpy — was missing, memcpy was implicitly declared */
#include <math.h>       /* sqrt — was missing, sqrt was implicitly declared */
#include <time.h>       /* timespec_get (C11) */
#include <immintrin.h>  /* AVX intrinsics (requires -mavx); was included twice */

#define REAL float
#define DEFAULT_DIMSIZE 256

/* Wall-clock time in milliseconds.
 * Uses C11 timespec_get instead of the obsolescent ftime()/<sys/timeb.h>. */
static double read_timer_ms(void)
{
    struct timespec ts;
    timespec_get(&ts, TIME_UTC);
    return (double)ts.tv_sec * 1000.0 + (double)ts.tv_nsec / 1.0e6;
}

/************************************************************
 * program to solve a finite difference
 * discretization of Helmholtz equation :
 * (d2/dx2)u + (d2/dy2)u - alpha u = f
 * using Jacobi iterative method.
 *
 * Modified: Sanjiv Shah,       Kuck and Associates, Inc. (KAI), 1998
 * Author:   Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
 *
 * This c version program is translated by
 * Chunhua Liao, University of Houston, Jan, 2005
 *
 * Input :  n - grid dimension in x direction
 *          m - grid dimension in y direction
 *          alpha - Helmholtz constant (always greater than 0.0)
 *          tol   - error tolerance for iterative solver
 *          relax - Successive over-relaxation parameter
 *          mits  - Maximum iterations for iterative solver
 *
 * On output
 *       : u(n,m) - Dependent variable (solutions)
 *       : f(n,m) - Right hand side function
 *************************************************************/

/* Dump an n-by-m row-major matrix A with a title, one row per line. */
void print_array(char *title, char *name, float *A, int n, int m)
{
    printf("%s:\n", title);
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            printf("%s[%d][%d]:%f  ", name, i, j, A[i * m + j]);
        }
        printf("\n");
    }
    printf("\n");
}

/* subroutine initialize (n,m,alpha,dx,dy,u,f)
 ******************************************************
 * Initializes data: u = 0 everywhere, f = RHS for the
 * exact solution u(x,y) = (1-x^2)*(1-y^2).
 * Also returns the grid spacings dx, dy.
 *
 * NOTE(review): xx/yy are truncated to int, so the RHS is only exact at
 * integer coordinates. This matches the original benchmark (and its
 * error_check counterpart tolerates it) — kept as-is to preserve results.
 ******************************************************/
void initialize(int n, int m, float alpha, float *dx, float *dy,
                float *u_p, float *f_p)
{
    float (*u)[m] = (float (*)[m])u_p;
    float (*f)[m] = (float (*)[m])f_p;

    *dx = (float)(2.0 / (n - 1));
    *dy = (float)(2.0 / (m - 1));

    /* Initialize initial condition and RHS */
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            int xx = (int)(-1.0 + (*dx * (i - 1)));
            int yy = (int)(-1.0 + (*dy * (j - 1)));
            u[i][j] = 0.0;
            f[i][j] = (float)(-1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)
                              - 2.0 * (1.0 - xx * xx)
                              - 2.0 * (1.0 - yy * yy));
        }
    }
}

/* subroutine error_check (n,m,alpha,dx,dy,u,f)
 ************************************************************
 * Checks error between numerical and exact solution
 * u(x,y) = (1-x^2)*(1-y^2); prints the RMS-style error.
 ************************************************************/
void error_check(int n, int m, float alpha, float dx, float dy,
                 float *u_p, float *f_p)
{
    float error = 0.0f;
    float (*u)[m] = (float (*)[m])u_p;
    float (*f)[m] = (float (*)[m])f_p;
    (void)alpha;
    (void)f;

    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            float xx = (float)(-1.0 + (dx * (i - 1)));
            float yy = (float)(-1.0 + (dy * (j - 1)));
            float temp = u[i][j] - (1.0f - xx * xx) * (1.0f - yy * yy);
            error += temp * temp;
        }
    }
    error = (float)(sqrt(error) / (n * m));
    printf("Solution Error: %2.6g\n", error);
}

void jacobi_seq(int n, int m, float dx, float dy, float alpha, float relax,
                float *u_p, float *f_p, float tol, int mits);
void jacobi_omp(int n, int m, float dx, float dy, float alpha, float relax,
                float *u_p, float *f_p, float tol, int mits);

int main(int argc, char *argv[])
{
    int n = DEFAULT_DIMSIZE;      /* was a hard-coded 256 duplicating the macro */
    int m = DEFAULT_DIMSIZE;
    float alpha = 0.0543f;
    float tol = 0.0000000001f;
    float relax = 1.0f;
    int mits = 5000;

    /* Positional arguments, each optional: n m alpha tol relax mits.
     * Fall-through switch consumes as many as were supplied;
     * extra arguments are ignored (matches the original behavior). */
    switch (argc > 7 ? 7 : argc) {
    case 7: sscanf(argv[6], "%d", &mits);  /* fallthrough */
    case 6: sscanf(argv[5], "%g", &relax); /* fallthrough */
    case 5: sscanf(argv[4], "%g", &tol);   /* fallthrough */
    case 4: sscanf(argv[3], "%g", &alpha); /* fallthrough */
    case 3: sscanf(argv[2], "%d", &m);     /* fallthrough */
    case 2: sscanf(argv[1], "%d", &n);
            if (argc == 2) m = n;          /* single arg sets both dims */
            break;
    default: break;
    }

    printf("jacobi %d %d %g %g %g %d\n", n, m, alpha, tol, relax, mits);
    printf("------------------------------------------------------------------------------------------------------\n");

    /* u: sequential solution; uomp: vectorized solution; f: RHS. */
    float *u    = malloc(sizeof(float) * n * m);
    float *uomp = malloc(sizeof(float) * n * m);
    float *f    = malloc(sizeof(float) * n * m);
    if (!u || !uomp || !f) {               /* was unchecked */
        fprintf(stderr, "jacobi: out of memory\n");
        free(u); free(uomp); free(f);
        return 1;
    }

    float dx;   /* grid spacing in x direction */
    float dy;   /* grid spacing in y direction */
    initialize(n, m, alpha, &dx, &dy, u, f);
    memcpy(uomp, u, sizeof(float) * n * m);

    double elapsed = read_timer_ms();
    jacobi_seq(n, m, dx, dy, alpha, relax, u, f, tol, mits);
    elapsed = read_timer_ms() - elapsed;
    printf("seq elasped time(ms): %4f\n", elapsed);
    /* 13 flops per interior point per iteration. */
    double mflops = 0.001 * mits * (n - 2) * (m - 2) * 13 / elapsed;
    printf("MFLOPS: %12.6g\n", mflops);

    puts("================");

    elapsed = read_timer_ms();
    jacobi_omp(n, m, dx, dy, alpha, relax, uomp, f, tol, mits);
    elapsed = read_timer_ms() - elapsed;
    printf("OpenMP elasped time(ms): %4f\n", elapsed);
    mflops = 0.001 * mits * (n - 2) * (m - 2) * 13 / elapsed;
    printf("MFLOPS: %12.6g\n", mflops);

    error_check(n, m, alpha, dx, dy, u, f);

    free(u);
    free(f);
    free(uomp);
    return 0;
}

/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,mits)
 ******************************************************************
 * Solves the Helmholtz equation on a rectangular grid assuming:
 * (1) Uniform discretization in each direction, and
 * (2) Dirichlet boundary conditions.
 *
 * Jacobi method is used in this routine.
 *
 * Input : n,m   Number of grid points in the X/Y directions
 *         dx,dy Grid spacing in the X/Y directions
 *         alpha Helmholtz eqn. coefficient
 *         omega Relaxation factor
 *         f(n,m) Right hand side function
 *         u(n,m) Dependent variable/Solution
 *         tol    Tolerance for iterative solver
 *         mits   Maximum number of iterations
 *
 * Output : u(n,m) - Solution
 *****************************************************************/
void jacobi_seq(int n, int m, float dx, float dy, float alpha, float omega,
                float *u_p, float *f_p, float tol, int mits)
{
    /* uold lives on the heap: the original float uold[n][m] VLA put
     * n*m*4 bytes (256 KB at defaults, unbounded for user input) on
     * the stack — a stack-overflow risk. */
    float *tmp = malloc(sizeof(float) * n * m);
    if (!tmp) {
        fprintf(stderr, "jacobi_seq: out of memory\n");
        return;
    }
    float (*uold)[m] = (float (*)[m])tmp;
    float (*u)[m] = (float (*)[m])u_p;
    float (*f)[m] = (float (*)[m])f_p;

    float ax = (float)(1.0 / (dx * dx));                   /* X-direction coef */
    float ay = (float)(1.0 / (dy * dy));                   /* Y-direction coef */
    float b  = (float)(-2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha); /* central */

    float error = 10.0f * tol;
    int k = 1;
    while (k <= mits && error > tol) {
        error = 0.0f;

        /* Copy new solution into old */
        memcpy(uold, u, sizeof(float) * n * m);

        /* Jacobi sweep over interior points; boundaries stay fixed. */
        for (int i = 1; i < n - 1; i++) {
            for (int j = 1; j < m - 1; j++) {
                float resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                             + ay * (uold[i][j - 1] + uold[i][j + 1])
                             + b * uold[i][j] - f[i][j]) / b;
                u[i][j] = uold[i][j] - omega * resid;
                error += resid * resid;
            }
        }

        error = (float)(sqrt(error) / (n * m));
        k = k + 1;
    }

    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
    free(tmp);
}

/* AVX-vectorized Jacobi sweep; same contract as jacobi_seq.
 * (Despite the _omp name it contains no OpenMP pragmas — it is the
 * compiler-vectorized variant of the benchmark.)
 *
 * The original machine-generated intrinsics body was rewritten: it read
 * `resid` uninitialized (UB), computed b/(f-stencil) instead of
 * (stencil-f)/b, updated u as omega*resid-uold instead of
 * uold-omega*resid, accumulated the error from broadcast scalars, and
 * both its copy loop (j <= m-1 step 8) and sweep loop (j <= m-2 step 8)
 * ran a full 8-wide vector past the row end, clobbering the boundary
 * column u[i+1][0] and reading past the array on the last row. */
void jacobi_omp(int n, int m, float dx, float dy, float alpha, float omega,
                float *u_p, float *f_p, float tol, int mits)
{
    float *tmp = malloc(sizeof(float) * n * m);
    if (!tmp) {                               /* was unchecked */
        fprintf(stderr, "jacobi_omp: out of memory\n");
        return;
    }
    float (*uold)[m] = (float (*)[m])tmp;
    float (*u)[m] = (float (*)[m])u_p;
    float (*f)[m] = (float (*)[m])f_p;

    float ax = (float)(1.0 / (dx * dx));                   /* X-direction coef */
    float ay = (float)(1.0 / (dy * dy));                   /* Y-direction coef */
    float b  = (float)(-2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha); /* central */

    float error = 10.0f * tol;
    int k = 1;
    while (k <= mits && error > tol) {
        error = 0.0f;

        /* Copy new solution into old (replaces the 8-wide copy loop that
         * overran each row when m is not a multiple of 8). */
        memcpy(uold, u, sizeof(float) * n * m);

        const __m256 vax = _mm256_set1_ps(ax);
        const __m256 vay = _mm256_set1_ps(ay);
        const __m256 vb  = _mm256_set1_ps(b);
        const __m256 vom = _mm256_set1_ps(omega);

        for (int i = 1; i < n - 1; i++) {
            __m256 verr = _mm256_setzero_ps();  /* per-lane error partial sums */
            int j = 1;

            /* 8-wide main loop over interior columns; stops while a full
             * vector still fits inside [1, m-2]. */
            for (; j + 8 <= m - 1; j += 8) {
                __m256 north = _mm256_loadu_ps(&uold[i - 1][j]);
                __m256 south = _mm256_loadu_ps(&uold[i + 1][j]);
                __m256 west  = _mm256_loadu_ps(&uold[i][j - 1]);
                __m256 east  = _mm256_loadu_ps(&uold[i][j + 1]);
                __m256 cent  = _mm256_loadu_ps(&uold[i][j]);
                __m256 rhs   = _mm256_loadu_ps(&f[i][j]);

                /* resid = (ax*(N+S) + ay*(W+E) + b*C - f) / b */
                __m256 stencil = _mm256_add_ps(
                    _mm256_add_ps(_mm256_mul_ps(vax, _mm256_add_ps(north, south)),
                                  _mm256_mul_ps(vay, _mm256_add_ps(west, east))),
                    _mm256_mul_ps(vb, cent));
                __m256 resid = _mm256_div_ps(_mm256_sub_ps(stencil, rhs), vb);

                /* u = uold - omega*resid */
                _mm256_storeu_ps(&u[i][j],
                                 _mm256_sub_ps(cent, _mm256_mul_ps(vom, resid)));

                verr = _mm256_add_ps(verr, _mm256_mul_ps(resid, resid));
            }

            /* Horizontal sum of the 8 error lanes. */
            float lanes[8];
            _mm256_storeu_ps(lanes, verr);
            error += lanes[0] + lanes[1] + lanes[2] + lanes[3]
                   + lanes[4] + lanes[5] + lanes[6] + lanes[7];

            /* Scalar tail for the remaining interior columns. */
            for (; j < m - 1; j++) {
                float resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                             + ay * (uold[i][j - 1] + uold[i][j + 1])
                             + b * uold[i][j] - f[i][j]) / b;
                u[i][j] = uold[i][j] - omega * resid;
                error += resid * resid;
            }
        }

        error = (float)(sqrt(error) / (n * m));
        k = k + 1;
    }

    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
    free(tmp);
}